Commit 2361be23666232dbb4851a527f466c4cbf5340fc
Committed by: Rafael J. Wysocki
Parent: 72a4ce340a
Exists in: smarc-imx_3.14.28_1.0.0_ga and 1 other branch
cpufreq: Don't create empty /sys/devices/system/cpu/cpufreq directory
When we don't have any files in the cpu/cpufreq directory, we shouldn't create it. Especially with the introduction of the per-policy governor instance patchset, even governors are moved to the cpu/cpu*/cpufreq/governor-name directory, so this directory is often not required at all. Let's create it only when required.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
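The hunks that implement this live in drivers/cpufreq/cpufreq.c, outside the portion shown below. As a rough illustration of the pattern the message describes, a ref-counted helper pair could create the global directory on first use and remove it with the last user. A minimal sketch, not the verbatim patch; the helper names cpufreq_get_global_kobject()/cpufreq_put_global_kobject() and the usage counter are assumptions here:

	#include <linux/atomic.h>
	#include <linux/cpu.h>
	#include <linux/kobject.h>

	/* Illustrative sketch of an on-demand global cpufreq kobject. */
	static struct kobject *cpufreq_global_kobject;
	static atomic_t kobj_usage = ATOMIC_INIT(0);

	struct kobject *cpufreq_get_global_kobject(void)
	{
		/* First user creates /sys/devices/system/cpu/cpufreq. */
		if (atomic_inc_return(&kobj_usage) == 1)
			cpufreq_global_kobject = kobject_create_and_add("cpufreq",
						&cpu_subsys.dev_root->kobj);
		return cpufreq_global_kobject;
	}

	void cpufreq_put_global_kobject(void)
	{
		/* Last user removes the now-empty directory again. */
		if (atomic_dec_and_test(&kobj_usage))
			kobject_put(cpufreq_global_kobject);
	}

(Error handling and re-creation after teardown are omitted.) A caller such as acpi-cpufreq would then take a reference only when it actually has a file, e.g. the global boost attribute, to place in that directory.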
Showing 4 changed files with 56 additions and 6 deletions
drivers/cpufreq/acpi-cpufreq.c
1 | /* | 1 | /* |
2 | * acpi-cpufreq.c - ACPI Processor P-States Driver | 2 | * acpi-cpufreq.c - ACPI Processor P-States Driver |
3 | * | 3 | * |
4 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> | 4 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> |
5 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> | 5 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> |
6 | * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de> | 6 | * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de> |
7 | * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com> | 7 | * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com> |
8 | * | 8 | * |
9 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 9 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
13 | * the Free Software Foundation; either version 2 of the License, or (at | 13 | * the Free Software Foundation; either version 2 of the License, or (at |
14 | * your option) any later version. | 14 | * your option) any later version. |
15 | * | 15 | * |
16 | * This program is distributed in the hope that it will be useful, but | 16 | * This program is distributed in the hope that it will be useful, but |
17 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 17 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
19 | * General Public License for more details. | 19 | * General Public License for more details. |
20 | * | 20 | * |
21 | * You should have received a copy of the GNU General Public License along | 21 | * You should have received a copy of the GNU General Public License along |
22 | * with this program; if not, write to the Free Software Foundation, Inc., | 22 | * with this program; if not, write to the Free Software Foundation, Inc., |
23 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. | 23 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
24 | * | 24 | * |
25 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | 25 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #include <linux/kernel.h> | 28 | #include <linux/kernel.h> |
29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/init.h> | 30 | #include <linux/init.h> |
31 | #include <linux/smp.h> | 31 | #include <linux/smp.h> |
32 | #include <linux/sched.h> | 32 | #include <linux/sched.h> |
33 | #include <linux/cpufreq.h> | 33 | #include <linux/cpufreq.h> |
34 | #include <linux/compiler.h> | 34 | #include <linux/compiler.h> |
35 | #include <linux/dmi.h> | 35 | #include <linux/dmi.h> |
36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
37 | 37 | ||
38 | #include <linux/acpi.h> | 38 | #include <linux/acpi.h> |
39 | #include <linux/io.h> | 39 | #include <linux/io.h> |
40 | #include <linux/delay.h> | 40 | #include <linux/delay.h> |
41 | #include <linux/uaccess.h> | 41 | #include <linux/uaccess.h> |
42 | 42 | ||
43 | #include <acpi/processor.h> | 43 | #include <acpi/processor.h> |
44 | 44 | ||
45 | #include <asm/msr.h> | 45 | #include <asm/msr.h> |
46 | #include <asm/processor.h> | 46 | #include <asm/processor.h> |
47 | #include <asm/cpufeature.h> | 47 | #include <asm/cpufeature.h> |
48 | #include "mperf.h" | 48 | #include "mperf.h" |
49 | 49 | ||
50 | MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); | 50 | MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); |
51 | MODULE_DESCRIPTION("ACPI Processor P-States Driver"); | 51 | MODULE_DESCRIPTION("ACPI Processor P-States Driver"); |
52 | MODULE_LICENSE("GPL"); | 52 | MODULE_LICENSE("GPL"); |
53 | 53 | ||
54 | #define PFX "acpi-cpufreq: " | 54 | #define PFX "acpi-cpufreq: " |
55 | 55 | ||
56 | enum { | 56 | enum { |
57 | UNDEFINED_CAPABLE = 0, | 57 | UNDEFINED_CAPABLE = 0, |
58 | SYSTEM_INTEL_MSR_CAPABLE, | 58 | SYSTEM_INTEL_MSR_CAPABLE, |
59 | SYSTEM_AMD_MSR_CAPABLE, | 59 | SYSTEM_AMD_MSR_CAPABLE, |
60 | SYSTEM_IO_CAPABLE, | 60 | SYSTEM_IO_CAPABLE, |
61 | }; | 61 | }; |
62 | 62 | ||
63 | #define INTEL_MSR_RANGE (0xffff) | 63 | #define INTEL_MSR_RANGE (0xffff) |
64 | #define AMD_MSR_RANGE (0x7) | 64 | #define AMD_MSR_RANGE (0x7) |
65 | 65 | ||
66 | #define MSR_K7_HWCR_CPB_DIS (1ULL << 25) | 66 | #define MSR_K7_HWCR_CPB_DIS (1ULL << 25) |
67 | 67 | ||
68 | struct acpi_cpufreq_data { | 68 | struct acpi_cpufreq_data { |
69 | struct acpi_processor_performance *acpi_data; | 69 | struct acpi_processor_performance *acpi_data; |
70 | struct cpufreq_frequency_table *freq_table; | 70 | struct cpufreq_frequency_table *freq_table; |
71 | unsigned int resume; | 71 | unsigned int resume; |
72 | unsigned int cpu_feature; | 72 | unsigned int cpu_feature; |
73 | }; | 73 | }; |
74 | 74 | ||
75 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data); | 75 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data); |
76 | 76 | ||
77 | /* acpi_perf_data is a pointer to percpu data. */ | 77 | /* acpi_perf_data is a pointer to percpu data. */ |
78 | static struct acpi_processor_performance __percpu *acpi_perf_data; | 78 | static struct acpi_processor_performance __percpu *acpi_perf_data; |
79 | 79 | ||
80 | static struct cpufreq_driver acpi_cpufreq_driver; | 80 | static struct cpufreq_driver acpi_cpufreq_driver; |
81 | 81 | ||
82 | static unsigned int acpi_pstate_strict; | 82 | static unsigned int acpi_pstate_strict; |
83 | static bool boost_enabled, boost_supported; | 83 | static bool boost_enabled, boost_supported; |
84 | static struct msr __percpu *msrs; | 84 | static struct msr __percpu *msrs; |
85 | 85 | ||
86 | static bool boost_state(unsigned int cpu) | 86 | static bool boost_state(unsigned int cpu) |
87 | { | 87 | { |
88 | u32 lo, hi; | 88 | u32 lo, hi; |
89 | u64 msr; | 89 | u64 msr; |
90 | 90 | ||
91 | switch (boot_cpu_data.x86_vendor) { | 91 | switch (boot_cpu_data.x86_vendor) { |
92 | case X86_VENDOR_INTEL: | 92 | case X86_VENDOR_INTEL: |
93 | rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi); | 93 | rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi); |
94 | msr = lo | ((u64)hi << 32); | 94 | msr = lo | ((u64)hi << 32); |
95 | return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); | 95 | return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); |
96 | case X86_VENDOR_AMD: | 96 | case X86_VENDOR_AMD: |
97 | rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); | 97 | rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); |
98 | msr = lo | ((u64)hi << 32); | 98 | msr = lo | ((u64)hi << 32); |
99 | return !(msr & MSR_K7_HWCR_CPB_DIS); | 99 | return !(msr & MSR_K7_HWCR_CPB_DIS); |
100 | } | 100 | } |
101 | return false; | 101 | return false; |
102 | } | 102 | } |
103 | 103 | ||
104 | static void boost_set_msrs(bool enable, const struct cpumask *cpumask) | 104 | static void boost_set_msrs(bool enable, const struct cpumask *cpumask) |
105 | { | 105 | { |
106 | u32 cpu; | 106 | u32 cpu; |
107 | u32 msr_addr; | 107 | u32 msr_addr; |
108 | u64 msr_mask; | 108 | u64 msr_mask; |
109 | 109 | ||
110 | switch (boot_cpu_data.x86_vendor) { | 110 | switch (boot_cpu_data.x86_vendor) { |
111 | case X86_VENDOR_INTEL: | 111 | case X86_VENDOR_INTEL: |
112 | msr_addr = MSR_IA32_MISC_ENABLE; | 112 | msr_addr = MSR_IA32_MISC_ENABLE; |
113 | msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE; | 113 | msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE; |
114 | break; | 114 | break; |
115 | case X86_VENDOR_AMD: | 115 | case X86_VENDOR_AMD: |
116 | msr_addr = MSR_K7_HWCR; | 116 | msr_addr = MSR_K7_HWCR; |
117 | msr_mask = MSR_K7_HWCR_CPB_DIS; | 117 | msr_mask = MSR_K7_HWCR_CPB_DIS; |
118 | break; | 118 | break; |
119 | default: | 119 | default: |
120 | return; | 120 | return; |
121 | } | 121 | } |
122 | 122 | ||
123 | rdmsr_on_cpus(cpumask, msr_addr, msrs); | 123 | rdmsr_on_cpus(cpumask, msr_addr, msrs); |
124 | 124 | ||
125 | for_each_cpu(cpu, cpumask) { | 125 | for_each_cpu(cpu, cpumask) { |
126 | struct msr *reg = per_cpu_ptr(msrs, cpu); | 126 | struct msr *reg = per_cpu_ptr(msrs, cpu); |
127 | if (enable) | 127 | if (enable) |
128 | reg->q &= ~msr_mask; | 128 | reg->q &= ~msr_mask; |
129 | else | 129 | else |
130 | reg->q |= msr_mask; | 130 | reg->q |= msr_mask; |
131 | } | 131 | } |
132 | 132 | ||
133 | wrmsr_on_cpus(cpumask, msr_addr, msrs); | 133 | wrmsr_on_cpus(cpumask, msr_addr, msrs); |
134 | } | 134 | } |
135 | 135 | ||
136 | static ssize_t _store_boost(const char *buf, size_t count) | 136 | static ssize_t _store_boost(const char *buf, size_t count) |
137 | { | 137 | { |
138 | int ret; | 138 | int ret; |
139 | unsigned long val = 0; | 139 | unsigned long val = 0; |
140 | 140 | ||
141 | if (!boost_supported) | 141 | if (!boost_supported) |
142 | return -EINVAL; | 142 | return -EINVAL; |
143 | 143 | ||
144 | ret = kstrtoul(buf, 10, &val); | 144 | ret = kstrtoul(buf, 10, &val); |
145 | if (ret || (val > 1)) | 145 | if (ret || (val > 1)) |
146 | return -EINVAL; | 146 | return -EINVAL; |
147 | 147 | ||
148 | if ((val && boost_enabled) || (!val && !boost_enabled)) | 148 | if ((val && boost_enabled) || (!val && !boost_enabled)) |
149 | return count; | 149 | return count; |
150 | 150 | ||
151 | get_online_cpus(); | 151 | get_online_cpus(); |
152 | 152 | ||
153 | boost_set_msrs(val, cpu_online_mask); | 153 | boost_set_msrs(val, cpu_online_mask); |
154 | 154 | ||
155 | put_online_cpus(); | 155 | put_online_cpus(); |
156 | 156 | ||
157 | boost_enabled = val; | 157 | boost_enabled = val; |
158 | pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis"); | 158 | pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis"); |
159 | 159 | ||
160 | return count; | 160 | return count; |
161 | } | 161 | } |
162 | 162 | ||
163 | static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr, | 163 | static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr, |
164 | const char *buf, size_t count) | 164 | const char *buf, size_t count) |
165 | { | 165 | { |
166 | return _store_boost(buf, count); | 166 | return _store_boost(buf, count); |
167 | } | 167 | } |
168 | 168 | ||
169 | static ssize_t show_global_boost(struct kobject *kobj, | 169 | static ssize_t show_global_boost(struct kobject *kobj, |
170 | struct attribute *attr, char *buf) | 170 | struct attribute *attr, char *buf) |
171 | { | 171 | { |
172 | return sprintf(buf, "%u\n", boost_enabled); | 172 | return sprintf(buf, "%u\n", boost_enabled); |
173 | } | 173 | } |
174 | 174 | ||
175 | static struct global_attr global_boost = __ATTR(boost, 0644, | 175 | static struct global_attr global_boost = __ATTR(boost, 0644, |
176 | show_global_boost, | 176 | show_global_boost, |
177 | store_global_boost); | 177 | store_global_boost); |
178 | 178 | ||
179 | #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB | 179 | #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB |
180 | static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, | 180 | static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, |
181 | size_t count) | 181 | size_t count) |
182 | { | 182 | { |
183 | return _store_boost(buf, count); | 183 | return _store_boost(buf, count); |
184 | } | 184 | } |
185 | 185 | ||
186 | static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf) | 186 | static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf) |
187 | { | 187 | { |
188 | return sprintf(buf, "%u\n", boost_enabled); | 188 | return sprintf(buf, "%u\n", boost_enabled); |
189 | } | 189 | } |
190 | 190 | ||
191 | static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb); | 191 | static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb); |
192 | #endif | 192 | #endif |
193 | 193 | ||
194 | static int check_est_cpu(unsigned int cpuid) | 194 | static int check_est_cpu(unsigned int cpuid) |
195 | { | 195 | { |
196 | struct cpuinfo_x86 *cpu = &cpu_data(cpuid); | 196 | struct cpuinfo_x86 *cpu = &cpu_data(cpuid); |
197 | 197 | ||
198 | return cpu_has(cpu, X86_FEATURE_EST); | 198 | return cpu_has(cpu, X86_FEATURE_EST); |
199 | } | 199 | } |
200 | 200 | ||
201 | static int check_amd_hwpstate_cpu(unsigned int cpuid) | 201 | static int check_amd_hwpstate_cpu(unsigned int cpuid) |
202 | { | 202 | { |
203 | struct cpuinfo_x86 *cpu = &cpu_data(cpuid); | 203 | struct cpuinfo_x86 *cpu = &cpu_data(cpuid); |
204 | 204 | ||
205 | return cpu_has(cpu, X86_FEATURE_HW_PSTATE); | 205 | return cpu_has(cpu, X86_FEATURE_HW_PSTATE); |
206 | } | 206 | } |
207 | 207 | ||
208 | static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) | 208 | static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) |
209 | { | 209 | { |
210 | struct acpi_processor_performance *perf; | 210 | struct acpi_processor_performance *perf; |
211 | int i; | 211 | int i; |
212 | 212 | ||
213 | perf = data->acpi_data; | 213 | perf = data->acpi_data; |
214 | 214 | ||
215 | for (i = 0; i < perf->state_count; i++) { | 215 | for (i = 0; i < perf->state_count; i++) { |
216 | if (value == perf->states[i].status) | 216 | if (value == perf->states[i].status) |
217 | return data->freq_table[i].frequency; | 217 | return data->freq_table[i].frequency; |
218 | } | 218 | } |
219 | return 0; | 219 | return 0; |
220 | } | 220 | } |
221 | 221 | ||
222 | static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) | 222 | static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) |
223 | { | 223 | { |
224 | int i; | 224 | int i; |
225 | struct acpi_processor_performance *perf; | 225 | struct acpi_processor_performance *perf; |
226 | 226 | ||
227 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) | 227 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) |
228 | msr &= AMD_MSR_RANGE; | 228 | msr &= AMD_MSR_RANGE; |
229 | else | 229 | else |
230 | msr &= INTEL_MSR_RANGE; | 230 | msr &= INTEL_MSR_RANGE; |
231 | 231 | ||
232 | perf = data->acpi_data; | 232 | perf = data->acpi_data; |
233 | 233 | ||
234 | for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { | 234 | for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { |
235 | if (msr == perf->states[data->freq_table[i].index].status) | 235 | if (msr == perf->states[data->freq_table[i].index].status) |
236 | return data->freq_table[i].frequency; | 236 | return data->freq_table[i].frequency; |
237 | } | 237 | } |
238 | return data->freq_table[0].frequency; | 238 | return data->freq_table[0].frequency; |
239 | } | 239 | } |
240 | 240 | ||
241 | static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) | 241 | static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) |
242 | { | 242 | { |
243 | switch (data->cpu_feature) { | 243 | switch (data->cpu_feature) { |
244 | case SYSTEM_INTEL_MSR_CAPABLE: | 244 | case SYSTEM_INTEL_MSR_CAPABLE: |
245 | case SYSTEM_AMD_MSR_CAPABLE: | 245 | case SYSTEM_AMD_MSR_CAPABLE: |
246 | return extract_msr(val, data); | 246 | return extract_msr(val, data); |
247 | case SYSTEM_IO_CAPABLE: | 247 | case SYSTEM_IO_CAPABLE: |
248 | return extract_io(val, data); | 248 | return extract_io(val, data); |
249 | default: | 249 | default: |
250 | return 0; | 250 | return 0; |
251 | } | 251 | } |
252 | } | 252 | } |
253 | 253 | ||
254 | struct msr_addr { | 254 | struct msr_addr { |
255 | u32 reg; | 255 | u32 reg; |
256 | }; | 256 | }; |
257 | 257 | ||
258 | struct io_addr { | 258 | struct io_addr { |
259 | u16 port; | 259 | u16 port; |
260 | u8 bit_width; | 260 | u8 bit_width; |
261 | }; | 261 | }; |
262 | 262 | ||
263 | struct drv_cmd { | 263 | struct drv_cmd { |
264 | unsigned int type; | 264 | unsigned int type; |
265 | const struct cpumask *mask; | 265 | const struct cpumask *mask; |
266 | union { | 266 | union { |
267 | struct msr_addr msr; | 267 | struct msr_addr msr; |
268 | struct io_addr io; | 268 | struct io_addr io; |
269 | } addr; | 269 | } addr; |
270 | u32 val; | 270 | u32 val; |
271 | }; | 271 | }; |
272 | 272 | ||
273 | /* Called via smp_call_function_single(), on the target CPU */ | 273 | /* Called via smp_call_function_single(), on the target CPU */ |
274 | static void do_drv_read(void *_cmd) | 274 | static void do_drv_read(void *_cmd) |
275 | { | 275 | { |
276 | struct drv_cmd *cmd = _cmd; | 276 | struct drv_cmd *cmd = _cmd; |
277 | u32 h; | 277 | u32 h; |
278 | 278 | ||
279 | switch (cmd->type) { | 279 | switch (cmd->type) { |
280 | case SYSTEM_INTEL_MSR_CAPABLE: | 280 | case SYSTEM_INTEL_MSR_CAPABLE: |
281 | case SYSTEM_AMD_MSR_CAPABLE: | 281 | case SYSTEM_AMD_MSR_CAPABLE: |
282 | rdmsr(cmd->addr.msr.reg, cmd->val, h); | 282 | rdmsr(cmd->addr.msr.reg, cmd->val, h); |
283 | break; | 283 | break; |
284 | case SYSTEM_IO_CAPABLE: | 284 | case SYSTEM_IO_CAPABLE: |
285 | acpi_os_read_port((acpi_io_address)cmd->addr.io.port, | 285 | acpi_os_read_port((acpi_io_address)cmd->addr.io.port, |
286 | &cmd->val, | 286 | &cmd->val, |
287 | (u32)cmd->addr.io.bit_width); | 287 | (u32)cmd->addr.io.bit_width); |
288 | break; | 288 | break; |
289 | default: | 289 | default: |
290 | break; | 290 | break; |
291 | } | 291 | } |
292 | } | 292 | } |
293 | 293 | ||
294 | /* Called via smp_call_function_many(), on the target CPUs */ | 294 | /* Called via smp_call_function_many(), on the target CPUs */ |
295 | static void do_drv_write(void *_cmd) | 295 | static void do_drv_write(void *_cmd) |
296 | { | 296 | { |
297 | struct drv_cmd *cmd = _cmd; | 297 | struct drv_cmd *cmd = _cmd; |
298 | u32 lo, hi; | 298 | u32 lo, hi; |
299 | 299 | ||
300 | switch (cmd->type) { | 300 | switch (cmd->type) { |
301 | case SYSTEM_INTEL_MSR_CAPABLE: | 301 | case SYSTEM_INTEL_MSR_CAPABLE: |
302 | rdmsr(cmd->addr.msr.reg, lo, hi); | 302 | rdmsr(cmd->addr.msr.reg, lo, hi); |
303 | lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); | 303 | lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); |
304 | wrmsr(cmd->addr.msr.reg, lo, hi); | 304 | wrmsr(cmd->addr.msr.reg, lo, hi); |
305 | break; | 305 | break; |
306 | case SYSTEM_AMD_MSR_CAPABLE: | 306 | case SYSTEM_AMD_MSR_CAPABLE: |
307 | wrmsr(cmd->addr.msr.reg, cmd->val, 0); | 307 | wrmsr(cmd->addr.msr.reg, cmd->val, 0); |
308 | break; | 308 | break; |
309 | case SYSTEM_IO_CAPABLE: | 309 | case SYSTEM_IO_CAPABLE: |
310 | acpi_os_write_port((acpi_io_address)cmd->addr.io.port, | 310 | acpi_os_write_port((acpi_io_address)cmd->addr.io.port, |
311 | cmd->val, | 311 | cmd->val, |
312 | (u32)cmd->addr.io.bit_width); | 312 | (u32)cmd->addr.io.bit_width); |
313 | break; | 313 | break; |
314 | default: | 314 | default: |
315 | break; | 315 | break; |
316 | } | 316 | } |
317 | } | 317 | } |
318 | 318 | ||
319 | static void drv_read(struct drv_cmd *cmd) | 319 | static void drv_read(struct drv_cmd *cmd) |
320 | { | 320 | { |
321 | int err; | 321 | int err; |
322 | cmd->val = 0; | 322 | cmd->val = 0; |
323 | 323 | ||
324 | err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1); | 324 | err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1); |
325 | WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */ | 325 | WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */ |
326 | } | 326 | } |
327 | 327 | ||
328 | static void drv_write(struct drv_cmd *cmd) | 328 | static void drv_write(struct drv_cmd *cmd) |
329 | { | 329 | { |
330 | int this_cpu; | 330 | int this_cpu; |
331 | 331 | ||
332 | this_cpu = get_cpu(); | 332 | this_cpu = get_cpu(); |
333 | if (cpumask_test_cpu(this_cpu, cmd->mask)) | 333 | if (cpumask_test_cpu(this_cpu, cmd->mask)) |
334 | do_drv_write(cmd); | 334 | do_drv_write(cmd); |
335 | smp_call_function_many(cmd->mask, do_drv_write, cmd, 1); | 335 | smp_call_function_many(cmd->mask, do_drv_write, cmd, 1); |
336 | put_cpu(); | 336 | put_cpu(); |
337 | } | 337 | } |
338 | 338 | ||
339 | static u32 get_cur_val(const struct cpumask *mask) | 339 | static u32 get_cur_val(const struct cpumask *mask) |
340 | { | 340 | { |
341 | struct acpi_processor_performance *perf; | 341 | struct acpi_processor_performance *perf; |
342 | struct drv_cmd cmd; | 342 | struct drv_cmd cmd; |
343 | 343 | ||
344 | if (unlikely(cpumask_empty(mask))) | 344 | if (unlikely(cpumask_empty(mask))) |
345 | return 0; | 345 | return 0; |
346 | 346 | ||
347 | switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { | 347 | switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { |
348 | case SYSTEM_INTEL_MSR_CAPABLE: | 348 | case SYSTEM_INTEL_MSR_CAPABLE: |
349 | cmd.type = SYSTEM_INTEL_MSR_CAPABLE; | 349 | cmd.type = SYSTEM_INTEL_MSR_CAPABLE; |
350 | cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; | 350 | cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; |
351 | break; | 351 | break; |
352 | case SYSTEM_AMD_MSR_CAPABLE: | 352 | case SYSTEM_AMD_MSR_CAPABLE: |
353 | cmd.type = SYSTEM_AMD_MSR_CAPABLE; | 353 | cmd.type = SYSTEM_AMD_MSR_CAPABLE; |
354 | cmd.addr.msr.reg = MSR_AMD_PERF_STATUS; | 354 | cmd.addr.msr.reg = MSR_AMD_PERF_STATUS; |
355 | break; | 355 | break; |
356 | case SYSTEM_IO_CAPABLE: | 356 | case SYSTEM_IO_CAPABLE: |
357 | cmd.type = SYSTEM_IO_CAPABLE; | 357 | cmd.type = SYSTEM_IO_CAPABLE; |
358 | perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; | 358 | perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; |
359 | cmd.addr.io.port = perf->control_register.address; | 359 | cmd.addr.io.port = perf->control_register.address; |
360 | cmd.addr.io.bit_width = perf->control_register.bit_width; | 360 | cmd.addr.io.bit_width = perf->control_register.bit_width; |
361 | break; | 361 | break; |
362 | default: | 362 | default: |
363 | return 0; | 363 | return 0; |
364 | } | 364 | } |
365 | 365 | ||
366 | cmd.mask = mask; | 366 | cmd.mask = mask; |
367 | drv_read(&cmd); | 367 | drv_read(&cmd); |
368 | 368 | ||
369 | pr_debug("get_cur_val = %u\n", cmd.val); | 369 | pr_debug("get_cur_val = %u\n", cmd.val); |
370 | 370 | ||
371 | return cmd.val; | 371 | return cmd.val; |
372 | } | 372 | } |
373 | 373 | ||
374 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | 374 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) |
375 | { | 375 | { |
376 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); | 376 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); |
377 | unsigned int freq; | 377 | unsigned int freq; |
378 | unsigned int cached_freq; | 378 | unsigned int cached_freq; |
379 | 379 | ||
380 | pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); | 380 | pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); |
381 | 381 | ||
382 | if (unlikely(data == NULL || | 382 | if (unlikely(data == NULL || |
383 | data->acpi_data == NULL || data->freq_table == NULL)) { | 383 | data->acpi_data == NULL || data->freq_table == NULL)) { |
384 | return 0; | 384 | return 0; |
385 | } | 385 | } |
386 | 386 | ||
387 | cached_freq = data->freq_table[data->acpi_data->state].frequency; | 387 | cached_freq = data->freq_table[data->acpi_data->state].frequency; |
388 | freq = extract_freq(get_cur_val(cpumask_of(cpu)), data); | 388 | freq = extract_freq(get_cur_val(cpumask_of(cpu)), data); |
389 | if (freq != cached_freq) { | 389 | if (freq != cached_freq) { |
390 | /* | 390 | /* |
391 | * The dreaded BIOS frequency change behind our back. | 391 | * The dreaded BIOS frequency change behind our back. |
392 | * Force set the frequency on next target call. | 392 | * Force set the frequency on next target call. |
393 | */ | 393 | */ |
394 | data->resume = 1; | 394 | data->resume = 1; |
395 | } | 395 | } |
396 | 396 | ||
397 | pr_debug("cur freq = %u\n", freq); | 397 | pr_debug("cur freq = %u\n", freq); |
398 | 398 | ||
399 | return freq; | 399 | return freq; |
400 | } | 400 | } |
401 | 401 | ||
402 | static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, | 402 | static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, |
403 | struct acpi_cpufreq_data *data) | 403 | struct acpi_cpufreq_data *data) |
404 | { | 404 | { |
405 | unsigned int cur_freq; | 405 | unsigned int cur_freq; |
406 | unsigned int i; | 406 | unsigned int i; |
407 | 407 | ||
408 | for (i = 0; i < 100; i++) { | 408 | for (i = 0; i < 100; i++) { |
409 | cur_freq = extract_freq(get_cur_val(mask), data); | 409 | cur_freq = extract_freq(get_cur_val(mask), data); |
410 | if (cur_freq == freq) | 410 | if (cur_freq == freq) |
411 | return 1; | 411 | return 1; |
412 | udelay(10); | 412 | udelay(10); |
413 | } | 413 | } |
414 | return 0; | 414 | return 0; |
415 | } | 415 | } |
416 | 416 | ||
417 | static int acpi_cpufreq_target(struct cpufreq_policy *policy, | 417 | static int acpi_cpufreq_target(struct cpufreq_policy *policy, |
418 | unsigned int target_freq, unsigned int relation) | 418 | unsigned int target_freq, unsigned int relation) |
419 | { | 419 | { |
420 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); | 420 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
421 | struct acpi_processor_performance *perf; | 421 | struct acpi_processor_performance *perf; |
422 | struct cpufreq_freqs freqs; | 422 | struct cpufreq_freqs freqs; |
423 | struct drv_cmd cmd; | 423 | struct drv_cmd cmd; |
424 | unsigned int next_state = 0; /* Index into freq_table */ | 424 | unsigned int next_state = 0; /* Index into freq_table */ |
425 | unsigned int next_perf_state = 0; /* Index into perf table */ | 425 | unsigned int next_perf_state = 0; /* Index into perf table */ |
426 | int result = 0; | 426 | int result = 0; |
427 | 427 | ||
428 | pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); | 428 | pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu); |
429 | 429 | ||
430 | if (unlikely(data == NULL || | 430 | if (unlikely(data == NULL || |
431 | data->acpi_data == NULL || data->freq_table == NULL)) { | 431 | data->acpi_data == NULL || data->freq_table == NULL)) { |
432 | return -ENODEV; | 432 | return -ENODEV; |
433 | } | 433 | } |
434 | 434 | ||
435 | perf = data->acpi_data; | 435 | perf = data->acpi_data; |
436 | result = cpufreq_frequency_table_target(policy, | 436 | result = cpufreq_frequency_table_target(policy, |
437 | data->freq_table, | 437 | data->freq_table, |
438 | target_freq, | 438 | target_freq, |
439 | relation, &next_state); | 439 | relation, &next_state); |
440 | if (unlikely(result)) { | 440 | if (unlikely(result)) { |
441 | result = -ENODEV; | 441 | result = -ENODEV; |
442 | goto out; | 442 | goto out; |
443 | } | 443 | } |
444 | 444 | ||
445 | next_perf_state = data->freq_table[next_state].index; | 445 | next_perf_state = data->freq_table[next_state].index; |
446 | if (perf->state == next_perf_state) { | 446 | if (perf->state == next_perf_state) { |
447 | if (unlikely(data->resume)) { | 447 | if (unlikely(data->resume)) { |
448 | pr_debug("Called after resume, resetting to P%d\n", | 448 | pr_debug("Called after resume, resetting to P%d\n", |
449 | next_perf_state); | 449 | next_perf_state); |
450 | data->resume = 0; | 450 | data->resume = 0; |
451 | } else { | 451 | } else { |
452 | pr_debug("Already at target state (P%d)\n", | 452 | pr_debug("Already at target state (P%d)\n", |
453 | next_perf_state); | 453 | next_perf_state); |
454 | goto out; | 454 | goto out; |
455 | } | 455 | } |
456 | } | 456 | } |
457 | 457 | ||
458 | switch (data->cpu_feature) { | 458 | switch (data->cpu_feature) { |
459 | case SYSTEM_INTEL_MSR_CAPABLE: | 459 | case SYSTEM_INTEL_MSR_CAPABLE: |
460 | cmd.type = SYSTEM_INTEL_MSR_CAPABLE; | 460 | cmd.type = SYSTEM_INTEL_MSR_CAPABLE; |
461 | cmd.addr.msr.reg = MSR_IA32_PERF_CTL; | 461 | cmd.addr.msr.reg = MSR_IA32_PERF_CTL; |
462 | cmd.val = (u32) perf->states[next_perf_state].control; | 462 | cmd.val = (u32) perf->states[next_perf_state].control; |
463 | break; | 463 | break; |
464 | case SYSTEM_AMD_MSR_CAPABLE: | 464 | case SYSTEM_AMD_MSR_CAPABLE: |
465 | cmd.type = SYSTEM_AMD_MSR_CAPABLE; | 465 | cmd.type = SYSTEM_AMD_MSR_CAPABLE; |
466 | cmd.addr.msr.reg = MSR_AMD_PERF_CTL; | 466 | cmd.addr.msr.reg = MSR_AMD_PERF_CTL; |
467 | cmd.val = (u32) perf->states[next_perf_state].control; | 467 | cmd.val = (u32) perf->states[next_perf_state].control; |
468 | break; | 468 | break; |
469 | case SYSTEM_IO_CAPABLE: | 469 | case SYSTEM_IO_CAPABLE: |
470 | cmd.type = SYSTEM_IO_CAPABLE; | 470 | cmd.type = SYSTEM_IO_CAPABLE; |
471 | cmd.addr.io.port = perf->control_register.address; | 471 | cmd.addr.io.port = perf->control_register.address; |
472 | cmd.addr.io.bit_width = perf->control_register.bit_width; | 472 | cmd.addr.io.bit_width = perf->control_register.bit_width; |
473 | cmd.val = (u32) perf->states[next_perf_state].control; | 473 | cmd.val = (u32) perf->states[next_perf_state].control; |
474 | break; | 474 | break; |
475 | default: | 475 | default: |
476 | result = -ENODEV; | 476 | result = -ENODEV; |
477 | goto out; | 477 | goto out; |
478 | } | 478 | } |
479 | 479 | ||
480 | /* cpufreq holds the hotplug lock, so we are safe from here on */ | 480 | /* cpufreq holds the hotplug lock, so we are safe from here on */ |
481 | if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) | 481 | if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) |
482 | cmd.mask = policy->cpus; | 482 | cmd.mask = policy->cpus; |
483 | else | 483 | else |
484 | cmd.mask = cpumask_of(policy->cpu); | 484 | cmd.mask = cpumask_of(policy->cpu); |
485 | 485 | ||
486 | freqs.old = perf->states[perf->state].core_frequency * 1000; | 486 | freqs.old = perf->states[perf->state].core_frequency * 1000; |
487 | freqs.new = data->freq_table[next_state].frequency; | 487 | freqs.new = data->freq_table[next_state].frequency; |
488 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 488 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
489 | 489 | ||
490 | drv_write(&cmd); | 490 | drv_write(&cmd); |
491 | 491 | ||
492 | if (acpi_pstate_strict) { | 492 | if (acpi_pstate_strict) { |
493 | if (!check_freqs(cmd.mask, freqs.new, data)) { | 493 | if (!check_freqs(cmd.mask, freqs.new, data)) { |
494 | pr_debug("acpi_cpufreq_target failed (%d)\n", | 494 | pr_debug("acpi_cpufreq_target failed (%d)\n", |
495 | policy->cpu); | 495 | policy->cpu); |
496 | result = -EAGAIN; | 496 | result = -EAGAIN; |
497 | goto out; | 497 | goto out; |
498 | } | 498 | } |
499 | } | 499 | } |
500 | 500 | ||
501 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 501 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
502 | perf->state = next_perf_state; | 502 | perf->state = next_perf_state; |
503 | 503 | ||
504 | out: | 504 | out: |
505 | return result; | 505 | return result; |
506 | } | 506 | } |
507 | 507 | ||
508 | static int acpi_cpufreq_verify(struct cpufreq_policy *policy) | 508 | static int acpi_cpufreq_verify(struct cpufreq_policy *policy) |
509 | { | 509 | { |
510 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); | 510 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
511 | 511 | ||
512 | pr_debug("acpi_cpufreq_verify\n"); | 512 | pr_debug("acpi_cpufreq_verify\n"); |
513 | 513 | ||
514 | return cpufreq_frequency_table_verify(policy, data->freq_table); | 514 | return cpufreq_frequency_table_verify(policy, data->freq_table); |
515 | } | 515 | } |
516 | 516 | ||
517 | static unsigned long | 517 | static unsigned long |
518 | acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) | 518 | acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) |
519 | { | 519 | { |
520 | struct acpi_processor_performance *perf = data->acpi_data; | 520 | struct acpi_processor_performance *perf = data->acpi_data; |
521 | 521 | ||
522 | if (cpu_khz) { | 522 | if (cpu_khz) { |
523 | /* search the closest match to cpu_khz */ | 523 | /* search the closest match to cpu_khz */ |
524 | unsigned int i; | 524 | unsigned int i; |
525 | unsigned long freq; | 525 | unsigned long freq; |
526 | unsigned long freqn = perf->states[0].core_frequency * 1000; | 526 | unsigned long freqn = perf->states[0].core_frequency * 1000; |
527 | 527 | ||
528 | for (i = 0; i < (perf->state_count-1); i++) { | 528 | for (i = 0; i < (perf->state_count-1); i++) { |
529 | freq = freqn; | 529 | freq = freqn; |
530 | freqn = perf->states[i+1].core_frequency * 1000; | 530 | freqn = perf->states[i+1].core_frequency * 1000; |
531 | if ((2 * cpu_khz) > (freqn + freq)) { | 531 | if ((2 * cpu_khz) > (freqn + freq)) { |
532 | perf->state = i; | 532 | perf->state = i; |
533 | return freq; | 533 | return freq; |
534 | } | 534 | } |
535 | } | 535 | } |
536 | perf->state = perf->state_count-1; | 536 | perf->state = perf->state_count-1; |
537 | return freqn; | 537 | return freqn; |
538 | } else { | 538 | } else { |
539 | /* assume CPU is at P0... */ | 539 | /* assume CPU is at P0... */ |
540 | perf->state = 0; | 540 | perf->state = 0; |
541 | return perf->states[0].core_frequency * 1000; | 541 | return perf->states[0].core_frequency * 1000; |
542 | } | 542 | } |
543 | } | 543 | } |
544 | 544 | ||
545 | static void free_acpi_perf_data(void) | 545 | static void free_acpi_perf_data(void) |
546 | { | 546 | { |
547 | unsigned int i; | 547 | unsigned int i; |
548 | 548 | ||
549 | /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */ | 549 | /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */ |
550 | for_each_possible_cpu(i) | 550 | for_each_possible_cpu(i) |
551 | free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) | 551 | free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) |
552 | ->shared_cpu_map); | 552 | ->shared_cpu_map); |
553 | free_percpu(acpi_perf_data); | 553 | free_percpu(acpi_perf_data); |
554 | } | 554 | } |
555 | 555 | ||
556 | static int boost_notify(struct notifier_block *nb, unsigned long action, | 556 | static int boost_notify(struct notifier_block *nb, unsigned long action, |
557 | void *hcpu) | 557 | void *hcpu) |
558 | { | 558 | { |
559 | unsigned cpu = (long)hcpu; | 559 | unsigned cpu = (long)hcpu; |
560 | const struct cpumask *cpumask; | 560 | const struct cpumask *cpumask; |
561 | 561 | ||
562 | cpumask = get_cpu_mask(cpu); | 562 | cpumask = get_cpu_mask(cpu); |
563 | 563 | ||
564 | /* | 564 | /* |
565 | * Clear the boost-disable bit on the CPU_DOWN path so that | 565 | * Clear the boost-disable bit on the CPU_DOWN path so that |
566 | * this cpu cannot block the remaining ones from boosting. On | 566 | * this cpu cannot block the remaining ones from boosting. On |
567 | * the CPU_UP path we simply keep the boost-disable flag in | 567 | * the CPU_UP path we simply keep the boost-disable flag in |
568 | * sync with the current global state. | 568 | * sync with the current global state. |
569 | */ | 569 | */ |
570 | 570 | ||
571 | switch (action) { | 571 | switch (action) { |
572 | case CPU_UP_PREPARE: | 572 | case CPU_UP_PREPARE: |
573 | case CPU_UP_PREPARE_FROZEN: | 573 | case CPU_UP_PREPARE_FROZEN: |
574 | boost_set_msrs(boost_enabled, cpumask); | 574 | boost_set_msrs(boost_enabled, cpumask); |
575 | break; | 575 | break; |
576 | 576 | ||
577 | case CPU_DOWN_PREPARE: | 577 | case CPU_DOWN_PREPARE: |
578 | case CPU_DOWN_PREPARE_FROZEN: | 578 | case CPU_DOWN_PREPARE_FROZEN: |
579 | boost_set_msrs(1, cpumask); | 579 | boost_set_msrs(1, cpumask); |
580 | break; | 580 | break; |
581 | 581 | ||
582 | default: | 582 | default: |
583 | break; | 583 | break; |
584 | } | 584 | } |
585 | 585 | ||
586 | return NOTIFY_OK; | 586 | return NOTIFY_OK; |
587 | } | 587 | } |
588 | 588 | ||
589 | 589 | ||
590 | static struct notifier_block boost_nb = { | 590 | static struct notifier_block boost_nb = { |
591 | .notifier_call = boost_notify, | 591 | .notifier_call = boost_notify, |
592 | }; | 592 | }; |
593 | 593 | ||
594 | /* | 594 | /* |
595 | * acpi_cpufreq_early_init - initialize ACPI P-States library | 595 | * acpi_cpufreq_early_init - initialize ACPI P-States library |
596 | * | 596 | * |
597 | * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c) | 597 | * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c) |
598 | * in order to determine correct frequency and voltage pairings. We can | 598 | * in order to determine correct frequency and voltage pairings. We can |
599 | * do _PDC and _PSD and find out the processor dependency for the | 599 | * do _PDC and _PSD and find out the processor dependency for the |
600 | * actual init that will happen later... | 600 | * actual init that will happen later... |
601 | */ | 601 | */ |
602 | static int __init acpi_cpufreq_early_init(void) | 602 | static int __init acpi_cpufreq_early_init(void) |
603 | { | 603 | { |
604 | unsigned int i; | 604 | unsigned int i; |
605 | pr_debug("acpi_cpufreq_early_init\n"); | 605 | pr_debug("acpi_cpufreq_early_init\n"); |
606 | 606 | ||
607 | acpi_perf_data = alloc_percpu(struct acpi_processor_performance); | 607 | acpi_perf_data = alloc_percpu(struct acpi_processor_performance); |
608 | if (!acpi_perf_data) { | 608 | if (!acpi_perf_data) { |
609 | pr_debug("Memory allocation error for acpi_perf_data.\n"); | 609 | pr_debug("Memory allocation error for acpi_perf_data.\n"); |
610 | return -ENOMEM; | 610 | return -ENOMEM; |
611 | } | 611 | } |
612 | for_each_possible_cpu(i) { | 612 | for_each_possible_cpu(i) { |
613 | if (!zalloc_cpumask_var_node( | 613 | if (!zalloc_cpumask_var_node( |
614 | &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, | 614 | &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, |
615 | GFP_KERNEL, cpu_to_node(i))) { | 615 | GFP_KERNEL, cpu_to_node(i))) { |
616 | 616 | ||
617 | /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ | 617 | /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ |
618 | free_acpi_perf_data(); | 618 | free_acpi_perf_data(); |
619 | return -ENOMEM; | 619 | return -ENOMEM; |
620 | } | 620 | } |
621 | } | 621 | } |
622 | 622 | ||
623 | /* Do initialization in ACPI core */ | 623 | /* Do initialization in ACPI core */ |
624 | acpi_processor_preregister_performance(acpi_perf_data); | 624 | acpi_processor_preregister_performance(acpi_perf_data); |
625 | return 0; | 625 | return 0; |
626 | } | 626 | } |
627 | 627 | ||
628 | #ifdef CONFIG_SMP | 628 | #ifdef CONFIG_SMP |
629 | /* | 629 | /* |
630 | * Some BIOSes do SW_ANY coordination internally, either set it up in hw | 630 | * Some BIOSes do SW_ANY coordination internally, either set it up in hw |
631 | * or do it in BIOS firmware and won't inform about it to OS. If not | 631 | * or do it in BIOS firmware and won't inform about it to OS. If not |
632 | * detected, this has a side effect of making CPU run at a different speed | 632 | * detected, this has a side effect of making CPU run at a different speed |
633 | * than OS intended it to run at. Detect it and handle it cleanly. | 633 | * than OS intended it to run at. Detect it and handle it cleanly. |
634 | */ | 634 | */ |
635 | static int bios_with_sw_any_bug; | 635 | static int bios_with_sw_any_bug; |
636 | 636 | ||
637 | static int sw_any_bug_found(const struct dmi_system_id *d) | 637 | static int sw_any_bug_found(const struct dmi_system_id *d) |
638 | { | 638 | { |
639 | bios_with_sw_any_bug = 1; | 639 | bios_with_sw_any_bug = 1; |
640 | return 0; | 640 | return 0; |
641 | } | 641 | } |
642 | 642 | ||
643 | static const struct dmi_system_id sw_any_bug_dmi_table[] = { | 643 | static const struct dmi_system_id sw_any_bug_dmi_table[] = { |
644 | { | 644 | { |
645 | .callback = sw_any_bug_found, | 645 | .callback = sw_any_bug_found, |
646 | .ident = "Supermicro Server X6DLP", | 646 | .ident = "Supermicro Server X6DLP", |
647 | .matches = { | 647 | .matches = { |
648 | DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), | 648 | DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), |
649 | DMI_MATCH(DMI_BIOS_VERSION, "080010"), | 649 | DMI_MATCH(DMI_BIOS_VERSION, "080010"), |
650 | DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"), | 650 | DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"), |
651 | }, | 651 | }, |
652 | }, | 652 | }, |
653 | { } | 653 | { } |
654 | }; | 654 | }; |
655 | 655 | ||
656 | static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) | 656 | static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) |
657 | { | 657 | { |
658 | /* Intel Xeon Processor 7100 Series Specification Update | 658 | /* Intel Xeon Processor 7100 Series Specification Update |
659 | * http://www.intel.com/Assets/PDF/specupdate/314554.pdf | 659 | * http://www.intel.com/Assets/PDF/specupdate/314554.pdf |
660 | * AL30: A Machine Check Exception (MCE) Occurring during an | 660 | * AL30: A Machine Check Exception (MCE) Occurring during an |
661 | * Enhanced Intel SpeedStep Technology Ratio Change May Cause | 661 | * Enhanced Intel SpeedStep Technology Ratio Change May Cause |
662 | * Both Processor Cores to Lock Up. */ | 662 | * Both Processor Cores to Lock Up. */ |
663 | if (c->x86_vendor == X86_VENDOR_INTEL) { | 663 | if (c->x86_vendor == X86_VENDOR_INTEL) { |
664 | if ((c->x86 == 15) && | 664 | if ((c->x86 == 15) && |
665 | (c->x86_model == 6) && | 665 | (c->x86_model == 6) && |
666 | (c->x86_mask == 8)) { | 666 | (c->x86_mask == 8)) { |
667 | printk(KERN_INFO "acpi-cpufreq: Intel(R) " | 667 | printk(KERN_INFO "acpi-cpufreq: Intel(R) " |
668 | "Xeon(R) 7100 Errata AL30, processors may " | 668 | "Xeon(R) 7100 Errata AL30, processors may " |
669 | "lock up on frequency changes: disabling " | 669 | "lock up on frequency changes: disabling " |
670 | "acpi-cpufreq.\n"); | 670 | "acpi-cpufreq.\n"); |
671 | return -ENODEV; | 671 | return -ENODEV; |
672 | } | 672 | } |
673 | } | 673 | } |
674 | return 0; | 674 | return 0; |
675 | } | 675 | } |
676 | #endif | 676 | #endif |
677 | 677 | ||
678 | static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) | 678 | static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) |
679 | { | 679 | { |
680 | unsigned int i; | 680 | unsigned int i; |
681 | unsigned int valid_states = 0; | 681 | unsigned int valid_states = 0; |
682 | unsigned int cpu = policy->cpu; | 682 | unsigned int cpu = policy->cpu; |
683 | struct acpi_cpufreq_data *data; | 683 | struct acpi_cpufreq_data *data; |
684 | unsigned int result = 0; | 684 | unsigned int result = 0; |
685 | struct cpuinfo_x86 *c = &cpu_data(policy->cpu); | 685 | struct cpuinfo_x86 *c = &cpu_data(policy->cpu); |
686 | struct acpi_processor_performance *perf; | 686 | struct acpi_processor_performance *perf; |
687 | #ifdef CONFIG_SMP | 687 | #ifdef CONFIG_SMP |
688 | static int blacklisted; | 688 | static int blacklisted; |
689 | #endif | 689 | #endif |
690 | 690 | ||
691 | pr_debug("acpi_cpufreq_cpu_init\n"); | 691 | pr_debug("acpi_cpufreq_cpu_init\n"); |
692 | 692 | ||
693 | #ifdef CONFIG_SMP | 693 | #ifdef CONFIG_SMP |
694 | if (blacklisted) | 694 | if (blacklisted) |
695 | return blacklisted; | 695 | return blacklisted; |
696 | blacklisted = acpi_cpufreq_blacklist(c); | 696 | blacklisted = acpi_cpufreq_blacklist(c); |
697 | if (blacklisted) | 697 | if (blacklisted) |
698 | return blacklisted; | 698 | return blacklisted; |
699 | #endif | 699 | #endif |
700 | 700 | ||
701 | data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL); | 701 | data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL); |
702 | if (!data) | 702 | if (!data) |
703 | return -ENOMEM; | 703 | return -ENOMEM; |
704 | 704 | ||
705 | data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); | 705 | data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); |
706 | per_cpu(acfreq_data, cpu) = data; | 706 | per_cpu(acfreq_data, cpu) = data; |
707 | 707 | ||
708 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) | 708 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) |
709 | acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; | 709 | acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; |
710 | 710 | ||
711 | result = acpi_processor_register_performance(data->acpi_data, cpu); | 711 | result = acpi_processor_register_performance(data->acpi_data, cpu); |
712 | if (result) | 712 | if (result) |
713 | goto err_free; | 713 | goto err_free; |
714 | 714 | ||
715 | perf = data->acpi_data; | 715 | perf = data->acpi_data; |
716 | policy->shared_type = perf->shared_type; | 716 | policy->shared_type = perf->shared_type; |
717 | 717 | ||
718 | /* | 718 | /* |
719 | * Will let policy->cpus know about dependency only when software | 719 | * Will let policy->cpus know about dependency only when software |
720 | * coordination is required. | 720 | * coordination is required. |
721 | */ | 721 | */ |
722 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || | 722 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || |
723 | policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { | 723 | policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { |
724 | cpumask_copy(policy->cpus, perf->shared_cpu_map); | 724 | cpumask_copy(policy->cpus, perf->shared_cpu_map); |
725 | } | 725 | } |
726 | 726 | ||
727 | #ifdef CONFIG_SMP | 727 | #ifdef CONFIG_SMP |
728 | dmi_check_system(sw_any_bug_dmi_table); | 728 | dmi_check_system(sw_any_bug_dmi_table); |
729 | if (bios_with_sw_any_bug && !policy_is_shared(policy)) { | 729 | if (bios_with_sw_any_bug && !policy_is_shared(policy)) { |
730 | policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; | 730 | policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; |
731 | cpumask_copy(policy->cpus, cpu_core_mask(cpu)); | 731 | cpumask_copy(policy->cpus, cpu_core_mask(cpu)); |
732 | } | 732 | } |
733 | 733 | ||
734 | if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { | 734 | if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { |
735 | cpumask_clear(policy->cpus); | 735 | cpumask_clear(policy->cpus); |
736 | cpumask_set_cpu(cpu, policy->cpus); | 736 | cpumask_set_cpu(cpu, policy->cpus); |
737 | policy->shared_type = CPUFREQ_SHARED_TYPE_HW; | 737 | policy->shared_type = CPUFREQ_SHARED_TYPE_HW; |
738 | pr_info_once(PFX "overriding BIOS provided _PSD data\n"); | 738 | pr_info_once(PFX "overriding BIOS provided _PSD data\n"); |
739 | } | 739 | } |
740 | #endif | 740 | #endif |
741 | 741 | ||
742 | /* capability check */ | 742 | /* capability check */ |
743 | if (perf->state_count <= 1) { | 743 | if (perf->state_count <= 1) { |
744 | pr_debug("No P-States\n"); | 744 | pr_debug("No P-States\n"); |
745 | result = -ENODEV; | 745 | result = -ENODEV; |
746 | goto err_unreg; | 746 | goto err_unreg; |
747 | } | 747 | } |
748 | 748 | ||
749 | if (perf->control_register.space_id != perf->status_register.space_id) { | 749 | if (perf->control_register.space_id != perf->status_register.space_id) { |
750 | result = -ENODEV; | 750 | result = -ENODEV; |
751 | goto err_unreg; | 751 | goto err_unreg; |
752 | } | 752 | } |
753 | 753 | ||
754 | switch (perf->control_register.space_id) { | 754 | switch (perf->control_register.space_id) { |
755 | case ACPI_ADR_SPACE_SYSTEM_IO: | 755 | case ACPI_ADR_SPACE_SYSTEM_IO: |
756 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && | 756 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && |
757 | boot_cpu_data.x86 == 0xf) { | 757 | boot_cpu_data.x86 == 0xf) { |
758 | pr_debug("AMD K8 systems must use native drivers.\n"); | 758 | pr_debug("AMD K8 systems must use native drivers.\n"); |
759 | result = -ENODEV; | 759 | result = -ENODEV; |
760 | goto err_unreg; | 760 | goto err_unreg; |
761 | } | 761 | } |
762 | pr_debug("SYSTEM IO addr space\n"); | 762 | pr_debug("SYSTEM IO addr space\n"); |
763 | data->cpu_feature = SYSTEM_IO_CAPABLE; | 763 | data->cpu_feature = SYSTEM_IO_CAPABLE; |
764 | break; | 764 | break; |
765 | case ACPI_ADR_SPACE_FIXED_HARDWARE: | 765 | case ACPI_ADR_SPACE_FIXED_HARDWARE: |
766 | pr_debug("HARDWARE addr space\n"); | 766 | pr_debug("HARDWARE addr space\n"); |
767 | if (check_est_cpu(cpu)) { | 767 | if (check_est_cpu(cpu)) { |
768 | data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; | 768 | data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; |
769 | break; | 769 | break; |
770 | } | 770 | } |
771 | if (check_amd_hwpstate_cpu(cpu)) { | 771 | if (check_amd_hwpstate_cpu(cpu)) { |
772 | data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE; | 772 | data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE; |
773 | break; | 773 | break; |
774 | } | 774 | } |
775 | result = -ENODEV; | 775 | result = -ENODEV; |
776 | goto err_unreg; | 776 | goto err_unreg; |
777 | default: | 777 | default: |
778 | pr_debug("Unknown addr space %d\n", | 778 | pr_debug("Unknown addr space %d\n", |
779 | (u32) (perf->control_register.space_id)); | 779 | (u32) (perf->control_register.space_id)); |
780 | result = -ENODEV; | 780 | result = -ENODEV; |
781 | goto err_unreg; | 781 | goto err_unreg; |
782 | } | 782 | } |
783 | 783 | ||
784 | data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * | 784 | data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * |
785 | (perf->state_count+1), GFP_KERNEL); | 785 | (perf->state_count+1), GFP_KERNEL); |
786 | if (!data->freq_table) { | 786 | if (!data->freq_table) { |
787 | result = -ENOMEM; | 787 | result = -ENOMEM; |
788 | goto err_unreg; | 788 | goto err_unreg; |
789 | } | 789 | } |
790 | 790 | ||
791 | /* detect transition latency */ | 791 | /* detect transition latency */ |
792 | policy->cpuinfo.transition_latency = 0; | 792 | policy->cpuinfo.transition_latency = 0; |
793 | for (i = 0; i < perf->state_count; i++) { | 793 | for (i = 0; i < perf->state_count; i++) { |
794 | if ((perf->states[i].transition_latency * 1000) > | 794 | if ((perf->states[i].transition_latency * 1000) > |
795 | policy->cpuinfo.transition_latency) | 795 | policy->cpuinfo.transition_latency) |
796 | policy->cpuinfo.transition_latency = | 796 | policy->cpuinfo.transition_latency = |
797 | perf->states[i].transition_latency * 1000; | 797 | perf->states[i].transition_latency * 1000; |
798 | } | 798 | } |
799 | 799 | ||
800 | /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */ | 800 | /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */ |
801 | if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && | 801 | if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && |
802 | policy->cpuinfo.transition_latency > 20 * 1000) { | 802 | policy->cpuinfo.transition_latency > 20 * 1000) { |
803 | policy->cpuinfo.transition_latency = 20 * 1000; | 803 | policy->cpuinfo.transition_latency = 20 * 1000; |
804 | printk_once(KERN_INFO | 804 | printk_once(KERN_INFO |
805 | "P-state transition latency capped at 20 uS\n"); | 805 | "P-state transition latency capped at 20 uS\n"); |
806 | } | 806 | } |
807 | 807 | ||
808 | /* table init */ | 808 | /* table init */ |
809 | for (i = 0; i < perf->state_count; i++) { | 809 | for (i = 0; i < perf->state_count; i++) { |
810 | if (i > 0 && perf->states[i].core_frequency >= | 810 | if (i > 0 && perf->states[i].core_frequency >= |
811 | data->freq_table[valid_states-1].frequency / 1000) | 811 | data->freq_table[valid_states-1].frequency / 1000) |
812 | continue; | 812 | continue; |
813 | 813 | ||
814 | data->freq_table[valid_states].index = i; | 814 | data->freq_table[valid_states].index = i; |
815 | data->freq_table[valid_states].frequency = | 815 | data->freq_table[valid_states].frequency = |
816 | perf->states[i].core_frequency * 1000; | 816 | perf->states[i].core_frequency * 1000; |
817 | valid_states++; | 817 | valid_states++; |
818 | } | 818 | } |
819 | data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; | 819 | data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; |
820 | perf->state = 0; | 820 | perf->state = 0; |
821 | 821 | ||
822 | result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); | 822 | result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); |
823 | if (result) | 823 | if (result) |
824 | goto err_freqfree; | 824 | goto err_freqfree; |
825 | 825 | ||
826 | if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq) | 826 | if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq) |
827 | printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n"); | 827 | printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n"); |
828 | 828 | ||
829 | switch (perf->control_register.space_id) { | 829 | switch (perf->control_register.space_id) { |
830 | case ACPI_ADR_SPACE_SYSTEM_IO: | 830 | case ACPI_ADR_SPACE_SYSTEM_IO: |
831 | /* Current speed is unknown and not detectable by IO port */ | 831 | /* Current speed is unknown and not detectable by IO port */ |
832 | policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); | 832 | policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); |
833 | break; | 833 | break; |
834 | case ACPI_ADR_SPACE_FIXED_HARDWARE: | 834 | case ACPI_ADR_SPACE_FIXED_HARDWARE: |
835 | acpi_cpufreq_driver.get = get_cur_freq_on_cpu; | 835 | acpi_cpufreq_driver.get = get_cur_freq_on_cpu; |
836 | policy->cur = get_cur_freq_on_cpu(cpu); | 836 | policy->cur = get_cur_freq_on_cpu(cpu); |
837 | break; | 837 | break; |
838 | default: | 838 | default: |
839 | break; | 839 | break; |
840 | } | 840 | } |
841 | 841 | ||
842 | /* notify BIOS that we exist */ | 842 | /* notify BIOS that we exist */ |
843 | acpi_processor_notify_smm(THIS_MODULE); | 843 | acpi_processor_notify_smm(THIS_MODULE); |
844 | 844 | ||
845 | /* Check for APERF/MPERF support in hardware */ | 845 | /* Check for APERF/MPERF support in hardware */ |
846 | if (boot_cpu_has(X86_FEATURE_APERFMPERF)) | 846 | if (boot_cpu_has(X86_FEATURE_APERFMPERF)) |
847 | acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; | 847 | acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf; |
848 | 848 | ||
849 | pr_debug("CPU%u - ACPI performance management activated.\n", cpu); | 849 | pr_debug("CPU%u - ACPI performance management activated.\n", cpu); |
850 | for (i = 0; i < perf->state_count; i++) | 850 | for (i = 0; i < perf->state_count; i++) |
851 | pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", | 851 | pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", |
852 | (i == perf->state ? '*' : ' '), i, | 852 | (i == perf->state ? '*' : ' '), i, |
853 | (u32) perf->states[i].core_frequency, | 853 | (u32) perf->states[i].core_frequency, |
854 | (u32) perf->states[i].power, | 854 | (u32) perf->states[i].power, |
855 | (u32) perf->states[i].transition_latency); | 855 | (u32) perf->states[i].transition_latency); |
856 | 856 | ||
857 | cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); | 857 | cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); |
858 | 858 | ||
859 | /* | 859 | /* |
860 | * the first call to ->target() should result in us actually | 860 | * the first call to ->target() should result in us actually |
861 | * writing something to the appropriate registers. | 861 | * writing something to the appropriate registers. |
862 | */ | 862 | */ |
863 | data->resume = 1; | 863 | data->resume = 1; |
864 | 864 | ||
865 | return result; | 865 | return result; |
866 | 866 | ||
867 | err_freqfree: | 867 | err_freqfree: |
868 | kfree(data->freq_table); | 868 | kfree(data->freq_table); |
869 | err_unreg: | 869 | err_unreg: |
870 | acpi_processor_unregister_performance(perf, cpu); | 870 | acpi_processor_unregister_performance(perf, cpu); |
871 | err_free: | 871 | err_free: |
872 | kfree(data); | 872 | kfree(data); |
873 | per_cpu(acfreq_data, cpu) = NULL; | 873 | per_cpu(acfreq_data, cpu) = NULL; |
874 | 874 | ||
875 | return result; | 875 | return result; |
876 | } | 876 | } |
877 | 877 | ||
878 | static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) | 878 | static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) |
879 | { | 879 | { |
880 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); | 880 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
881 | 881 | ||
882 | pr_debug("acpi_cpufreq_cpu_exit\n"); | 882 | pr_debug("acpi_cpufreq_cpu_exit\n"); |
883 | 883 | ||
884 | if (data) { | 884 | if (data) { |
885 | cpufreq_frequency_table_put_attr(policy->cpu); | 885 | cpufreq_frequency_table_put_attr(policy->cpu); |
886 | per_cpu(acfreq_data, policy->cpu) = NULL; | 886 | per_cpu(acfreq_data, policy->cpu) = NULL; |
887 | acpi_processor_unregister_performance(data->acpi_data, | 887 | acpi_processor_unregister_performance(data->acpi_data, |
888 | policy->cpu); | 888 | policy->cpu); |
889 | kfree(data->freq_table); | 889 | kfree(data->freq_table); |
890 | kfree(data); | 890 | kfree(data); |
891 | } | 891 | } |
892 | 892 | ||
893 | return 0; | 893 | return 0; |
894 | } | 894 | } |
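
acpi_cpufreq_cpu_exit() is the mirror image of the init path: look up the per-CPU data pointer, clear the slot, then free everything init allocated. A rough user-space analogue of the per_cpu() slot bookkeeping, using a plain array indexed by CPU id as a stand-in for the kernel's per-CPU variables:

    /* Hypothetical stand-in for per_cpu(acfreq_data, cpu):
     * one pointer slot per CPU, cleared before the object is
     * freed so no stale pointer remains visible. */
    #include <stdlib.h>

    #define NR_CPUS 8

    static struct data { char *freq_table; } *percpu_data[NR_CPUS];

    static void cpu_exit(unsigned int cpu)
    {
        struct data *d = percpu_data[cpu];

        if (!d)
            return;
        percpu_data[cpu] = NULL;    /* unpublish the pointer first */
        free(d->freq_table);        /* then free in reverse init order */
        free(d);
    }
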
895 | 895 | ||
896 | static int acpi_cpufreq_resume(struct cpufreq_policy *policy) | 896 | static int acpi_cpufreq_resume(struct cpufreq_policy *policy) |
897 | { | 897 | { |
898 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); | 898 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
899 | 899 | ||
900 | pr_debug("acpi_cpufreq_resume\n"); | 900 | pr_debug("acpi_cpufreq_resume\n"); |
901 | 901 | ||
902 | data->resume = 1; | 902 | data->resume = 1; |
903 | 903 | ||
904 | return 0; | 904 | return 0; |
905 | } | 905 | } |
906 | 906 | ||
907 | static struct freq_attr *acpi_cpufreq_attr[] = { | 907 | static struct freq_attr *acpi_cpufreq_attr[] = { |
908 | &cpufreq_freq_attr_scaling_available_freqs, | 908 | &cpufreq_freq_attr_scaling_available_freqs, |
909 | NULL, /* this is a placeholder for cpb, do not remove */ | 909 | NULL, /* this is a placeholder for cpb, do not remove */ |
910 | NULL, | 910 | NULL, |
911 | }; | 911 | }; |
912 | 912 | ||
913 | static struct cpufreq_driver acpi_cpufreq_driver = { | 913 | static struct cpufreq_driver acpi_cpufreq_driver = { |
914 | .verify = acpi_cpufreq_verify, | 914 | .verify = acpi_cpufreq_verify, |
915 | .target = acpi_cpufreq_target, | 915 | .target = acpi_cpufreq_target, |
916 | .bios_limit = acpi_processor_get_bios_limit, | 916 | .bios_limit = acpi_processor_get_bios_limit, |
917 | .init = acpi_cpufreq_cpu_init, | 917 | .init = acpi_cpufreq_cpu_init, |
918 | .exit = acpi_cpufreq_cpu_exit, | 918 | .exit = acpi_cpufreq_cpu_exit, |
919 | .resume = acpi_cpufreq_resume, | 919 | .resume = acpi_cpufreq_resume, |
920 | .name = "acpi-cpufreq", | 920 | .name = "acpi-cpufreq", |
921 | .owner = THIS_MODULE, | 921 | .owner = THIS_MODULE, |
922 | .attr = acpi_cpufreq_attr, | 922 | .attr = acpi_cpufreq_attr, |
923 | }; | 923 | }; |
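
acpi_cpufreq_driver above is an ops table: the cpufreq core never calls the driver directly, only through these function pointers, so any backend that fills in the same slots can be registered. A reduced sketch of the pattern; the struct and registrar names here are made up for illustration, not the cpufreq core's real interface:

    /* Illustrative ops-table pattern under assumed names. */
    struct backend_ops {
        int  (*init)(unsigned int cpu);
        int  (*target)(unsigned int cpu, unsigned int freq_khz);
        int  (*exit)(unsigned int cpu);
        const char *name;
    };

    static int my_init(unsigned int cpu)                     { return 0; }
    static int my_target(unsigned int cpu, unsigned int khz) { return 0; }
    static int my_exit(unsigned int cpu)                     { return 0; }

    static const struct backend_ops my_backend = {
        .init   = my_init,
        .target = my_target,
        .exit   = my_exit,
        .name   = "example-backend",
    };

    static const struct backend_ops *active;    /* the one registered driver */

    static int register_backend(const struct backend_ops *ops)
    {
        if (active)
            return -1;    /* the core accepts a single driver at a time */
        active = ops;
        return 0;
    }
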
924 | 924 | ||
925 | static void __init acpi_cpufreq_boost_init(void) | 925 | static void __init acpi_cpufreq_boost_init(void) |
926 | { | 926 | { |
927 | if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) { | 927 | if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) { |
928 | msrs = msrs_alloc(); | 928 | msrs = msrs_alloc(); |
929 | 929 | ||
930 | if (!msrs) | 930 | if (!msrs) |
931 | return; | 931 | return; |
932 | 932 | ||
933 | boost_supported = true; | 933 | boost_supported = true; |
934 | boost_enabled = boost_state(0); | 934 | boost_enabled = boost_state(0); |
935 | 935 | ||
936 | get_online_cpus(); | 936 | get_online_cpus(); |
937 | 937 | ||
938 | /* Force all MSRs to the same value */ | 938 | /* Force all MSRs to the same value */ |
939 | boost_set_msrs(boost_enabled, cpu_online_mask); | 939 | boost_set_msrs(boost_enabled, cpu_online_mask); |
940 | 940 | ||
941 | register_cpu_notifier(&boost_nb); | 941 | register_cpu_notifier(&boost_nb); |
942 | 942 | ||
943 | put_online_cpus(); | 943 | put_online_cpus(); |
944 | } else | 944 | } else |
945 | global_boost.attr.mode = 0444; | 945 | global_boost.attr.mode = 0444; |
946 | 946 | ||
947 | /* We create the boost file in any case, though for systems without | 947 | /* We create the boost file in any case, though for systems without |
948 | * hardware support it will be read-only and hardwired to return 0. | 948 | * hardware support it will be read-only and hardwired to return 0. |
949 | */ | 949 | */ |
950 | if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr))) | 950 | if (cpufreq_sysfs_create_file(&(global_boost.attr))) |
951 | pr_warn(PFX "could not register global boost sysfs file\n"); | 951 | pr_warn(PFX "could not register global boost sysfs file\n"); |
952 | else | 952 | else |
953 | pr_debug("registered global boost sysfs file\n"); | 953 | pr_debug("registered global boost sysfs file\n"); |
954 | } | 954 | } |
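
The boost-init path brackets the MSR writes and notifier registration with get_online_cpus()/put_online_cpus(): no CPU can come or go while the online mask is walked, so every online CPU gets the same boost MSR value, and any CPU that hotplugs in afterwards is handled by the just-registered notifier. A compilable schematic of that bracket, with a pthread mutex and plain arrays standing in for the kernel's hotplug lock, MSR writes, and notifier chain:

    #include <pthread.h>

    #define NR_CPUS 8

    /* the mutex stands in for get_online_cpus()/put_online_cpus() */
    static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
    static int cpu_online[NR_CPUS] = { 1, 1, 1, 1 };
    static int boost_msr[NR_CPUS];
    static void (*hotplug_cb)(unsigned int cpu);  /* ~ register_cpu_notifier() */

    static void on_cpu_up(unsigned int cpu)
    {
        boost_msr[cpu] = boost_msr[0];  /* late arrivals copy the setting */
    }

    static void boost_init(int enable)
    {
        pthread_mutex_lock(&hotplug_lock);      /* online set is frozen here */
        for (unsigned int i = 0; i < NR_CPUS; i++)
            if (cpu_online[i])
                boost_msr[i] = enable;          /* force all MSRs to one value */
        hotplug_cb = on_cpu_up;   /* installed before new CPUs can appear */
        pthread_mutex_unlock(&hotplug_lock);
    }
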
955 | 955 | ||
956 | static void __exit acpi_cpufreq_boost_exit(void) | 956 | static void __exit acpi_cpufreq_boost_exit(void) |
957 | { | 957 | { |
958 | sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr)); | 958 | cpufreq_sysfs_remove_file(&(global_boost.attr)); |
959 | 959 | ||
960 | if (msrs) { | 960 | if (msrs) { |
961 | unregister_cpu_notifier(&boost_nb); | 961 | unregister_cpu_notifier(&boost_nb); |
962 | 962 | ||
963 | msrs_free(msrs); | 963 | msrs_free(msrs); |
964 | msrs = NULL; | 964 | msrs = NULL; |
965 | } | 965 | } |
966 | } | 966 | } |
967 | 967 | ||
968 | static int __init acpi_cpufreq_init(void) | 968 | static int __init acpi_cpufreq_init(void) |
969 | { | 969 | { |
970 | int ret; | 970 | int ret; |
971 | 971 | ||
972 | if (acpi_disabled) | 972 | if (acpi_disabled) |
973 | return 0; | 973 | return 0; |
974 | 974 | ||
975 | pr_debug("acpi_cpufreq_init\n"); | 975 | pr_debug("acpi_cpufreq_init\n"); |
976 | 976 | ||
977 | ret = acpi_cpufreq_early_init(); | 977 | ret = acpi_cpufreq_early_init(); |
978 | if (ret) | 978 | if (ret) |
979 | return ret; | 979 | return ret; |
980 | 980 | ||
981 | #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB | 981 | #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB |
982 | /* this is a sysfs file with a strange name and an even stranger | 982 | /* this is a sysfs file with a strange name and an even stranger |
983 | * semantics - per-CPU instantiation, but system-global effect. | 983 | * semantics - per-CPU instantiation, but system-global effect. |
984 | * Let's enable it only on AMD CPUs for compatibility reasons and | 984 | * Let's enable it only on AMD CPUs for compatibility reasons and |
985 | * only if configured. This is considered legacy code, which | 985 | * only if configured. This is considered legacy code, which |
986 | * will probably be removed at some point in the future. | 986 | * will probably be removed at some point in the future. |
987 | */ | 987 | */ |
988 | if (check_amd_hwpstate_cpu(0)) { | 988 | if (check_amd_hwpstate_cpu(0)) { |
989 | struct freq_attr **iter; | 989 | struct freq_attr **iter; |
990 | 990 | ||
991 | pr_debug("adding sysfs entry for cpb\n"); | 991 | pr_debug("adding sysfs entry for cpb\n"); |
992 | 992 | ||
993 | for (iter = acpi_cpufreq_attr; *iter != NULL; iter++) | 993 | for (iter = acpi_cpufreq_attr; *iter != NULL; iter++) |
994 | ; | 994 | ; |
995 | 995 | ||
996 | /* make sure there is a terminator behind it */ | 996 | /* make sure there is a terminator behind it */ |
997 | if (iter[1] == NULL) | 997 | if (iter[1] == NULL) |
998 | *iter = &cpb; | 998 | *iter = &cpb; |
999 | } | 999 | } |
1000 | #endif | 1000 | #endif |
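
The loop above walks acpi_cpufreq_attr to its first NULL and drops the cpb attribute into that slot; the array was declared with two trailing NULLs precisely so that a terminator still follows after the insertion, and the iter[1] == NULL check guards against someone removing the spare slot. The same trick in isolation, with a hypothetical attribute type:

    /* Appending into a NULL-terminated pointer array that reserves
     * a spare slot; struct attr is a hypothetical stand-in. */
    #include <stddef.h>

    struct attr { const char *name; };

    static struct attr cpb_attr = { "cpb" };

    static struct attr *attrs[] = {
        &(struct attr){ "scaling_available_frequencies" },
        NULL,    /* placeholder: may be filled in at runtime */
        NULL,    /* permanent terminator */
    };

    static void append_attr(void)
    {
        struct attr **iter;

        for (iter = attrs; *iter != NULL; iter++)
            ;                       /* walk to the first empty slot */

        if (iter[1] == NULL)        /* a terminator must remain behind it */
            *iter = &cpb_attr;
    }
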
1001 | 1001 | ||
1002 | ret = cpufreq_register_driver(&acpi_cpufreq_driver); | 1002 | ret = cpufreq_register_driver(&acpi_cpufreq_driver); |
1003 | if (ret) | 1003 | if (ret) |
1004 | free_acpi_perf_data(); | 1004 | free_acpi_perf_data(); |
1005 | else | 1005 | else |
1006 | acpi_cpufreq_boost_init(); | 1006 | acpi_cpufreq_boost_init(); |
1007 | 1007 | ||
1008 | return ret; | 1008 | return ret; |
1009 | } | 1009 | } |
1010 | 1010 | ||
1011 | static void __exit acpi_cpufreq_exit(void) | 1011 | static void __exit acpi_cpufreq_exit(void) |
1012 | { | 1012 | { |
1013 | pr_debug("acpi_cpufreq_exit\n"); | 1013 | pr_debug("acpi_cpufreq_exit\n"); |
1014 | 1014 | ||
1015 | acpi_cpufreq_boost_exit(); | 1015 | acpi_cpufreq_boost_exit(); |
1016 | 1016 | ||
1017 | cpufreq_unregister_driver(&acpi_cpufreq_driver); | 1017 | cpufreq_unregister_driver(&acpi_cpufreq_driver); |
1018 | 1018 | ||
1019 | free_acpi_perf_data(); | 1019 | free_acpi_perf_data(); |
1020 | } | 1020 | } |
1021 | 1021 | ||
1022 | module_param(acpi_pstate_strict, uint, 0644); | 1022 | module_param(acpi_pstate_strict, uint, 0644); |
1023 | MODULE_PARM_DESC(acpi_pstate_strict, | 1023 | MODULE_PARM_DESC(acpi_pstate_strict, |
1024 | "value 0 or non-zero. non-zero -> strict ACPI checks are " | 1024 | "value 0 or non-zero. non-zero -> strict ACPI checks are " |
1025 | "performed during frequency changes."); | 1025 | "performed during frequency changes."); |
1026 | 1026 | ||
1027 | late_initcall(acpi_cpufreq_init); | 1027 | late_initcall(acpi_cpufreq_init); |
1028 | module_exit(acpi_cpufreq_exit); | 1028 | module_exit(acpi_cpufreq_exit); |
1029 | 1029 | ||
1030 | static const struct x86_cpu_id acpi_cpufreq_ids[] = { | 1030 | static const struct x86_cpu_id acpi_cpufreq_ids[] = { |
1031 | X86_FEATURE_MATCH(X86_FEATURE_ACPI), | 1031 | X86_FEATURE_MATCH(X86_FEATURE_ACPI), |
1032 | X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE), | 1032 | X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE), |
1033 | {} | 1033 | {} |
1034 | }; | 1034 | }; |
1035 | MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids); | 1035 | MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids); |
1036 | 1036 | ||
1037 | MODULE_ALIAS("acpi"); | 1037 | MODULE_ALIAS("acpi"); |
1038 | 1038 |
drivers/cpufreq/cpufreq.c
1 | /* | 1 | /* |
2 | * linux/drivers/cpufreq/cpufreq.c | 2 | * linux/drivers/cpufreq/cpufreq.c |
3 | * | 3 | * |
4 | * Copyright (C) 2001 Russell King | 4 | * Copyright (C) 2001 Russell King |
5 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> | 5 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> |
6 | * | 6 | * |
7 | * Oct 2005 - Ashok Raj <ashok.raj@intel.com> | 7 | * Oct 2005 - Ashok Raj <ashok.raj@intel.com> |
8 | * Added handling for CPU hotplug | 8 | * Added handling for CPU hotplug |
9 | * Feb 2006 - Jacob Shin <jacob.shin@amd.com> | 9 | * Feb 2006 - Jacob Shin <jacob.shin@amd.com> |
10 | * Fix handling for CPU hotplug -- affected CPUs | 10 | * Fix handling for CPU hotplug -- affected CPUs |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License version 2 as | 13 | * it under the terms of the GNU General Public License version 2 as |
14 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
19 | 19 | ||
20 | #include <asm/cputime.h> | 20 | #include <asm/cputime.h> |
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/kernel_stat.h> | 22 | #include <linux/kernel_stat.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/notifier.h> | 25 | #include <linux/notifier.h> |
26 | #include <linux/cpufreq.h> | 26 | #include <linux/cpufreq.h> |
27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
28 | #include <linux/interrupt.h> | 28 | #include <linux/interrupt.h> |
29 | #include <linux/spinlock.h> | 29 | #include <linux/spinlock.h> |
30 | #include <linux/tick.h> | 30 | #include <linux/tick.h> |
31 | #include <linux/device.h> | 31 | #include <linux/device.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/cpu.h> | 33 | #include <linux/cpu.h> |
34 | #include <linux/completion.h> | 34 | #include <linux/completion.h> |
35 | #include <linux/mutex.h> | 35 | #include <linux/mutex.h> |
36 | #include <linux/syscore_ops.h> | 36 | #include <linux/syscore_ops.h> |
37 | 37 | ||
38 | #include <trace/events/power.h> | 38 | #include <trace/events/power.h> |
39 | 39 | ||
40 | /** | 40 | /** |
41 | * The "cpufreq driver" - the arch- or hardware-dependent low | 41 | * The "cpufreq driver" - the arch- or hardware-dependent low |
42 | * level driver of CPUFreq support, and its spinlock. This lock | 42 | * level driver of CPUFreq support, and its spinlock. This lock |
43 | * also protects the cpufreq_cpu_data array. | 43 | * also protects the cpufreq_cpu_data array. |
44 | */ | 44 | */ |
45 | static struct cpufreq_driver *cpufreq_driver; | 45 | static struct cpufreq_driver *cpufreq_driver; |
46 | static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); | 46 | static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data); |
47 | #ifdef CONFIG_HOTPLUG_CPU | 47 | #ifdef CONFIG_HOTPLUG_CPU |
48 | /* This one keeps track of the previously set governor of a removed CPU */ | 48 | /* This one keeps track of the previously set governor of a removed CPU */ |
49 | static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); | 49 | static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor); |
50 | #endif | 50 | #endif |
51 | static DEFINE_RWLOCK(cpufreq_driver_lock); | 51 | static DEFINE_RWLOCK(cpufreq_driver_lock); |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure | 54 | * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure |
55 | * all cpufreq/hotplug/workqueue/etc related lock issues. | 55 | * all cpufreq/hotplug/workqueue/etc related lock issues. |
56 | * | 56 | * |
57 | * The rules for this semaphore: | 57 | * The rules for this semaphore: |
58 | * - Any routine that wants to read from the policy structure will | 58 | * - Any routine that wants to read from the policy structure will |
59 | * do a down_read on this semaphore. | 59 | * do a down_read on this semaphore. |
60 | * - Any routine that will write to the policy structure and/or may take away | 60 | * - Any routine that will write to the policy structure and/or may take away |
61 | * the policy altogether (eg. CPU hotplug), will hold this lock in write | 61 | * the policy altogether (eg. CPU hotplug), will hold this lock in write |
62 | * mode before doing so. | 62 | * mode before doing so. |
63 | * | 63 | * |
64 | * Additional rules: | 64 | * Additional rules: |
65 | * - Governor routines that can be called in cpufreq hotplug path should not | 65 | * - Governor routines that can be called in cpufreq hotplug path should not |
66 | * take this sem, as the top-level hotplug notifier handler already takes it. | 66 | * take this sem, as the top-level hotplug notifier handler already takes it. |
67 | * - Lock should not be held across | 67 | * - Lock should not be held across |
68 | * __cpufreq_governor(data, CPUFREQ_GOV_STOP); | 68 | * __cpufreq_governor(data, CPUFREQ_GOV_STOP); |
69 | */ | 69 | */ |
70 | static DEFINE_PER_CPU(int, cpufreq_policy_cpu); | 70 | static DEFINE_PER_CPU(int, cpufreq_policy_cpu); |
71 | static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); | 71 | static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem); |
72 | 72 | ||
73 | #define lock_policy_rwsem(mode, cpu) \ | 73 | #define lock_policy_rwsem(mode, cpu) \ |
74 | static int lock_policy_rwsem_##mode(int cpu) \ | 74 | static int lock_policy_rwsem_##mode(int cpu) \ |
75 | { \ | 75 | { \ |
76 | int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ | 76 | int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ |
77 | BUG_ON(policy_cpu == -1); \ | 77 | BUG_ON(policy_cpu == -1); \ |
78 | down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ | 78 | down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ |
79 | \ | 79 | \ |
80 | return 0; \ | 80 | return 0; \ |
81 | } | 81 | } |
82 | 82 | ||
83 | lock_policy_rwsem(read, cpu); | 83 | lock_policy_rwsem(read, cpu); |
84 | lock_policy_rwsem(write, cpu); | 84 | lock_policy_rwsem(write, cpu); |
85 | 85 | ||
86 | #define unlock_policy_rwsem(mode, cpu) \ | 86 | #define unlock_policy_rwsem(mode, cpu) \ |
87 | static void unlock_policy_rwsem_##mode(int cpu) \ | 87 | static void unlock_policy_rwsem_##mode(int cpu) \ |
88 | { \ | 88 | { \ |
89 | int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ | 89 | int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ |
90 | BUG_ON(policy_cpu == -1); \ | 90 | BUG_ON(policy_cpu == -1); \ |
91 | up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ | 91 | up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ |
92 | } | 92 | } |
93 | 93 | ||
94 | unlock_policy_rwsem(read, cpu); | 94 | unlock_policy_rwsem(read, cpu); |
95 | unlock_policy_rwsem(write, cpu); | 95 | unlock_policy_rwsem(write, cpu); |
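
lock_policy_rwsem() and unlock_policy_rwsem() are macro generators: each expansion pastes the mode token into both the function name and the down_/up_ call, so the read and write variants stay textually identical except for the mode. A compilable miniature of the same token-pasting pattern; the pthread rwlock stands in for the per-policy semaphore:

    /* One macro body, two generated functions. */
    #include <pthread.h>

    static pthread_rwlock_t policy_lock = PTHREAD_RWLOCK_INITIALIZER;

    #define DEFINE_LOCK_FN(mode, op)                    \
    static int lock_policy_##mode(void)                 \
    {                                                   \
        return pthread_rwlock_##op(&policy_lock);       \
    }

    DEFINE_LOCK_FN(read, rdlock)    /* expands to lock_policy_read()  */
    DEFINE_LOCK_FN(write, wrlock)   /* expands to lock_policy_write() */

    static int unlock_policy(void)
    {
        return pthread_rwlock_unlock(&policy_lock);
    }
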
96 | 96 | ||
97 | /* internal prototypes */ | 97 | /* internal prototypes */ |
98 | static int __cpufreq_governor(struct cpufreq_policy *policy, | 98 | static int __cpufreq_governor(struct cpufreq_policy *policy, |
99 | unsigned int event); | 99 | unsigned int event); |
100 | static unsigned int __cpufreq_get(unsigned int cpu); | 100 | static unsigned int __cpufreq_get(unsigned int cpu); |
101 | static void handle_update(struct work_struct *work); | 101 | static void handle_update(struct work_struct *work); |
102 | 102 | ||
103 | /** | 103 | /** |
104 | * Two notifier lists: the "policy" list is involved in the | 104 | * Two notifier lists: the "policy" list is involved in the |
105 | * validation process for a new CPU frequency policy; the | 105 | * validation process for a new CPU frequency policy; the |
106 | * "transition" list for kernel code that needs to handle | 106 | * "transition" list for kernel code that needs to handle |
107 | * changes to devices when the CPU clock speed changes. | 107 | * changes to devices when the CPU clock speed changes. |
108 | * The mutex locks both lists. | 108 | * The mutex locks both lists. |
109 | */ | 109 | */ |
110 | static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list); | 110 | static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list); |
111 | static struct srcu_notifier_head cpufreq_transition_notifier_list; | 111 | static struct srcu_notifier_head cpufreq_transition_notifier_list; |
112 | 112 | ||
113 | static bool init_cpufreq_transition_notifier_list_called; | 113 | static bool init_cpufreq_transition_notifier_list_called; |
114 | static int __init init_cpufreq_transition_notifier_list(void) | 114 | static int __init init_cpufreq_transition_notifier_list(void) |
115 | { | 115 | { |
116 | srcu_init_notifier_head(&cpufreq_transition_notifier_list); | 116 | srcu_init_notifier_head(&cpufreq_transition_notifier_list); |
117 | init_cpufreq_transition_notifier_list_called = true; | 117 | init_cpufreq_transition_notifier_list_called = true; |
118 | return 0; | 118 | return 0; |
119 | } | 119 | } |
120 | pure_initcall(init_cpufreq_transition_notifier_list); | 120 | pure_initcall(init_cpufreq_transition_notifier_list); |
121 | 121 | ||
122 | static int off __read_mostly; | 122 | static int off __read_mostly; |
123 | static int cpufreq_disabled(void) | 123 | static int cpufreq_disabled(void) |
124 | { | 124 | { |
125 | return off; | 125 | return off; |
126 | } | 126 | } |
127 | void disable_cpufreq(void) | 127 | void disable_cpufreq(void) |
128 | { | 128 | { |
129 | off = 1; | 129 | off = 1; |
130 | } | 130 | } |
131 | static LIST_HEAD(cpufreq_governor_list); | 131 | static LIST_HEAD(cpufreq_governor_list); |
132 | static DEFINE_MUTEX(cpufreq_governor_mutex); | 132 | static DEFINE_MUTEX(cpufreq_governor_mutex); |
133 | 133 | ||
134 | bool have_governor_per_policy(void) | 134 | bool have_governor_per_policy(void) |
135 | { | 135 | { |
136 | return cpufreq_driver->have_governor_per_policy; | 136 | return cpufreq_driver->have_governor_per_policy; |
137 | } | 137 | } |
138 | EXPORT_SYMBOL_GPL(have_governor_per_policy); | 138 | EXPORT_SYMBOL_GPL(have_governor_per_policy); |
139 | 139 | ||
140 | struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) | 140 | struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) |
141 | { | 141 | { |
142 | if (have_governor_per_policy()) | 142 | if (have_governor_per_policy()) |
143 | return &policy->kobj; | 143 | return &policy->kobj; |
144 | else | 144 | else |
145 | return cpufreq_global_kobject; | 145 | return cpufreq_global_kobject; |
146 | } | 146 | } |
147 | EXPORT_SYMBOL_GPL(get_governor_parent_kobj); | 147 | EXPORT_SYMBOL_GPL(get_governor_parent_kobj); |
148 | 148 | ||
149 | static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) | 149 | static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall) |
150 | { | 150 | { |
151 | u64 idle_time; | 151 | u64 idle_time; |
152 | u64 cur_wall_time; | 152 | u64 cur_wall_time; |
153 | u64 busy_time; | 153 | u64 busy_time; |
154 | 154 | ||
155 | cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); | 155 | cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); |
156 | 156 | ||
157 | busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; | 157 | busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER]; |
158 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; | 158 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM]; |
159 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; | 159 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ]; |
160 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; | 160 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ]; |
161 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; | 161 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; |
162 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; | 162 | busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE]; |
163 | 163 | ||
164 | idle_time = cur_wall_time - busy_time; | 164 | idle_time = cur_wall_time - busy_time; |
165 | if (wall) | 165 | if (wall) |
166 | *wall = cputime_to_usecs(cur_wall_time); | 166 | *wall = cputime_to_usecs(cur_wall_time); |
167 | 167 | ||
168 | return cputime_to_usecs(idle_time); | 168 | return cputime_to_usecs(idle_time); |
169 | } | 169 | } |
170 | 170 | ||
171 | u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) | 171 | u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy) |
172 | { | 172 | { |
173 | u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL); | 173 | u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL); |
174 | 174 | ||
175 | if (idle_time == -1ULL) | 175 | if (idle_time == -1ULL) |
176 | return get_cpu_idle_time_jiffy(cpu, wall); | 176 | return get_cpu_idle_time_jiffy(cpu, wall); |
177 | else if (!io_busy) | 177 | else if (!io_busy) |
178 | idle_time += get_cpu_iowait_time_us(cpu, wall); | 178 | idle_time += get_cpu_iowait_time_us(cpu, wall); |
179 | 179 | ||
180 | return idle_time; | 180 | return idle_time; |
181 | } | 181 | } |
182 | EXPORT_SYMBOL_GPL(get_cpu_idle_time); | 182 | EXPORT_SYMBOL_GPL(get_cpu_idle_time); |
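
get_cpu_idle_time_jiffy() derives idle time by subtraction: wall time minus the sum of every busy cpustat bucket (user, system, irq, softirq, steal, nice), while get_cpu_idle_time() prefers the tickless microsecond accounting and only falls back to the jiffy version when it returns -1ULL. The subtraction in isolation, with hypothetical microsecond counters:

    #include <stdint.h>

    struct cpustat {    /* hypothetical microsecond counters */
        uint64_t user, system, irq, softirq, steal, nice;
    };

    static uint64_t idle_time_us(uint64_t wall_us, const struct cpustat *s)
    {
        uint64_t busy = s->user + s->system + s->irq +
                        s->softirq + s->steal + s->nice;

        return wall_us - busy;  /* idle is whatever was not accounted busy */
    }
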
183 | 183 | ||
184 | static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs) | 184 | static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs) |
185 | { | 185 | { |
186 | struct cpufreq_policy *data; | 186 | struct cpufreq_policy *data; |
187 | unsigned long flags; | 187 | unsigned long flags; |
188 | 188 | ||
189 | if (cpu >= nr_cpu_ids) | 189 | if (cpu >= nr_cpu_ids) |
190 | goto err_out; | 190 | goto err_out; |
191 | 191 | ||
192 | /* get the cpufreq driver */ | 192 | /* get the cpufreq driver */ |
193 | read_lock_irqsave(&cpufreq_driver_lock, flags); | 193 | read_lock_irqsave(&cpufreq_driver_lock, flags); |
194 | 194 | ||
195 | if (!cpufreq_driver) | 195 | if (!cpufreq_driver) |
196 | goto err_out_unlock; | 196 | goto err_out_unlock; |
197 | 197 | ||
198 | if (!try_module_get(cpufreq_driver->owner)) | 198 | if (!try_module_get(cpufreq_driver->owner)) |
199 | goto err_out_unlock; | 199 | goto err_out_unlock; |
200 | 200 | ||
201 | 201 | ||
202 | /* get the CPU */ | 202 | /* get the CPU */ |
203 | data = per_cpu(cpufreq_cpu_data, cpu); | 203 | data = per_cpu(cpufreq_cpu_data, cpu); |
204 | 204 | ||
205 | if (!data) | 205 | if (!data) |
206 | goto err_out_put_module; | 206 | goto err_out_put_module; |
207 | 207 | ||
208 | if (!sysfs && !kobject_get(&data->kobj)) | 208 | if (!sysfs && !kobject_get(&data->kobj)) |
209 | goto err_out_put_module; | 209 | goto err_out_put_module; |
210 | 210 | ||
211 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); | 211 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); |
212 | return data; | 212 | return data; |
213 | 213 | ||
214 | err_out_put_module: | 214 | err_out_put_module: |
215 | module_put(cpufreq_driver->owner); | 215 | module_put(cpufreq_driver->owner); |
216 | err_out_unlock: | 216 | err_out_unlock: |
217 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); | 217 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); |
218 | err_out: | 218 | err_out: |
219 | return NULL; | 219 | return NULL; |
220 | } | 220 | } |
221 | 221 | ||
222 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) | 222 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu) |
223 | { | 223 | { |
224 | if (cpufreq_disabled()) | 224 | if (cpufreq_disabled()) |
225 | return NULL; | 225 | return NULL; |
226 | 226 | ||
227 | return __cpufreq_cpu_get(cpu, false); | 227 | return __cpufreq_cpu_get(cpu, false); |
228 | } | 228 | } |
229 | EXPORT_SYMBOL_GPL(cpufreq_cpu_get); | 229 | EXPORT_SYMBOL_GPL(cpufreq_cpu_get); |
230 | 230 | ||
231 | static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu) | 231 | static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu) |
232 | { | 232 | { |
233 | return __cpufreq_cpu_get(cpu, true); | 233 | return __cpufreq_cpu_get(cpu, true); |
234 | } | 234 | } |
235 | 235 | ||
236 | static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs) | 236 | static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs) |
237 | { | 237 | { |
238 | if (!sysfs) | 238 | if (!sysfs) |
239 | kobject_put(&data->kobj); | 239 | kobject_put(&data->kobj); |
240 | module_put(cpufreq_driver->owner); | 240 | module_put(cpufreq_driver->owner); |
241 | } | 241 | } |
242 | 242 | ||
243 | void cpufreq_cpu_put(struct cpufreq_policy *data) | 243 | void cpufreq_cpu_put(struct cpufreq_policy *data) |
244 | { | 244 | { |
245 | if (cpufreq_disabled()) | 245 | if (cpufreq_disabled()) |
246 | return; | 246 | return; |
247 | 247 | ||
248 | __cpufreq_cpu_put(data, false); | 248 | __cpufreq_cpu_put(data, false); |
249 | } | 249 | } |
250 | EXPORT_SYMBOL_GPL(cpufreq_cpu_put); | 250 | EXPORT_SYMBOL_GPL(cpufreq_cpu_put); |
251 | 251 | ||
252 | static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data) | 252 | static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data) |
253 | { | 253 | { |
254 | __cpufreq_cpu_put(data, true); | 254 | __cpufreq_cpu_put(data, true); |
255 | } | 255 | } |
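
__cpufreq_cpu_get()/__cpufreq_cpu_put() pin two things: the driver module, so it cannot be unloaded mid-use, and, unless the caller is the sysfs path, the policy kobject. The sysfs variant skips the kobject reference since the attribute access itself already guarantees the kobject is alive. The shape of the paired get/put, reduced to plain counters standing in for try_module_get()/kobject_get() and their puts:

    #include <stdbool.h>
    #include <stddef.h>

    struct policy { int kobj_refs; };
    static int module_refs;

    static struct policy *policy_get(struct policy *p, bool from_sysfs)
    {
        module_refs++;              /* ~ try_module_get(): pin the driver */
        if (!p) {
            module_refs--;          /* undo on lookup failure */
            return NULL;
        }
        if (!from_sysfs)
            p->kobj_refs++;         /* ~ kobject_get(); sysfs callers skip it */
        return p;
    }

    static void policy_put(struct policy *p, bool from_sysfs)
    {
        if (!from_sysfs)
            p->kobj_refs--;         /* every put strictly mirrors its get */
        module_refs--;
    }
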
256 | 256 | ||
257 | /********************************************************************* | 257 | /********************************************************************* |
258 | * EXTERNALLY AFFECTING FREQUENCY CHANGES * | 258 | * EXTERNALLY AFFECTING FREQUENCY CHANGES * |
259 | *********************************************************************/ | 259 | *********************************************************************/ |
260 | 260 | ||
261 | /** | 261 | /** |
262 | * adjust_jiffies - adjust the system "loops_per_jiffy" | 262 | * adjust_jiffies - adjust the system "loops_per_jiffy" |
263 | * | 263 | * |
264 | * This function alters the system "loops_per_jiffy" for the clock | 264 | * This function alters the system "loops_per_jiffy" for the clock |
265 | * speed change. Note that loops_per_jiffy cannot be updated on SMP | 265 | * speed change. Note that loops_per_jiffy cannot be updated on SMP |
266 | * systems as each CPU might be scaled differently. So, use the arch | 266 | * systems as each CPU might be scaled differently. So, use the arch |
267 | * per-CPU loops_per_jiffy value wherever possible. | 267 | * per-CPU loops_per_jiffy value wherever possible. |
268 | */ | 268 | */ |
269 | #ifndef CONFIG_SMP | 269 | #ifndef CONFIG_SMP |
270 | static unsigned long l_p_j_ref; | 270 | static unsigned long l_p_j_ref; |
271 | static unsigned int l_p_j_ref_freq; | 271 | static unsigned int l_p_j_ref_freq; |
272 | 272 | ||
273 | static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) | 273 | static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) |
274 | { | 274 | { |
275 | if (ci->flags & CPUFREQ_CONST_LOOPS) | 275 | if (ci->flags & CPUFREQ_CONST_LOOPS) |
276 | return; | 276 | return; |
277 | 277 | ||
278 | if (!l_p_j_ref_freq) { | 278 | if (!l_p_j_ref_freq) { |
279 | l_p_j_ref = loops_per_jiffy; | 279 | l_p_j_ref = loops_per_jiffy; |
280 | l_p_j_ref_freq = ci->old; | 280 | l_p_j_ref_freq = ci->old; |
281 | pr_debug("saving %lu as reference value for loops_per_jiffy; " | 281 | pr_debug("saving %lu as reference value for loops_per_jiffy; " |
282 | "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); | 282 | "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq); |
283 | } | 283 | } |
284 | if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) || | 284 | if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) || |
285 | (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { | 285 | (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) { |
286 | loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, | 286 | loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq, |
287 | ci->new); | 287 | ci->new); |
288 | pr_debug("scaling loops_per_jiffy to %lu " | 288 | pr_debug("scaling loops_per_jiffy to %lu " |
289 | "for frequency %u kHz\n", loops_per_jiffy, ci->new); | 289 | "for frequency %u kHz\n", loops_per_jiffy, ci->new); |
290 | } | 290 | } |
291 | } | 291 | } |
292 | #else | 292 | #else |
293 | static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) | 293 | static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci) |
294 | { | 294 | { |
295 | return; | 295 | return; |
296 | } | 296 | } |
297 | #endif | 297 | #endif |
298 | 298 | ||
299 | 299 | ||
300 | void __cpufreq_notify_transition(struct cpufreq_policy *policy, | 300 | void __cpufreq_notify_transition(struct cpufreq_policy *policy, |
301 | struct cpufreq_freqs *freqs, unsigned int state) | 301 | struct cpufreq_freqs *freqs, unsigned int state) |
302 | { | 302 | { |
303 | BUG_ON(irqs_disabled()); | 303 | BUG_ON(irqs_disabled()); |
304 | 304 | ||
305 | if (cpufreq_disabled()) | 305 | if (cpufreq_disabled()) |
306 | return; | 306 | return; |
307 | 307 | ||
308 | freqs->flags = cpufreq_driver->flags; | 308 | freqs->flags = cpufreq_driver->flags; |
309 | pr_debug("notification %u of frequency transition to %u kHz\n", | 309 | pr_debug("notification %u of frequency transition to %u kHz\n", |
310 | state, freqs->new); | 310 | state, freqs->new); |
311 | 311 | ||
312 | switch (state) { | 312 | switch (state) { |
313 | 313 | ||
314 | case CPUFREQ_PRECHANGE: | 314 | case CPUFREQ_PRECHANGE: |
315 | /* detect if the driver reported a value as "old frequency" | 315 | /* detect if the driver reported a value as "old frequency" |
316 | * which is not equal to what the cpufreq core thinks is | 316 | * which is not equal to what the cpufreq core thinks is |
317 | * "old frequency". | 317 | * "old frequency". |
318 | */ | 318 | */ |
319 | if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { | 319 | if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { |
320 | if ((policy) && (policy->cpu == freqs->cpu) && | 320 | if ((policy) && (policy->cpu == freqs->cpu) && |
321 | (policy->cur) && (policy->cur != freqs->old)) { | 321 | (policy->cur) && (policy->cur != freqs->old)) { |
322 | pr_debug("Warning: CPU frequency is" | 322 | pr_debug("Warning: CPU frequency is" |
323 | " %u, cpufreq assumed %u kHz.\n", | 323 | " %u, cpufreq assumed %u kHz.\n", |
324 | freqs->old, policy->cur); | 324 | freqs->old, policy->cur); |
325 | freqs->old = policy->cur; | 325 | freqs->old = policy->cur; |
326 | } | 326 | } |
327 | } | 327 | } |
328 | srcu_notifier_call_chain(&cpufreq_transition_notifier_list, | 328 | srcu_notifier_call_chain(&cpufreq_transition_notifier_list, |
329 | CPUFREQ_PRECHANGE, freqs); | 329 | CPUFREQ_PRECHANGE, freqs); |
330 | adjust_jiffies(CPUFREQ_PRECHANGE, freqs); | 330 | adjust_jiffies(CPUFREQ_PRECHANGE, freqs); |
331 | break; | 331 | break; |
332 | 332 | ||
333 | case CPUFREQ_POSTCHANGE: | 333 | case CPUFREQ_POSTCHANGE: |
334 | adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); | 334 | adjust_jiffies(CPUFREQ_POSTCHANGE, freqs); |
335 | pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, | 335 | pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new, |
336 | (unsigned long)freqs->cpu); | 336 | (unsigned long)freqs->cpu); |
337 | trace_cpu_frequency(freqs->new, freqs->cpu); | 337 | trace_cpu_frequency(freqs->new, freqs->cpu); |
338 | srcu_notifier_call_chain(&cpufreq_transition_notifier_list, | 338 | srcu_notifier_call_chain(&cpufreq_transition_notifier_list, |
339 | CPUFREQ_POSTCHANGE, freqs); | 339 | CPUFREQ_POSTCHANGE, freqs); |
340 | if (likely(policy) && likely(policy->cpu == freqs->cpu)) | 340 | if (likely(policy) && likely(policy->cpu == freqs->cpu)) |
341 | policy->cur = freqs->new; | 341 | policy->cur = freqs->new; |
342 | break; | 342 | break; |
343 | } | 343 | } |
344 | } | 344 | } |
345 | /** | 345 | /** |
346 | * cpufreq_notify_transition - call notifier chain and adjust_jiffies | 346 | * cpufreq_notify_transition - call notifier chain and adjust_jiffies |
347 | * on frequency transition. | 347 | * on frequency transition. |
348 | * | 348 | * |
349 | * This function calls the transition notifiers and the "adjust_jiffies" | 349 | * This function calls the transition notifiers and the "adjust_jiffies" |
350 | * function. It is called twice on all CPU frequency changes that have | 350 | * function. It is called twice on all CPU frequency changes that have |
351 | * external effects. | 351 | * external effects. |
352 | */ | 352 | */ |
353 | void cpufreq_notify_transition(struct cpufreq_policy *policy, | 353 | void cpufreq_notify_transition(struct cpufreq_policy *policy, |
354 | struct cpufreq_freqs *freqs, unsigned int state) | 354 | struct cpufreq_freqs *freqs, unsigned int state) |
355 | { | 355 | { |
356 | for_each_cpu(freqs->cpu, policy->cpus) | 356 | for_each_cpu(freqs->cpu, policy->cpus) |
357 | __cpufreq_notify_transition(policy, freqs, state); | 357 | __cpufreq_notify_transition(policy, freqs, state); |
358 | } | 358 | } |
359 | EXPORT_SYMBOL_GPL(cpufreq_notify_transition); | 359 | EXPORT_SYMBOL_GPL(cpufreq_notify_transition); |
360 | 360 | ||
361 | 361 | ||
362 | 362 | ||
363 | /********************************************************************* | 363 | /********************************************************************* |
364 | * SYSFS INTERFACE * | 364 | * SYSFS INTERFACE * |
365 | *********************************************************************/ | 365 | *********************************************************************/ |
366 | 366 | ||
367 | static struct cpufreq_governor *__find_governor(const char *str_governor) | 367 | static struct cpufreq_governor *__find_governor(const char *str_governor) |
368 | { | 368 | { |
369 | struct cpufreq_governor *t; | 369 | struct cpufreq_governor *t; |
370 | 370 | ||
371 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) | 371 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) |
372 | if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN)) | 372 | if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN)) |
373 | return t; | 373 | return t; |
374 | 374 | ||
375 | return NULL; | 375 | return NULL; |
376 | } | 376 | } |
377 | 377 | ||
378 | /** | 378 | /** |
379 | * cpufreq_parse_governor - parse a governor string | 379 | * cpufreq_parse_governor - parse a governor string |
380 | */ | 380 | */ |
381 | static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, | 381 | static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, |
382 | struct cpufreq_governor **governor) | 382 | struct cpufreq_governor **governor) |
383 | { | 383 | { |
384 | int err = -EINVAL; | 384 | int err = -EINVAL; |
385 | 385 | ||
386 | if (!cpufreq_driver) | 386 | if (!cpufreq_driver) |
387 | goto out; | 387 | goto out; |
388 | 388 | ||
389 | if (cpufreq_driver->setpolicy) { | 389 | if (cpufreq_driver->setpolicy) { |
390 | if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { | 390 | if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { |
391 | *policy = CPUFREQ_POLICY_PERFORMANCE; | 391 | *policy = CPUFREQ_POLICY_PERFORMANCE; |
392 | err = 0; | 392 | err = 0; |
393 | } else if (!strnicmp(str_governor, "powersave", | 393 | } else if (!strnicmp(str_governor, "powersave", |
394 | CPUFREQ_NAME_LEN)) { | 394 | CPUFREQ_NAME_LEN)) { |
395 | *policy = CPUFREQ_POLICY_POWERSAVE; | 395 | *policy = CPUFREQ_POLICY_POWERSAVE; |
396 | err = 0; | 396 | err = 0; |
397 | } | 397 | } |
398 | } else if (cpufreq_driver->target) { | 398 | } else if (cpufreq_driver->target) { |
399 | struct cpufreq_governor *t; | 399 | struct cpufreq_governor *t; |
400 | 400 | ||
401 | mutex_lock(&cpufreq_governor_mutex); | 401 | mutex_lock(&cpufreq_governor_mutex); |
402 | 402 | ||
403 | t = __find_governor(str_governor); | 403 | t = __find_governor(str_governor); |
404 | 404 | ||
405 | if (t == NULL) { | 405 | if (t == NULL) { |
406 | int ret; | 406 | int ret; |
407 | 407 | ||
408 | mutex_unlock(&cpufreq_governor_mutex); | 408 | mutex_unlock(&cpufreq_governor_mutex); |
409 | ret = request_module("cpufreq_%s", str_governor); | 409 | ret = request_module("cpufreq_%s", str_governor); |
410 | mutex_lock(&cpufreq_governor_mutex); | 410 | mutex_lock(&cpufreq_governor_mutex); |
411 | 411 | ||
412 | if (ret == 0) | 412 | if (ret == 0) |
413 | t = __find_governor(str_governor); | 413 | t = __find_governor(str_governor); |
414 | } | 414 | } |
415 | 415 | ||
416 | if (t != NULL) { | 416 | if (t != NULL) { |
417 | *governor = t; | 417 | *governor = t; |
418 | err = 0; | 418 | err = 0; |
419 | } | 419 | } |
420 | 420 | ||
421 | mutex_unlock(&cpufreq_governor_mutex); | 421 | mutex_unlock(&cpufreq_governor_mutex); |
422 | } | 422 | } |
423 | out: | 423 | out: |
424 | return err; | 424 | return err; |
425 | } | 425 | } |
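
cpufreq_parse_governor() shows a classic locking pattern: the governor list is searched under cpufreq_governor_mutex, but request_module() can sleep and the loaded module's init will register the governor, which needs that same mutex. So the lock is dropped around the modprobe and the list is searched a second time once it is retaken. A miniature of that drop-and-retry dance; find_in_list() and load_module() are stubbed stand-ins for __find_governor() and request_module():

    #include <pthread.h>
    #include <stddef.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* stubbed for the sketch */
    static void *find_in_list(const char *name) { (void)name; return NULL; }
    static int load_module(const char *name)    { (void)name; return -1; }

    static void *lookup_or_load(const char *name)
    {
        void *t;

        pthread_mutex_lock(&list_lock);
        t = find_in_list(name);
        if (!t) {
            /* the loader may itself need list_lock, so drop it */
            pthread_mutex_unlock(&list_lock);
            if (load_module(name) == 0) {
                pthread_mutex_lock(&list_lock);
                t = find_in_list(name);  /* re-check after reacquiring */
                pthread_mutex_unlock(&list_lock);
            }
            return t;
        }
        pthread_mutex_unlock(&list_lock);
        return t;
    }
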
426 | 426 | ||
427 | 427 | ||
428 | /** | 428 | /** |
429 | * cpufreq_per_cpu_attr_read() / show_##file_name() - | 429 | * cpufreq_per_cpu_attr_read() / show_##file_name() - |
430 | * print out cpufreq information | 430 | * print out cpufreq information |
431 | * | 431 | * |
432 | * Write out information from cpufreq_driver->policy[cpu]; object must be | 432 | * Write out information from cpufreq_driver->policy[cpu]; object must be |
433 | * "unsigned int". | 433 | * "unsigned int". |
434 | */ | 434 | */ |
435 | 435 | ||
436 | #define show_one(file_name, object) \ | 436 | #define show_one(file_name, object) \ |
437 | static ssize_t show_##file_name \ | 437 | static ssize_t show_##file_name \ |
438 | (struct cpufreq_policy *policy, char *buf) \ | 438 | (struct cpufreq_policy *policy, char *buf) \ |
439 | { \ | 439 | { \ |
440 | return sprintf(buf, "%u\n", policy->object); \ | 440 | return sprintf(buf, "%u\n", policy->object); \ |
441 | } | 441 | } |
442 | 442 | ||
443 | show_one(cpuinfo_min_freq, cpuinfo.min_freq); | 443 | show_one(cpuinfo_min_freq, cpuinfo.min_freq); |
444 | show_one(cpuinfo_max_freq, cpuinfo.max_freq); | 444 | show_one(cpuinfo_max_freq, cpuinfo.max_freq); |
445 | show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); | 445 | show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); |
446 | show_one(scaling_min_freq, min); | 446 | show_one(scaling_min_freq, min); |
447 | show_one(scaling_max_freq, max); | 447 | show_one(scaling_max_freq, max); |
448 | show_one(scaling_cur_freq, cur); | 448 | show_one(scaling_cur_freq, cur); |
449 | 449 | ||
450 | static int __cpufreq_set_policy(struct cpufreq_policy *data, | 450 | static int __cpufreq_set_policy(struct cpufreq_policy *data, |
451 | struct cpufreq_policy *policy); | 451 | struct cpufreq_policy *policy); |
452 | 452 | ||
453 | /** | 453 | /** |
454 | * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access | 454 | * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access |
455 | */ | 455 | */ |
456 | #define store_one(file_name, object) \ | 456 | #define store_one(file_name, object) \ |
457 | static ssize_t store_##file_name \ | 457 | static ssize_t store_##file_name \ |
458 | (struct cpufreq_policy *policy, const char *buf, size_t count) \ | 458 | (struct cpufreq_policy *policy, const char *buf, size_t count) \ |
459 | { \ | 459 | { \ |
460 | unsigned int ret; \ | 460 | unsigned int ret; \ |
461 | struct cpufreq_policy new_policy; \ | 461 | struct cpufreq_policy new_policy; \ |
462 | \ | 462 | \ |
463 | ret = cpufreq_get_policy(&new_policy, policy->cpu); \ | 463 | ret = cpufreq_get_policy(&new_policy, policy->cpu); \ |
464 | if (ret) \ | 464 | if (ret) \ |
465 | return -EINVAL; \ | 465 | return -EINVAL; \ |
466 | \ | 466 | \ |
467 | ret = sscanf(buf, "%u", &new_policy.object); \ | 467 | ret = sscanf(buf, "%u", &new_policy.object); \ |
468 | if (ret != 1) \ | 468 | if (ret != 1) \ |
469 | return -EINVAL; \ | 469 | return -EINVAL; \ |
470 | \ | 470 | \ |
471 | ret = __cpufreq_set_policy(policy, &new_policy); \ | 471 | ret = __cpufreq_set_policy(policy, &new_policy); \ |
472 | policy->user_policy.object = policy->object; \ | 472 | policy->user_policy.object = policy->object; \ |
473 | \ | 473 | \ |
474 | return ret ? ret : count; \ | 474 | return ret ? ret : count; \ |
475 | } | 475 | } |
476 | 476 | ||
477 | store_one(scaling_min_freq, min); | 477 | store_one(scaling_min_freq, min); |
478 | store_one(scaling_max_freq, max); | 478 | store_one(scaling_max_freq, max); |
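
Each store_##file_name generated above follows the same validate-then-commit shape: snapshot the current policy, parse exactly one unsigned value out of the buffer, hand the modified snapshot to __cpufreq_set_policy() for range-checking, and only report success when that verdict is clean. The shape outside the macro, with an illustrative policy struct and checker:

    #include <stdio.h>

    struct policy { unsigned int min, max; };    /* illustrative */

    static int set_policy_checked(struct policy *cur, const struct policy *req)
    {
        if (req->min > req->max)
            return -1;    /* reject before touching *cur */
        *cur = *req;
        return 0;
    }

    static long store_max(struct policy *cur, const char *buf, long count)
    {
        struct policy new_policy = *cur;    /* snapshot current settings */

        if (sscanf(buf, "%u", &new_policy.max) != 1)
            return -1;    /* exactly one unsigned value expected */

        if (set_policy_checked(cur, &new_policy))
            return -1;

        return count;     /* sysfs convention: report all bytes consumed */
    }
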
479 | 479 | ||
480 | /** | 480 | /** |
481 | * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware | 481 | * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware |
482 | */ | 482 | */ |
483 | static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, | 483 | static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, |
484 | char *buf) | 484 | char *buf) |
485 | { | 485 | { |
486 | unsigned int cur_freq = __cpufreq_get(policy->cpu); | 486 | unsigned int cur_freq = __cpufreq_get(policy->cpu); |
487 | if (!cur_freq) | 487 | if (!cur_freq) |
488 | return sprintf(buf, "<unknown>"); | 488 | return sprintf(buf, "<unknown>"); |
489 | return sprintf(buf, "%u\n", cur_freq); | 489 | return sprintf(buf, "%u\n", cur_freq); |
490 | } | 490 | } |
491 | 491 | ||
492 | 492 | ||
493 | /** | 493 | /** |
494 | * show_scaling_governor - show the current policy for the specified CPU | 494 | * show_scaling_governor - show the current policy for the specified CPU |
495 | */ | 495 | */ |
496 | static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) | 496 | static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf) |
497 | { | 497 | { |
498 | if (policy->policy == CPUFREQ_POLICY_POWERSAVE) | 498 | if (policy->policy == CPUFREQ_POLICY_POWERSAVE) |
499 | return sprintf(buf, "powersave\n"); | 499 | return sprintf(buf, "powersave\n"); |
500 | else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) | 500 | else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) |
501 | return sprintf(buf, "performance\n"); | 501 | return sprintf(buf, "performance\n"); |
502 | else if (policy->governor) | 502 | else if (policy->governor) |
503 | return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", | 503 | return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", |
504 | policy->governor->name); | 504 | policy->governor->name); |
505 | return -EINVAL; | 505 | return -EINVAL; |
506 | } | 506 | } |
507 | 507 | ||
508 | 508 | ||
509 | /** | 509 | /** |
510 | * store_scaling_governor - store policy for the specified CPU | 510 | * store_scaling_governor - store policy for the specified CPU |
511 | */ | 511 | */ |
512 | static ssize_t store_scaling_governor(struct cpufreq_policy *policy, | 512 | static ssize_t store_scaling_governor(struct cpufreq_policy *policy, |
513 | const char *buf, size_t count) | 513 | const char *buf, size_t count) |
514 | { | 514 | { |
515 | unsigned int ret; | 515 | unsigned int ret; |
516 | char str_governor[16]; | 516 | char str_governor[16]; |
517 | struct cpufreq_policy new_policy; | 517 | struct cpufreq_policy new_policy; |
518 | 518 | ||
519 | ret = cpufreq_get_policy(&new_policy, policy->cpu); | 519 | ret = cpufreq_get_policy(&new_policy, policy->cpu); |
520 | if (ret) | 520 | if (ret) |
521 | return ret; | 521 | return ret; |
522 | 522 | ||
523 | ret = sscanf(buf, "%15s", str_governor); | 523 | ret = sscanf(buf, "%15s", str_governor); |
524 | if (ret != 1) | 524 | if (ret != 1) |
525 | return -EINVAL; | 525 | return -EINVAL; |
526 | 526 | ||
527 | if (cpufreq_parse_governor(str_governor, &new_policy.policy, | 527 | if (cpufreq_parse_governor(str_governor, &new_policy.policy, |
528 | &new_policy.governor)) | 528 | &new_policy.governor)) |
529 | return -EINVAL; | 529 | return -EINVAL; |
530 | 530 | ||
531 | /* Do not use cpufreq_set_policy here or the user_policy.max | 531 | /* Do not use cpufreq_set_policy here or the user_policy.max |
532 | will be wrongly overridden */ | 532 | will be wrongly overridden */ |
533 | ret = __cpufreq_set_policy(policy, &new_policy); | 533 | ret = __cpufreq_set_policy(policy, &new_policy); |
534 | 534 | ||
535 | policy->user_policy.policy = policy->policy; | 535 | policy->user_policy.policy = policy->policy; |
536 | policy->user_policy.governor = policy->governor; | 536 | policy->user_policy.governor = policy->governor; |
537 | 537 | ||
538 | if (ret) | 538 | if (ret) |
539 | return ret; | 539 | return ret; |
540 | else | 540 | else |
541 | return count; | 541 | return count; |
542 | } | 542 | } |
543 | 543 | ||
544 | /** | 544 | /** |
545 | * show_scaling_driver - show the cpufreq driver currently loaded | 545 | * show_scaling_driver - show the cpufreq driver currently loaded |
546 | */ | 546 | */ |
547 | static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) | 547 | static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf) |
548 | { | 548 | { |
549 | return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name); | 549 | return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name); |
550 | } | 550 | } |
551 | 551 | ||
552 | /** | 552 | /** |
553 | * show_scaling_available_governors - show the available CPUfreq governors | 553 | * show_scaling_available_governors - show the available CPUfreq governors |
554 | */ | 554 | */ |
555 | static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, | 555 | static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy, |
556 | char *buf) | 556 | char *buf) |
557 | { | 557 | { |
558 | ssize_t i = 0; | 558 | ssize_t i = 0; |
559 | struct cpufreq_governor *t; | 559 | struct cpufreq_governor *t; |
560 | 560 | ||
561 | if (!cpufreq_driver->target) { | 561 | if (!cpufreq_driver->target) { |
562 | i += sprintf(buf, "performance powersave"); | 562 | i += sprintf(buf, "performance powersave"); |
563 | goto out; | 563 | goto out; |
564 | } | 564 | } |
565 | 565 | ||
566 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) { | 566 | list_for_each_entry(t, &cpufreq_governor_list, governor_list) { |
567 | if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) | 567 | if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char)) |
568 | - (CPUFREQ_NAME_LEN + 2))) | 568 | - (CPUFREQ_NAME_LEN + 2))) |
569 | goto out; | 569 | goto out; |
570 | i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); | 570 | i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name); |
571 | } | 571 | } |
572 | out: | 572 | out: |
573 | i += sprintf(&buf[i], "\n"); | 573 | i += sprintf(&buf[i], "\n"); |
574 | return i; | 574 | return i; |
575 | } | 575 | } |
576 | 576 | ||
577 | static ssize_t show_cpus(const struct cpumask *mask, char *buf) | 577 | static ssize_t show_cpus(const struct cpumask *mask, char *buf) |
578 | { | 578 | { |
579 | ssize_t i = 0; | 579 | ssize_t i = 0; |
580 | unsigned int cpu; | 580 | unsigned int cpu; |
581 | 581 | ||
582 | for_each_cpu(cpu, mask) { | 582 | for_each_cpu(cpu, mask) { |
583 | if (i) | 583 | if (i) |
584 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); | 584 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); |
585 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); | 585 | i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); |
586 | if (i >= (PAGE_SIZE - 5)) | 586 | if (i >= (PAGE_SIZE - 5)) |
587 | break; | 587 | break; |
588 | } | 588 | } |
589 | i += sprintf(&buf[i], "\n"); | 589 | i += sprintf(&buf[i], "\n"); |
590 | return i; | 590 | return i; |
591 | } | 591 | } |
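
show_cpus() builds a space-separated CPU list into the single page that sysfs hands it, using scnprintf() so each write is clamped to the space remaining, and bailing out a few bytes early so the trailing newline always fits. Plain snprintf() returns the would-be length rather than the bytes stored, so a user-space version of the same loop needs a small clamping wrapper; a sketch with BUF_SIZE standing in for PAGE_SIZE:

    #include <stdarg.h>
    #include <stdio.h>

    #define BUF_SIZE 64    /* stands in for PAGE_SIZE */

    /* user-space scnprintf: returns bytes actually stored, so the
     * running offset can never overshoot the buffer */
    static long scnprintf_us(char *buf, size_t size, const char *fmt, ...)
    {
        va_list ap;
        int ret;

        if (size == 0)
            return 0;
        va_start(ap, fmt);
        ret = vsnprintf(buf, size, fmt, ap);
        va_end(ap);
        if (ret < 0)
            return 0;
        return ret >= (int)size ? (long)(size - 1) : ret;
    }

    static long show_cpus_example(const unsigned int *cpus, int n, char *buf)
    {
        long i = 0;

        for (int j = 0; j < n; j++) {
            if (i)
                i += scnprintf_us(&buf[i], BUF_SIZE - i - 2, " ");
            i += scnprintf_us(&buf[i], BUF_SIZE - i - 2, "%u", cpus[j]);
            if (i >= BUF_SIZE - 5)
                break;        /* keep room for the newline */
        }
        i += scnprintf_us(&buf[i], BUF_SIZE - i, "\n");
        return i;
    }
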
592 | 592 | ||
593 | /** | 593 | /** |
594 | * show_related_cpus - show the CPUs affected by each transition even if | 594 | * show_related_cpus - show the CPUs affected by each transition even if |
595 | * hw coordination is in use | 595 | * hw coordination is in use |
596 | */ | 596 | */ |
597 | static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) | 597 | static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) |
598 | { | 598 | { |
599 | return show_cpus(policy->related_cpus, buf); | 599 | return show_cpus(policy->related_cpus, buf); |
600 | } | 600 | } |
601 | 601 | ||
602 | /** | 602 | /** |
603 | * show_affected_cpus - show the CPUs affected by each transition | 603 | * show_affected_cpus - show the CPUs affected by each transition |
604 | */ | 604 | */ |
605 | static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf) | 605 | static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf) |
606 | { | 606 | { |
607 | return show_cpus(policy->cpus, buf); | 607 | return show_cpus(policy->cpus, buf); |
608 | } | 608 | } |
609 | 609 | ||
610 | static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, | 610 | static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy, |
611 | const char *buf, size_t count) | 611 | const char *buf, size_t count) |
612 | { | 612 | { |
613 | unsigned int freq = 0; | 613 | unsigned int freq = 0; |
614 | unsigned int ret; | 614 | unsigned int ret; |
615 | 615 | ||
616 | if (!policy->governor || !policy->governor->store_setspeed) | 616 | if (!policy->governor || !policy->governor->store_setspeed) |
617 | return -EINVAL; | 617 | return -EINVAL; |
618 | 618 | ||
619 | ret = sscanf(buf, "%u", &freq); | 619 | ret = sscanf(buf, "%u", &freq); |
620 | if (ret != 1) | 620 | if (ret != 1) |
621 | return -EINVAL; | 621 | return -EINVAL; |
622 | 622 | ||
623 | policy->governor->store_setspeed(policy, freq); | 623 | policy->governor->store_setspeed(policy, freq); |
624 | 624 | ||
625 | return count; | 625 | return count; |
626 | } | 626 | } |
627 | 627 | ||
628 | static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) | 628 | static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf) |
629 | { | 629 | { |
630 | if (!policy->governor || !policy->governor->show_setspeed) | 630 | if (!policy->governor || !policy->governor->show_setspeed) |
631 | return sprintf(buf, "<unsupported>\n"); | 631 | return sprintf(buf, "<unsupported>\n"); |
632 | 632 | ||
633 | return policy->governor->show_setspeed(policy, buf); | 633 | return policy->governor->show_setspeed(policy, buf); |
634 | } | 634 | } |
635 | 635 | ||
636 | /** | 636 | /** |
637 | * show_bios_limit - show the current cpufreq HW/BIOS limitation | 637 | * show_bios_limit - show the current cpufreq HW/BIOS limitation |
638 | */ | 638 | */ |
639 | static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) | 639 | static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf) |
640 | { | 640 | { |
641 | unsigned int limit; | 641 | unsigned int limit; |
642 | int ret; | 642 | int ret; |
643 | if (cpufreq_driver->bios_limit) { | 643 | if (cpufreq_driver->bios_limit) { |
644 | ret = cpufreq_driver->bios_limit(policy->cpu, &limit); | 644 | ret = cpufreq_driver->bios_limit(policy->cpu, &limit); |
645 | if (!ret) | 645 | if (!ret) |
646 | return sprintf(buf, "%u\n", limit); | 646 | return sprintf(buf, "%u\n", limit); |
647 | } | 647 | } |
648 | return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); | 648 | return sprintf(buf, "%u\n", policy->cpuinfo.max_freq); |
649 | } | 649 | } |
650 | 650 | ||
651 | cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400); | 651 | cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400); |
652 | cpufreq_freq_attr_ro(cpuinfo_min_freq); | 652 | cpufreq_freq_attr_ro(cpuinfo_min_freq); |
653 | cpufreq_freq_attr_ro(cpuinfo_max_freq); | 653 | cpufreq_freq_attr_ro(cpuinfo_max_freq); |
654 | cpufreq_freq_attr_ro(cpuinfo_transition_latency); | 654 | cpufreq_freq_attr_ro(cpuinfo_transition_latency); |
655 | cpufreq_freq_attr_ro(scaling_available_governors); | 655 | cpufreq_freq_attr_ro(scaling_available_governors); |
656 | cpufreq_freq_attr_ro(scaling_driver); | 656 | cpufreq_freq_attr_ro(scaling_driver); |
657 | cpufreq_freq_attr_ro(scaling_cur_freq); | 657 | cpufreq_freq_attr_ro(scaling_cur_freq); |
658 | cpufreq_freq_attr_ro(bios_limit); | 658 | cpufreq_freq_attr_ro(bios_limit); |
659 | cpufreq_freq_attr_ro(related_cpus); | 659 | cpufreq_freq_attr_ro(related_cpus); |
660 | cpufreq_freq_attr_ro(affected_cpus); | 660 | cpufreq_freq_attr_ro(affected_cpus); |
661 | cpufreq_freq_attr_rw(scaling_min_freq); | 661 | cpufreq_freq_attr_rw(scaling_min_freq); |
662 | cpufreq_freq_attr_rw(scaling_max_freq); | 662 | cpufreq_freq_attr_rw(scaling_max_freq); |
663 | cpufreq_freq_attr_rw(scaling_governor); | 663 | cpufreq_freq_attr_rw(scaling_governor); |
664 | cpufreq_freq_attr_rw(scaling_setspeed); | 664 | cpufreq_freq_attr_rw(scaling_setspeed); |
665 | 665 | ||
666 | static struct attribute *default_attrs[] = { | 666 | static struct attribute *default_attrs[] = { |
667 | &cpuinfo_min_freq.attr, | 667 | &cpuinfo_min_freq.attr, |
668 | &cpuinfo_max_freq.attr, | 668 | &cpuinfo_max_freq.attr, |
669 | &cpuinfo_transition_latency.attr, | 669 | &cpuinfo_transition_latency.attr, |
670 | &scaling_min_freq.attr, | 670 | &scaling_min_freq.attr, |
671 | &scaling_max_freq.attr, | 671 | &scaling_max_freq.attr, |
672 | &affected_cpus.attr, | 672 | &affected_cpus.attr, |
673 | &related_cpus.attr, | 673 | &related_cpus.attr, |
674 | &scaling_governor.attr, | 674 | &scaling_governor.attr, |
675 | &scaling_driver.attr, | 675 | &scaling_driver.attr, |
676 | &scaling_available_governors.attr, | 676 | &scaling_available_governors.attr, |
677 | &scaling_setspeed.attr, | 677 | &scaling_setspeed.attr, |
678 | NULL | 678 | NULL |
679 | }; | 679 | }; |
680 | 680 | ||
681 | struct kobject *cpufreq_global_kobject; | ||
682 | EXPORT_SYMBOL(cpufreq_global_kobject); | ||
683 | |||
684 | #define to_policy(k) container_of(k, struct cpufreq_policy, kobj) | 681 | #define to_policy(k) container_of(k, struct cpufreq_policy, kobj) |
685 | #define to_attr(a) container_of(a, struct freq_attr, attr) | 682 | #define to_attr(a) container_of(a, struct freq_attr, attr) |
686 | 683 | ||
687 | static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) | 684 | static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) |
688 | { | 685 | { |
689 | struct cpufreq_policy *policy = to_policy(kobj); | 686 | struct cpufreq_policy *policy = to_policy(kobj); |
690 | struct freq_attr *fattr = to_attr(attr); | 687 | struct freq_attr *fattr = to_attr(attr); |
691 | ssize_t ret = -EINVAL; | 688 | ssize_t ret = -EINVAL; |
692 | policy = cpufreq_cpu_get_sysfs(policy->cpu); | 689 | policy = cpufreq_cpu_get_sysfs(policy->cpu); |
693 | if (!policy) | 690 | if (!policy) |
694 | goto no_policy; | 691 | goto no_policy; |
695 | 692 | ||
696 | if (lock_policy_rwsem_read(policy->cpu) < 0) | 693 | if (lock_policy_rwsem_read(policy->cpu) < 0) |
697 | goto fail; | 694 | goto fail; |
698 | 695 | ||
699 | if (fattr->show) | 696 | if (fattr->show) |
700 | ret = fattr->show(policy, buf); | 697 | ret = fattr->show(policy, buf); |
701 | else | 698 | else |
702 | ret = -EIO; | 699 | ret = -EIO; |
703 | 700 | ||
704 | unlock_policy_rwsem_read(policy->cpu); | 701 | unlock_policy_rwsem_read(policy->cpu); |
705 | fail: | 702 | fail: |
706 | cpufreq_cpu_put_sysfs(policy); | 703 | cpufreq_cpu_put_sysfs(policy); |
707 | no_policy: | 704 | no_policy: |
708 | return ret; | 705 | return ret; |
709 | } | 706 | } |
710 | 707 | ||
711 | static ssize_t store(struct kobject *kobj, struct attribute *attr, | 708 | static ssize_t store(struct kobject *kobj, struct attribute *attr, |
712 | const char *buf, size_t count) | 709 | const char *buf, size_t count) |
713 | { | 710 | { |
714 | struct cpufreq_policy *policy = to_policy(kobj); | 711 | struct cpufreq_policy *policy = to_policy(kobj); |
715 | struct freq_attr *fattr = to_attr(attr); | 712 | struct freq_attr *fattr = to_attr(attr); |
716 | ssize_t ret = -EINVAL; | 713 | ssize_t ret = -EINVAL; |
717 | policy = cpufreq_cpu_get_sysfs(policy->cpu); | 714 | policy = cpufreq_cpu_get_sysfs(policy->cpu); |
718 | if (!policy) | 715 | if (!policy) |
719 | goto no_policy; | 716 | goto no_policy; |
720 | 717 | ||
721 | if (lock_policy_rwsem_write(policy->cpu) < 0) | 718 | if (lock_policy_rwsem_write(policy->cpu) < 0) |
722 | goto fail; | 719 | goto fail; |
723 | 720 | ||
724 | if (fattr->store) | 721 | if (fattr->store) |
725 | ret = fattr->store(policy, buf, count); | 722 | ret = fattr->store(policy, buf, count); |
726 | else | 723 | else |
727 | ret = -EIO; | 724 | ret = -EIO; |
728 | 725 | ||
729 | unlock_policy_rwsem_write(policy->cpu); | 726 | unlock_policy_rwsem_write(policy->cpu); |
730 | fail: | 727 | fail: |
731 | cpufreq_cpu_put_sysfs(policy); | 728 | cpufreq_cpu_put_sysfs(policy); |
732 | no_policy: | 729 | no_policy: |
733 | return ret; | 730 | return ret; |
734 | } | 731 | } |
735 | 732 | ||
736 | static void cpufreq_sysfs_release(struct kobject *kobj) | 733 | static void cpufreq_sysfs_release(struct kobject *kobj) |
737 | { | 734 | { |
738 | struct cpufreq_policy *policy = to_policy(kobj); | 735 | struct cpufreq_policy *policy = to_policy(kobj); |
739 | pr_debug("last reference is dropped\n"); | 736 | pr_debug("last reference is dropped\n"); |
740 | complete(&policy->kobj_unregister); | 737 | complete(&policy->kobj_unregister); |
741 | } | 738 | } |
742 | 739 | ||
743 | static const struct sysfs_ops sysfs_ops = { | 740 | static const struct sysfs_ops sysfs_ops = { |
744 | .show = show, | 741 | .show = show, |
745 | .store = store, | 742 | .store = store, |
746 | }; | 743 | }; |
747 | 744 | ||
748 | static struct kobj_type ktype_cpufreq = { | 745 | static struct kobj_type ktype_cpufreq = { |
749 | .sysfs_ops = &sysfs_ops, | 746 | .sysfs_ops = &sysfs_ops, |
750 | .default_attrs = default_attrs, | 747 | .default_attrs = default_attrs, |
751 | .release = cpufreq_sysfs_release, | 748 | .release = cpufreq_sysfs_release, |
752 | }; | 749 | }; |
753 | 750 | ||
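Note: the show()/store() wrappers above recover the policy and the freq_attr via container_of(), take the policy rwsem, and only then forward to the attribute's own handlers, so a per-policy attribute merely supplies those two callbacks. A minimal sketch of how such an attribute could be declared inside this file (the handlers and the "demo_freq" name are hypothetical; the real attributes here are generated by the cpufreq_freq_attr_* macros from include/linux/cpufreq.h):

    /* Hypothetical read-write policy attribute, not part of this diff. */
    static ssize_t show_demo_freq(struct cpufreq_policy *policy, char *buf)
    {
            /* reached via fattr->show() with the rwsem held for reading */
            return sprintf(buf, "%u\n", policy->cur);
    }

    static ssize_t store_demo_freq(struct cpufreq_policy *policy,
                                   const char *buf, size_t count)
    {
            unsigned int val;

            /* reached via fattr->store() with the rwsem held for writing */
            if (sscanf(buf, "%u", &val) != 1)
                    return -EINVAL;
            return count;
    }

    static struct freq_attr demo_freq =
            __ATTR(demo_freq, 0644, show_demo_freq, store_demo_freq);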
751 | struct kobject *cpufreq_global_kobject; | ||
752 | EXPORT_SYMBOL(cpufreq_global_kobject); | ||
753 | |||
754 | static int cpufreq_global_kobject_usage; | ||
755 | |||
756 | int cpufreq_get_global_kobject(void) | ||
757 | { | ||
758 | if (!cpufreq_global_kobject_usage++) | ||
759 | return kobject_add(cpufreq_global_kobject, | ||
760 | &cpu_subsys.dev_root->kobj, "%s", "cpufreq"); | ||
761 | |||
762 | return 0; | ||
763 | } | ||
764 | EXPORT_SYMBOL(cpufreq_get_global_kobject); | ||
765 | |||
766 | void cpufreq_put_global_kobject(void) | ||
767 | { | ||
768 | if (!--cpufreq_global_kobject_usage) | ||
769 | kobject_del(cpufreq_global_kobject); | ||
770 | } | ||
771 | EXPORT_SYMBOL(cpufreq_put_global_kobject); | ||
772 | |||
773 | int cpufreq_sysfs_create_file(const struct attribute *attr) | ||
774 | { | ||
775 | int ret = cpufreq_get_global_kobject(); | ||
776 | |||
777 | if (!ret) { | ||
778 | ret = sysfs_create_file(cpufreq_global_kobject, attr); | ||
779 | if (ret) | ||
780 | cpufreq_put_global_kobject(); | ||
781 | } | ||
782 | |||
783 | return ret; | ||
784 | } | ||
785 | EXPORT_SYMBOL(cpufreq_sysfs_create_file); | ||
786 | |||
787 | void cpufreq_sysfs_remove_file(const struct attribute *attr) | ||
788 | { | ||
789 | sysfs_remove_file(cpufreq_global_kobject, attr); | ||
790 | cpufreq_put_global_kobject(); | ||
791 | } | ||
792 | EXPORT_SYMBOL(cpufreq_sysfs_remove_file); | ||
793 | |||
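These helpers are the substance of the commit: instead of being added unconditionally, the global kobject is now reference-counted, so /sys/devices/system/cpu/cpufreq appears when the first file is created under it and disappears with the last. cpufreq_sysfs_create_file() pairs a get with the file creation (and drops the reference again on failure); cpufreq_sysfs_remove_file() pairs the removal with a put. A sketch of a client module, assuming the global kobject dispatches ordinary kobj_attribute handlers; the "demo" attribute and the module are hypothetical:

    #include <linux/cpufreq.h>
    #include <linux/module.h>
    #include <linux/sysfs.h>

    static ssize_t demo_show(struct kobject *kobj, struct kobj_attribute *attr,
                             char *buf)
    {
            return sprintf(buf, "42\n");
    }
    static struct kobj_attribute demo = __ATTR_RO(demo);

    static int __init demo_init(void)
    {
            /* first file under the kobject: the cpufreq directory is created */
            return cpufreq_sysfs_create_file(&demo.attr);
    }

    static void __exit demo_exit(void)
    {
            /* last file removed: the directory is deleted again */
            cpufreq_sysfs_remove_file(&demo.attr);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");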
754 | /* symlink affected CPUs */ | 794 | /* symlink affected CPUs */ |
755 | static int cpufreq_add_dev_symlink(unsigned int cpu, | 795 | static int cpufreq_add_dev_symlink(unsigned int cpu, |
756 | struct cpufreq_policy *policy) | 796 | struct cpufreq_policy *policy) |
757 | { | 797 | { |
758 | unsigned int j; | 798 | unsigned int j; |
759 | int ret = 0; | 799 | int ret = 0; |
760 | 800 | ||
761 | for_each_cpu(j, policy->cpus) { | 801 | for_each_cpu(j, policy->cpus) { |
762 | struct cpufreq_policy *managed_policy; | 802 | struct cpufreq_policy *managed_policy; |
763 | struct device *cpu_dev; | 803 | struct device *cpu_dev; |
764 | 804 | ||
765 | if (j == cpu) | 805 | if (j == cpu) |
766 | continue; | 806 | continue; |
767 | 807 | ||
768 | pr_debug("CPU %u already managed, adding link\n", j); | 808 | pr_debug("CPU %u already managed, adding link\n", j); |
769 | managed_policy = cpufreq_cpu_get(cpu); | 809 | managed_policy = cpufreq_cpu_get(cpu); |
770 | cpu_dev = get_cpu_device(j); | 810 | cpu_dev = get_cpu_device(j); |
771 | ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, | 811 | ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, |
772 | "cpufreq"); | 812 | "cpufreq"); |
773 | if (ret) { | 813 | if (ret) { |
774 | cpufreq_cpu_put(managed_policy); | 814 | cpufreq_cpu_put(managed_policy); |
775 | return ret; | 815 | return ret; |
776 | } | 816 | } |
777 | } | 817 | } |
778 | return ret; | 818 | return ret; |
779 | } | 819 | } |
780 | 820 | ||
781 | static int cpufreq_add_dev_interface(unsigned int cpu, | 821 | static int cpufreq_add_dev_interface(unsigned int cpu, |
782 | struct cpufreq_policy *policy, | 822 | struct cpufreq_policy *policy, |
783 | struct device *dev) | 823 | struct device *dev) |
784 | { | 824 | { |
785 | struct cpufreq_policy new_policy; | 825 | struct cpufreq_policy new_policy; |
786 | struct freq_attr **drv_attr; | 826 | struct freq_attr **drv_attr; |
787 | unsigned long flags; | 827 | unsigned long flags; |
788 | int ret = 0; | 828 | int ret = 0; |
789 | unsigned int j; | 829 | unsigned int j; |
790 | 830 | ||
791 | /* prepare interface data */ | 831 | /* prepare interface data */ |
792 | ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, | 832 | ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, |
793 | &dev->kobj, "cpufreq"); | 833 | &dev->kobj, "cpufreq"); |
794 | if (ret) | 834 | if (ret) |
795 | return ret; | 835 | return ret; |
796 | 836 | ||
797 | /* set up files for this cpu device */ | 837 | /* set up files for this cpu device */ |
798 | drv_attr = cpufreq_driver->attr; | 838 | drv_attr = cpufreq_driver->attr; |
799 | while ((drv_attr) && (*drv_attr)) { | 839 | while ((drv_attr) && (*drv_attr)) { |
800 | ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); | 840 | ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr)); |
801 | if (ret) | 841 | if (ret) |
802 | goto err_out_kobj_put; | 842 | goto err_out_kobj_put; |
803 | drv_attr++; | 843 | drv_attr++; |
804 | } | 844 | } |
805 | if (cpufreq_driver->get) { | 845 | if (cpufreq_driver->get) { |
806 | ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); | 846 | ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr); |
807 | if (ret) | 847 | if (ret) |
808 | goto err_out_kobj_put; | 848 | goto err_out_kobj_put; |
809 | } | 849 | } |
810 | if (cpufreq_driver->target) { | 850 | if (cpufreq_driver->target) { |
811 | ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); | 851 | ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); |
812 | if (ret) | 852 | if (ret) |
813 | goto err_out_kobj_put; | 853 | goto err_out_kobj_put; |
814 | } | 854 | } |
815 | if (cpufreq_driver->bios_limit) { | 855 | if (cpufreq_driver->bios_limit) { |
816 | ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); | 856 | ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); |
817 | if (ret) | 857 | if (ret) |
818 | goto err_out_kobj_put; | 858 | goto err_out_kobj_put; |
819 | } | 859 | } |
820 | 860 | ||
821 | write_lock_irqsave(&cpufreq_driver_lock, flags); | 861 | write_lock_irqsave(&cpufreq_driver_lock, flags); |
822 | for_each_cpu(j, policy->cpus) { | 862 | for_each_cpu(j, policy->cpus) { |
823 | per_cpu(cpufreq_cpu_data, j) = policy; | 863 | per_cpu(cpufreq_cpu_data, j) = policy; |
824 | per_cpu(cpufreq_policy_cpu, j) = policy->cpu; | 864 | per_cpu(cpufreq_policy_cpu, j) = policy->cpu; |
825 | } | 865 | } |
826 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 866 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
827 | 867 | ||
828 | ret = cpufreq_add_dev_symlink(cpu, policy); | 868 | ret = cpufreq_add_dev_symlink(cpu, policy); |
829 | if (ret) | 869 | if (ret) |
830 | goto err_out_kobj_put; | 870 | goto err_out_kobj_put; |
831 | 871 | ||
832 | memcpy(&new_policy, policy, sizeof(struct cpufreq_policy)); | 872 | memcpy(&new_policy, policy, sizeof(struct cpufreq_policy)); |
833 | /* assure that the starting sequence is run in __cpufreq_set_policy */ | 873 | /* assure that the starting sequence is run in __cpufreq_set_policy */ |
834 | policy->governor = NULL; | 874 | policy->governor = NULL; |
835 | 875 | ||
836 | /* set default policy */ | 876 | /* set default policy */ |
837 | ret = __cpufreq_set_policy(policy, &new_policy); | 877 | ret = __cpufreq_set_policy(policy, &new_policy); |
838 | policy->user_policy.policy = policy->policy; | 878 | policy->user_policy.policy = policy->policy; |
839 | policy->user_policy.governor = policy->governor; | 879 | policy->user_policy.governor = policy->governor; |
840 | 880 | ||
841 | if (ret) { | 881 | if (ret) { |
842 | pr_debug("setting policy failed\n"); | 882 | pr_debug("setting policy failed\n"); |
843 | if (cpufreq_driver->exit) | 883 | if (cpufreq_driver->exit) |
844 | cpufreq_driver->exit(policy); | 884 | cpufreq_driver->exit(policy); |
845 | } | 885 | } |
846 | return ret; | 886 | return ret; |
847 | 887 | ||
848 | err_out_kobj_put: | 888 | err_out_kobj_put: |
849 | kobject_put(&policy->kobj); | 889 | kobject_put(&policy->kobj); |
850 | wait_for_completion(&policy->kobj_unregister); | 890 | wait_for_completion(&policy->kobj_unregister); |
851 | return ret; | 891 | return ret; |
852 | } | 892 | } |
853 | 893 | ||
854 | #ifdef CONFIG_HOTPLUG_CPU | 894 | #ifdef CONFIG_HOTPLUG_CPU |
855 | static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling, | 895 | static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling, |
856 | struct device *dev) | 896 | struct device *dev) |
857 | { | 897 | { |
858 | struct cpufreq_policy *policy; | 898 | struct cpufreq_policy *policy; |
859 | int ret = 0, has_target = !!cpufreq_driver->target; | 899 | int ret = 0, has_target = !!cpufreq_driver->target; |
860 | unsigned long flags; | 900 | unsigned long flags; |
861 | 901 | ||
862 | policy = cpufreq_cpu_get(sibling); | 902 | policy = cpufreq_cpu_get(sibling); |
863 | WARN_ON(!policy); | 903 | WARN_ON(!policy); |
864 | 904 | ||
865 | if (has_target) | 905 | if (has_target) |
866 | __cpufreq_governor(policy, CPUFREQ_GOV_STOP); | 906 | __cpufreq_governor(policy, CPUFREQ_GOV_STOP); |
867 | 907 | ||
868 | lock_policy_rwsem_write(sibling); | 908 | lock_policy_rwsem_write(sibling); |
869 | 909 | ||
870 | write_lock_irqsave(&cpufreq_driver_lock, flags); | 910 | write_lock_irqsave(&cpufreq_driver_lock, flags); |
871 | 911 | ||
872 | cpumask_set_cpu(cpu, policy->cpus); | 912 | cpumask_set_cpu(cpu, policy->cpus); |
873 | per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu; | 913 | per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu; |
874 | per_cpu(cpufreq_cpu_data, cpu) = policy; | 914 | per_cpu(cpufreq_cpu_data, cpu) = policy; |
875 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 915 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
876 | 916 | ||
877 | unlock_policy_rwsem_write(sibling); | 917 | unlock_policy_rwsem_write(sibling); |
878 | 918 | ||
879 | if (has_target) { | 919 | if (has_target) { |
880 | __cpufreq_governor(policy, CPUFREQ_GOV_START); | 920 | __cpufreq_governor(policy, CPUFREQ_GOV_START); |
881 | __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); | 921 | __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); |
882 | } | 922 | } |
883 | 923 | ||
884 | ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); | 924 | ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); |
885 | if (ret) { | 925 | if (ret) { |
886 | cpufreq_cpu_put(policy); | 926 | cpufreq_cpu_put(policy); |
887 | return ret; | 927 | return ret; |
888 | } | 928 | } |
889 | 929 | ||
890 | return 0; | 930 | return 0; |
891 | } | 931 | } |
892 | #endif | 932 | #endif |
893 | 933 | ||
894 | /** | 934 | /** |
895 | * cpufreq_add_dev - add a CPU device | 935 | * cpufreq_add_dev - add a CPU device |
896 | * | 936 | * |
897 | * Adds the cpufreq interface for a CPU device. | 937 | * Adds the cpufreq interface for a CPU device. |
898 | * | 938 | * |
899 | * The Oracle says: try running cpufreq registration/unregistration concurrently | 939 | * The Oracle says: try running cpufreq registration/unregistration concurrently |
900 | * with cpu hotplugging and all hell will break loose. Tried to clean this | 940 | * with cpu hotplugging and all hell will break loose. Tried to clean this |
901 | * mess up, but more thorough testing is needed. - Mathieu | 941 | * mess up, but more thorough testing is needed. - Mathieu |
902 | */ | 942 | */ |
903 | static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) | 943 | static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) |
904 | { | 944 | { |
905 | unsigned int j, cpu = dev->id; | 945 | unsigned int j, cpu = dev->id; |
906 | int ret = -ENOMEM; | 946 | int ret = -ENOMEM; |
907 | struct cpufreq_policy *policy; | 947 | struct cpufreq_policy *policy; |
908 | unsigned long flags; | 948 | unsigned long flags; |
909 | #ifdef CONFIG_HOTPLUG_CPU | 949 | #ifdef CONFIG_HOTPLUG_CPU |
910 | struct cpufreq_governor *gov; | 950 | struct cpufreq_governor *gov; |
911 | int sibling; | 951 | int sibling; |
912 | #endif | 952 | #endif |
913 | 953 | ||
914 | if (cpu_is_offline(cpu)) | 954 | if (cpu_is_offline(cpu)) |
915 | return 0; | 955 | return 0; |
916 | 956 | ||
917 | pr_debug("adding CPU %u\n", cpu); | 957 | pr_debug("adding CPU %u\n", cpu); |
918 | 958 | ||
919 | #ifdef CONFIG_SMP | 959 | #ifdef CONFIG_SMP |
920 | /* check whether a different CPU already registered this | 960 | /* check whether a different CPU already registered this |
921 | * CPU because it is in the same boat. */ | 961 | * CPU because it is in the same boat. */ |
922 | policy = cpufreq_cpu_get(cpu); | 962 | policy = cpufreq_cpu_get(cpu); |
923 | if (unlikely(policy)) { | 963 | if (unlikely(policy)) { |
924 | cpufreq_cpu_put(policy); | 964 | cpufreq_cpu_put(policy); |
925 | return 0; | 965 | return 0; |
926 | } | 966 | } |
927 | 967 | ||
928 | #ifdef CONFIG_HOTPLUG_CPU | 968 | #ifdef CONFIG_HOTPLUG_CPU |
929 | /* Check if this cpu was hot-unplugged earlier and has siblings */ | 969 | /* Check if this cpu was hot-unplugged earlier and has siblings */ |
930 | read_lock_irqsave(&cpufreq_driver_lock, flags); | 970 | read_lock_irqsave(&cpufreq_driver_lock, flags); |
931 | for_each_online_cpu(sibling) { | 971 | for_each_online_cpu(sibling) { |
932 | struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling); | 972 | struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling); |
933 | if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) { | 973 | if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) { |
934 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); | 974 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); |
935 | return cpufreq_add_policy_cpu(cpu, sibling, dev); | 975 | return cpufreq_add_policy_cpu(cpu, sibling, dev); |
936 | } | 976 | } |
937 | } | 977 | } |
938 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); | 978 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); |
939 | #endif | 979 | #endif |
940 | #endif | 980 | #endif |
941 | 981 | ||
942 | if (!try_module_get(cpufreq_driver->owner)) { | 982 | if (!try_module_get(cpufreq_driver->owner)) { |
943 | ret = -EINVAL; | 983 | ret = -EINVAL; |
944 | goto module_out; | 984 | goto module_out; |
945 | } | 985 | } |
946 | 986 | ||
947 | policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL); | 987 | policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL); |
948 | if (!policy) | 988 | if (!policy) |
949 | goto nomem_out; | 989 | goto nomem_out; |
950 | 990 | ||
951 | if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) | 991 | if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) |
952 | goto err_free_policy; | 992 | goto err_free_policy; |
953 | 993 | ||
954 | if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) | 994 | if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) |
955 | goto err_free_cpumask; | 995 | goto err_free_cpumask; |
956 | 996 | ||
957 | policy->cpu = cpu; | 997 | policy->cpu = cpu; |
958 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | 998 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; |
959 | cpumask_copy(policy->cpus, cpumask_of(cpu)); | 999 | cpumask_copy(policy->cpus, cpumask_of(cpu)); |
960 | 1000 | ||
961 | /* Initially set CPU itself as the policy_cpu */ | 1001 | /* Initially set CPU itself as the policy_cpu */ |
962 | per_cpu(cpufreq_policy_cpu, cpu) = cpu; | 1002 | per_cpu(cpufreq_policy_cpu, cpu) = cpu; |
963 | 1003 | ||
964 | init_completion(&policy->kobj_unregister); | 1004 | init_completion(&policy->kobj_unregister); |
965 | INIT_WORK(&policy->update, handle_update); | 1005 | INIT_WORK(&policy->update, handle_update); |
966 | 1006 | ||
967 | /* call driver. From then on the cpufreq driver must be able | 1007 | /* call driver. From then on the cpufreq driver must be able |
968 | * to accept all calls to ->verify and ->setpolicy for this CPU | 1008 | * to accept all calls to ->verify and ->setpolicy for this CPU |
969 | */ | 1009 | */ |
970 | ret = cpufreq_driver->init(policy); | 1010 | ret = cpufreq_driver->init(policy); |
971 | if (ret) { | 1011 | if (ret) { |
972 | pr_debug("initialization failed\n"); | 1012 | pr_debug("initialization failed\n"); |
973 | goto err_set_policy_cpu; | 1013 | goto err_set_policy_cpu; |
974 | } | 1014 | } |
975 | 1015 | ||
976 | /* related cpus should at least have policy->cpus | 1016 | /* related cpus should at least have policy->cpus |
977 | cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); | 1017 | cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); |
978 | 1018 | ||
979 | /* | 1019 | /* |
980 | * affected cpus must always be the ones that are online. We aren't | 1020 | * affected cpus must always be the ones that are online. We aren't |
981 | * managing offline cpus here. | 1021 | * managing offline cpus here. |
982 | */ | 1022 | */ |
983 | cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); | 1023 | cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); |
984 | 1024 | ||
985 | policy->user_policy.min = policy->min; | 1025 | policy->user_policy.min = policy->min; |
986 | policy->user_policy.max = policy->max; | 1026 | policy->user_policy.max = policy->max; |
987 | 1027 | ||
988 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | 1028 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, |
989 | CPUFREQ_START, policy); | 1029 | CPUFREQ_START, policy); |
990 | 1030 | ||
991 | #ifdef CONFIG_HOTPLUG_CPU | 1031 | #ifdef CONFIG_HOTPLUG_CPU |
992 | gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); | 1032 | gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); |
993 | if (gov) { | 1033 | if (gov) { |
994 | policy->governor = gov; | 1034 | policy->governor = gov; |
995 | pr_debug("Restoring governor %s for cpu %d\n", | 1035 | pr_debug("Restoring governor %s for cpu %d\n", |
996 | policy->governor->name, cpu); | 1036 | policy->governor->name, cpu); |
997 | } | 1037 | } |
998 | #endif | 1038 | #endif |
999 | 1039 | ||
1000 | ret = cpufreq_add_dev_interface(cpu, policy, dev); | 1040 | ret = cpufreq_add_dev_interface(cpu, policy, dev); |
1001 | if (ret) | 1041 | if (ret) |
1002 | goto err_out_unregister; | 1042 | goto err_out_unregister; |
1003 | 1043 | ||
1004 | kobject_uevent(&policy->kobj, KOBJ_ADD); | 1044 | kobject_uevent(&policy->kobj, KOBJ_ADD); |
1005 | module_put(cpufreq_driver->owner); | 1045 | module_put(cpufreq_driver->owner); |
1006 | pr_debug("initialization complete\n"); | 1046 | pr_debug("initialization complete\n"); |
1007 | 1047 | ||
1008 | return 0; | 1048 | return 0; |
1009 | 1049 | ||
1010 | err_out_unregister: | 1050 | err_out_unregister: |
1011 | write_lock_irqsave(&cpufreq_driver_lock, flags); | 1051 | write_lock_irqsave(&cpufreq_driver_lock, flags); |
1012 | for_each_cpu(j, policy->cpus) | 1052 | for_each_cpu(j, policy->cpus) |
1013 | per_cpu(cpufreq_cpu_data, j) = NULL; | 1053 | per_cpu(cpufreq_cpu_data, j) = NULL; |
1014 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1054 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1015 | 1055 | ||
1016 | kobject_put(&policy->kobj); | 1056 | kobject_put(&policy->kobj); |
1017 | wait_for_completion(&policy->kobj_unregister); | 1057 | wait_for_completion(&policy->kobj_unregister); |
1018 | 1058 | ||
1019 | err_set_policy_cpu: | 1059 | err_set_policy_cpu: |
1020 | per_cpu(cpufreq_policy_cpu, cpu) = -1; | 1060 | per_cpu(cpufreq_policy_cpu, cpu) = -1; |
1021 | free_cpumask_var(policy->related_cpus); | 1061 | free_cpumask_var(policy->related_cpus); |
1022 | err_free_cpumask: | 1062 | err_free_cpumask: |
1023 | free_cpumask_var(policy->cpus); | 1063 | free_cpumask_var(policy->cpus); |
1024 | err_free_policy: | 1064 | err_free_policy: |
1025 | kfree(policy); | 1065 | kfree(policy); |
1026 | nomem_out: | 1066 | nomem_out: |
1027 | module_put(cpufreq_driver->owner); | 1067 | module_put(cpufreq_driver->owner); |
1028 | module_out: | 1068 | module_out: |
1029 | return ret; | 1069 | return ret; |
1030 | } | 1070 | } |
1031 | 1071 | ||
1032 | static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) | 1072 | static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) |
1033 | { | 1073 | { |
1034 | int j; | 1074 | int j; |
1035 | 1075 | ||
1036 | policy->last_cpu = policy->cpu; | 1076 | policy->last_cpu = policy->cpu; |
1037 | policy->cpu = cpu; | 1077 | policy->cpu = cpu; |
1038 | 1078 | ||
1039 | for_each_cpu(j, policy->cpus) | 1079 | for_each_cpu(j, policy->cpus) |
1040 | per_cpu(cpufreq_policy_cpu, j) = cpu; | 1080 | per_cpu(cpufreq_policy_cpu, j) = cpu; |
1041 | 1081 | ||
1042 | #ifdef CONFIG_CPU_FREQ_TABLE | 1082 | #ifdef CONFIG_CPU_FREQ_TABLE |
1043 | cpufreq_frequency_table_update_policy_cpu(policy); | 1083 | cpufreq_frequency_table_update_policy_cpu(policy); |
1044 | #endif | 1084 | #endif |
1045 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | 1085 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, |
1046 | CPUFREQ_UPDATE_POLICY_CPU, policy); | 1086 | CPUFREQ_UPDATE_POLICY_CPU, policy); |
1047 | } | 1087 | } |
1048 | 1088 | ||
1049 | /** | 1089 | /** |
1050 | * __cpufreq_remove_dev - remove a CPU device | 1090 | * __cpufreq_remove_dev - remove a CPU device |
1051 | * | 1091 | * |
1052 | * Removes the cpufreq interface for a CPU device. | 1092 | * Removes the cpufreq interface for a CPU device. |
1053 | * Caller should already have policy_rwsem in write mode for this CPU. | 1093 | * Caller should already have policy_rwsem in write mode for this CPU. |
1054 | * This routine releases the rwsem before returning. | 1094 | * This routine releases the rwsem before returning. |
1055 | */ | 1095 | */ |
1056 | static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) | 1096 | static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) |
1057 | { | 1097 | { |
1058 | unsigned int cpu = dev->id, ret, cpus; | 1098 | unsigned int cpu = dev->id, ret, cpus; |
1059 | unsigned long flags; | 1099 | unsigned long flags; |
1060 | struct cpufreq_policy *data; | 1100 | struct cpufreq_policy *data; |
1061 | struct kobject *kobj; | 1101 | struct kobject *kobj; |
1062 | struct completion *cmp; | 1102 | struct completion *cmp; |
1063 | struct device *cpu_dev; | 1103 | struct device *cpu_dev; |
1064 | 1104 | ||
1065 | pr_debug("%s: unregistering CPU %u\n", __func__, cpu); | 1105 | pr_debug("%s: unregistering CPU %u\n", __func__, cpu); |
1066 | 1106 | ||
1067 | write_lock_irqsave(&cpufreq_driver_lock, flags); | 1107 | write_lock_irqsave(&cpufreq_driver_lock, flags); |
1068 | 1108 | ||
1069 | data = per_cpu(cpufreq_cpu_data, cpu); | 1109 | data = per_cpu(cpufreq_cpu_data, cpu); |
1070 | per_cpu(cpufreq_cpu_data, cpu) = NULL; | 1110 | per_cpu(cpufreq_cpu_data, cpu) = NULL; |
1071 | 1111 | ||
1072 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1112 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1073 | 1113 | ||
1074 | if (!data) { | 1114 | if (!data) { |
1075 | pr_debug("%s: No cpu_data found\n", __func__); | 1115 | pr_debug("%s: No cpu_data found\n", __func__); |
1076 | return -EINVAL; | 1116 | return -EINVAL; |
1077 | } | 1117 | } |
1078 | 1118 | ||
1079 | if (cpufreq_driver->target) | 1119 | if (cpufreq_driver->target) |
1080 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); | 1120 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); |
1081 | 1121 | ||
1082 | #ifdef CONFIG_HOTPLUG_CPU | 1122 | #ifdef CONFIG_HOTPLUG_CPU |
1083 | if (!cpufreq_driver->setpolicy) | 1123 | if (!cpufreq_driver->setpolicy) |
1084 | strncpy(per_cpu(cpufreq_cpu_governor, cpu), | 1124 | strncpy(per_cpu(cpufreq_cpu_governor, cpu), |
1085 | data->governor->name, CPUFREQ_NAME_LEN); | 1125 | data->governor->name, CPUFREQ_NAME_LEN); |
1086 | #endif | 1126 | #endif |
1087 | 1127 | ||
1088 | WARN_ON(lock_policy_rwsem_write(cpu)); | 1128 | WARN_ON(lock_policy_rwsem_write(cpu)); |
1089 | cpus = cpumask_weight(data->cpus); | 1129 | cpus = cpumask_weight(data->cpus); |
1090 | 1130 | ||
1091 | if (cpus > 1) | 1131 | if (cpus > 1) |
1092 | cpumask_clear_cpu(cpu, data->cpus); | 1132 | cpumask_clear_cpu(cpu, data->cpus); |
1093 | unlock_policy_rwsem_write(cpu); | 1133 | unlock_policy_rwsem_write(cpu); |
1094 | 1134 | ||
1095 | if (cpu != data->cpu) { | 1135 | if (cpu != data->cpu) { |
1096 | sysfs_remove_link(&dev->kobj, "cpufreq"); | 1136 | sysfs_remove_link(&dev->kobj, "cpufreq"); |
1097 | } else if (cpus > 1) { | 1137 | } else if (cpus > 1) { |
1098 | /* first sibling now owns the new sysfs dir */ | 1138 | /* first sibling now owns the new sysfs dir */ |
1099 | cpu_dev = get_cpu_device(cpumask_first(data->cpus)); | 1139 | cpu_dev = get_cpu_device(cpumask_first(data->cpus)); |
1100 | sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); | 1140 | sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); |
1101 | ret = kobject_move(&data->kobj, &cpu_dev->kobj); | 1141 | ret = kobject_move(&data->kobj, &cpu_dev->kobj); |
1102 | if (ret) { | 1142 | if (ret) { |
1103 | pr_err("%s: Failed to move kobj: %d", __func__, ret); | 1143 | pr_err("%s: Failed to move kobj: %d", __func__, ret); |
1104 | 1144 | ||
1105 | WARN_ON(lock_policy_rwsem_write(cpu)); | 1145 | WARN_ON(lock_policy_rwsem_write(cpu)); |
1106 | cpumask_set_cpu(cpu, data->cpus); | 1146 | cpumask_set_cpu(cpu, data->cpus); |
1107 | 1147 | ||
1108 | write_lock_irqsave(&cpufreq_driver_lock, flags); | 1148 | write_lock_irqsave(&cpufreq_driver_lock, flags); |
1109 | per_cpu(cpufreq_cpu_data, cpu) = data; | 1149 | per_cpu(cpufreq_cpu_data, cpu) = data; |
1110 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1150 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1111 | 1151 | ||
1112 | unlock_policy_rwsem_write(cpu); | 1152 | unlock_policy_rwsem_write(cpu); |
1113 | 1153 | ||
1114 | ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj, | 1154 | ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj, |
1115 | "cpufreq"); | 1155 | "cpufreq"); |
1116 | return -EINVAL; | 1156 | return -EINVAL; |
1117 | } | 1157 | } |
1118 | 1158 | ||
1119 | WARN_ON(lock_policy_rwsem_write(cpu)); | 1159 | WARN_ON(lock_policy_rwsem_write(cpu)); |
1120 | update_policy_cpu(data, cpu_dev->id); | 1160 | update_policy_cpu(data, cpu_dev->id); |
1121 | unlock_policy_rwsem_write(cpu); | 1161 | unlock_policy_rwsem_write(cpu); |
1122 | pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", | 1162 | pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", |
1123 | __func__, cpu_dev->id, cpu); | 1163 | __func__, cpu_dev->id, cpu); |
1124 | } | 1164 | } |
1125 | 1165 | ||
1126 | if ((cpus == 1) && (cpufreq_driver->target)) | 1166 | if ((cpus == 1) && (cpufreq_driver->target)) |
1127 | __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); | 1167 | __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); |
1128 | 1168 | ||
1129 | pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); | 1169 | pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); |
1130 | cpufreq_cpu_put(data); | 1170 | cpufreq_cpu_put(data); |
1131 | 1171 | ||
1132 | /* If cpu is last user of policy, free policy */ | 1172 | /* If cpu is last user of policy, free policy */ |
1133 | if (cpus == 1) { | 1173 | if (cpus == 1) { |
1134 | lock_policy_rwsem_read(cpu); | 1174 | lock_policy_rwsem_read(cpu); |
1135 | kobj = &data->kobj; | 1175 | kobj = &data->kobj; |
1136 | cmp = &data->kobj_unregister; | 1176 | cmp = &data->kobj_unregister; |
1137 | unlock_policy_rwsem_read(cpu); | 1177 | unlock_policy_rwsem_read(cpu); |
1138 | kobject_put(kobj); | 1178 | kobject_put(kobj); |
1139 | 1179 | ||
1140 | /* we need to make sure that the underlying kobj is actually | 1180 | /* we need to make sure that the underlying kobj is actually |
1141 | * not referenced anymore by anybody before we proceed with | 1181 | * not referenced anymore by anybody before we proceed with |
1142 | * unloading. | 1182 | * unloading. |
1143 | */ | 1183 | */ |
1144 | pr_debug("waiting for dropping of refcount\n"); | 1184 | pr_debug("waiting for dropping of refcount\n"); |
1145 | wait_for_completion(cmp); | 1185 | wait_for_completion(cmp); |
1146 | pr_debug("wait complete\n"); | 1186 | pr_debug("wait complete\n"); |
1147 | 1187 | ||
1148 | if (cpufreq_driver->exit) | 1188 | if (cpufreq_driver->exit) |
1149 | cpufreq_driver->exit(data); | 1189 | cpufreq_driver->exit(data); |
1150 | 1190 | ||
1151 | free_cpumask_var(data->related_cpus); | 1191 | free_cpumask_var(data->related_cpus); |
1152 | free_cpumask_var(data->cpus); | 1192 | free_cpumask_var(data->cpus); |
1153 | kfree(data); | 1193 | kfree(data); |
1154 | } else if (cpufreq_driver->target) { | 1194 | } else if (cpufreq_driver->target) { |
1155 | __cpufreq_governor(data, CPUFREQ_GOV_START); | 1195 | __cpufreq_governor(data, CPUFREQ_GOV_START); |
1156 | __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); | 1196 | __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); |
1157 | } | 1197 | } |
1158 | 1198 | ||
1159 | per_cpu(cpufreq_policy_cpu, cpu) = -1; | 1199 | per_cpu(cpufreq_policy_cpu, cpu) = -1; |
1160 | return 0; | 1200 | return 0; |
1161 | } | 1201 | } |
1162 | 1202 | ||
1163 | 1203 | ||
1164 | static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) | 1204 | static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) |
1165 | { | 1205 | { |
1166 | unsigned int cpu = dev->id; | 1206 | unsigned int cpu = dev->id; |
1167 | int retval; | 1207 | int retval; |
1168 | 1208 | ||
1169 | if (cpu_is_offline(cpu)) | 1209 | if (cpu_is_offline(cpu)) |
1170 | return 0; | 1210 | return 0; |
1171 | 1211 | ||
1172 | retval = __cpufreq_remove_dev(dev, sif); | 1212 | retval = __cpufreq_remove_dev(dev, sif); |
1173 | return retval; | 1213 | return retval; |
1174 | } | 1214 | } |
1175 | 1215 | ||
1176 | 1216 | ||
1177 | static void handle_update(struct work_struct *work) | 1217 | static void handle_update(struct work_struct *work) |
1178 | { | 1218 | { |
1179 | struct cpufreq_policy *policy = | 1219 | struct cpufreq_policy *policy = |
1180 | container_of(work, struct cpufreq_policy, update); | 1220 | container_of(work, struct cpufreq_policy, update); |
1181 | unsigned int cpu = policy->cpu; | 1221 | unsigned int cpu = policy->cpu; |
1182 | pr_debug("handle_update for cpu %u called\n", cpu); | 1222 | pr_debug("handle_update for cpu %u called\n", cpu); |
1183 | cpufreq_update_policy(cpu); | 1223 | cpufreq_update_policy(cpu); |
1184 | } | 1224 | } |
1185 | 1225 | ||
1186 | /** | 1226 | /** |
1187 | * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're in deep trouble. | 1227 | * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're in deep trouble. |
1188 | * @cpu: cpu number | 1228 | * @cpu: cpu number |
1189 | * @old_freq: CPU frequency the kernel thinks the CPU runs at | 1229 | * @old_freq: CPU frequency the kernel thinks the CPU runs at |
1190 | * @new_freq: CPU frequency the CPU actually runs at | 1230 | * @new_freq: CPU frequency the CPU actually runs at |
1191 | * | 1231 | * |
1192 | * We adjust to current frequency first, and need to clean up later. | 1232 | * We adjust to current frequency first, and need to clean up later. |
1193 | * So either call cpufreq_update_policy() or schedule handle_update(). | 1233 | * So either call cpufreq_update_policy() or schedule handle_update(). |
1194 | */ | 1234 | */ |
1195 | static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, | 1235 | static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, |
1196 | unsigned int new_freq) | 1236 | unsigned int new_freq) |
1197 | { | 1237 | { |
1198 | struct cpufreq_policy *policy; | 1238 | struct cpufreq_policy *policy; |
1199 | struct cpufreq_freqs freqs; | 1239 | struct cpufreq_freqs freqs; |
1200 | unsigned long flags; | 1240 | unsigned long flags; |
1201 | 1241 | ||
1202 | 1242 | ||
1203 | pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " | 1243 | pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " |
1204 | "core thinks of %u, is %u kHz.\n", old_freq, new_freq); | 1244 | "core thinks of %u, is %u kHz.\n", old_freq, new_freq); |
1205 | 1245 | ||
1206 | freqs.old = old_freq; | 1246 | freqs.old = old_freq; |
1207 | freqs.new = new_freq; | 1247 | freqs.new = new_freq; |
1208 | 1248 | ||
1209 | read_lock_irqsave(&cpufreq_driver_lock, flags); | 1249 | read_lock_irqsave(&cpufreq_driver_lock, flags); |
1210 | policy = per_cpu(cpufreq_cpu_data, cpu); | 1250 | policy = per_cpu(cpufreq_cpu_data, cpu); |
1211 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1251 | read_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1212 | 1252 | ||
1213 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | 1253 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); |
1214 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | 1254 | cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); |
1215 | } | 1255 | } |
1216 | 1256 | ||
1217 | 1257 | ||
1218 | /** | 1258 | /** |
1219 | * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur | 1259 | * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur |
1220 | * @cpu: CPU number | 1260 | * @cpu: CPU number |
1221 | * | 1261 | * |
1222 | * This is the last known freq, without actually getting it from the driver. | 1262 | * This is the last known freq, without actually getting it from the driver. |
1223 | * Return value will be the same as what is shown in scaling_cur_freq in sysfs. | 1263 | * Return value will be the same as what is shown in scaling_cur_freq in sysfs. |
1224 | */ | 1264 | */ |
1225 | unsigned int cpufreq_quick_get(unsigned int cpu) | 1265 | unsigned int cpufreq_quick_get(unsigned int cpu) |
1226 | { | 1266 | { |
1227 | struct cpufreq_policy *policy; | 1267 | struct cpufreq_policy *policy; |
1228 | unsigned int ret_freq = 0; | 1268 | unsigned int ret_freq = 0; |
1229 | 1269 | ||
1230 | if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) | 1270 | if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) |
1231 | return cpufreq_driver->get(cpu); | 1271 | return cpufreq_driver->get(cpu); |
1232 | 1272 | ||
1233 | policy = cpufreq_cpu_get(cpu); | 1273 | policy = cpufreq_cpu_get(cpu); |
1234 | if (policy) { | 1274 | if (policy) { |
1235 | ret_freq = policy->cur; | 1275 | ret_freq = policy->cur; |
1236 | cpufreq_cpu_put(policy); | 1276 | cpufreq_cpu_put(policy); |
1237 | } | 1277 | } |
1238 | 1278 | ||
1239 | return ret_freq; | 1279 | return ret_freq; |
1240 | } | 1280 | } |
1241 | EXPORT_SYMBOL(cpufreq_quick_get); | 1281 | EXPORT_SYMBOL(cpufreq_quick_get); |
1242 | 1282 | ||
1243 | /** | 1283 | /** |
1244 | * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU | 1284 | * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU |
1245 | * @cpu: CPU number | 1285 | * @cpu: CPU number |
1246 | * | 1286 | * |
1247 | * Just return the max possible frequency for a given CPU. | 1287 | * Just return the max possible frequency for a given CPU. |
1248 | */ | 1288 | */ |
1249 | unsigned int cpufreq_quick_get_max(unsigned int cpu) | 1289 | unsigned int cpufreq_quick_get_max(unsigned int cpu) |
1250 | { | 1290 | { |
1251 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | 1291 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); |
1252 | unsigned int ret_freq = 0; | 1292 | unsigned int ret_freq = 0; |
1253 | 1293 | ||
1254 | if (policy) { | 1294 | if (policy) { |
1255 | ret_freq = policy->max; | 1295 | ret_freq = policy->max; |
1256 | cpufreq_cpu_put(policy); | 1296 | cpufreq_cpu_put(policy); |
1257 | } | 1297 | } |
1258 | 1298 | ||
1259 | return ret_freq; | 1299 | return ret_freq; |
1260 | } | 1300 | } |
1261 | EXPORT_SYMBOL(cpufreq_quick_get_max); | 1301 | EXPORT_SYMBOL(cpufreq_quick_get_max); |
1262 | 1302 | ||
1263 | 1303 | ||
1264 | static unsigned int __cpufreq_get(unsigned int cpu) | 1304 | static unsigned int __cpufreq_get(unsigned int cpu) |
1265 | { | 1305 | { |
1266 | struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); | 1306 | struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); |
1267 | unsigned int ret_freq = 0; | 1307 | unsigned int ret_freq = 0; |
1268 | 1308 | ||
1269 | if (!cpufreq_driver->get) | 1309 | if (!cpufreq_driver->get) |
1270 | return ret_freq; | 1310 | return ret_freq; |
1271 | 1311 | ||
1272 | ret_freq = cpufreq_driver->get(cpu); | 1312 | ret_freq = cpufreq_driver->get(cpu); |
1273 | 1313 | ||
1274 | if (ret_freq && policy->cur && | 1314 | if (ret_freq && policy->cur && |
1275 | !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { | 1315 | !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { |
1276 | /* verify no discrepancy between actual and | 1316 | /* verify no discrepancy between actual and |
1277 | saved value exists */ | 1317 | saved value exists */ |
1278 | if (unlikely(ret_freq != policy->cur)) { | 1318 | if (unlikely(ret_freq != policy->cur)) { |
1279 | cpufreq_out_of_sync(cpu, policy->cur, ret_freq); | 1319 | cpufreq_out_of_sync(cpu, policy->cur, ret_freq); |
1280 | schedule_work(&policy->update); | 1320 | schedule_work(&policy->update); |
1281 | } | 1321 | } |
1282 | } | 1322 | } |
1283 | 1323 | ||
1284 | return ret_freq; | 1324 | return ret_freq; |
1285 | } | 1325 | } |
1286 | 1326 | ||
1287 | /** | 1327 | /** |
1288 | * cpufreq_get - get the current CPU frequency (in kHz) | 1328 | * cpufreq_get - get the current CPU frequency (in kHz) |
1289 | * @cpu: CPU number | 1329 | * @cpu: CPU number |
1290 | * | 1330 | * |
1291 | * Get the current (static) CPU frequency | 1331 | * Get the current (static) CPU frequency |
1292 | */ | 1332 | */ |
1293 | unsigned int cpufreq_get(unsigned int cpu) | 1333 | unsigned int cpufreq_get(unsigned int cpu) |
1294 | { | 1334 | { |
1295 | unsigned int ret_freq = 0; | 1335 | unsigned int ret_freq = 0; |
1296 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | 1336 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); |
1297 | 1337 | ||
1298 | if (!policy) | 1338 | if (!policy) |
1299 | goto out; | 1339 | goto out; |
1300 | 1340 | ||
1301 | if (unlikely(lock_policy_rwsem_read(cpu))) | 1341 | if (unlikely(lock_policy_rwsem_read(cpu))) |
1302 | goto out_policy; | 1342 | goto out_policy; |
1303 | 1343 | ||
1304 | ret_freq = __cpufreq_get(cpu); | 1344 | ret_freq = __cpufreq_get(cpu); |
1305 | 1345 | ||
1306 | unlock_policy_rwsem_read(cpu); | 1346 | unlock_policy_rwsem_read(cpu); |
1307 | 1347 | ||
1308 | out_policy: | 1348 | out_policy: |
1309 | cpufreq_cpu_put(policy); | 1349 | cpufreq_cpu_put(policy); |
1310 | out: | 1350 | out: |
1311 | return ret_freq; | 1351 | return ret_freq; |
1312 | } | 1352 | } |
1313 | EXPORT_SYMBOL(cpufreq_get); | 1353 | EXPORT_SYMBOL(cpufreq_get); |
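cpufreq_quick_get() and cpufreq_quick_get_max() only read the cached policy values (except for setpolicy drivers, where quick_get asks the hardware directly), while cpufreq_get() takes the policy rwsem and queries the driver, resynchronizing via cpufreq_out_of_sync() if the cached value turned out stale. A small illustrative caller, not from this diff:

    #include <linux/cpufreq.h>
    #include <linux/printk.h>

    static void report_cpu0_freq(void)
    {
            unsigned int cached = cpufreq_quick_get(0);  /* last known value */
            unsigned int fresh = cpufreq_get(0);         /* asks the driver */

            pr_info("cpu0: cached %u kHz, measured %u kHz, max %u kHz\n",
                    cached, fresh, cpufreq_quick_get_max(0));
    }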
1314 | 1354 | ||
1315 | static struct subsys_interface cpufreq_interface = { | 1355 | static struct subsys_interface cpufreq_interface = { |
1316 | .name = "cpufreq", | 1356 | .name = "cpufreq", |
1317 | .subsys = &cpu_subsys, | 1357 | .subsys = &cpu_subsys, |
1318 | .add_dev = cpufreq_add_dev, | 1358 | .add_dev = cpufreq_add_dev, |
1319 | .remove_dev = cpufreq_remove_dev, | 1359 | .remove_dev = cpufreq_remove_dev, |
1320 | }; | 1360 | }; |
1321 | 1361 | ||
1322 | 1362 | ||
1323 | /** | 1363 | /** |
1324 | * cpufreq_bp_suspend - Prepare the boot CPU for system suspend. | 1364 | * cpufreq_bp_suspend - Prepare the boot CPU for system suspend. |
1325 | * | 1365 | * |
1326 | * This function is only executed for the boot processor. The other CPUs | 1366 | * This function is only executed for the boot processor. The other CPUs |
1327 | * have been put offline by means of CPU hotplug. | 1367 | * have been put offline by means of CPU hotplug. |
1328 | */ | 1368 | */ |
1329 | static int cpufreq_bp_suspend(void) | 1369 | static int cpufreq_bp_suspend(void) |
1330 | { | 1370 | { |
1331 | int ret = 0; | 1371 | int ret = 0; |
1332 | 1372 | ||
1333 | int cpu = smp_processor_id(); | 1373 | int cpu = smp_processor_id(); |
1334 | struct cpufreq_policy *cpu_policy; | 1374 | struct cpufreq_policy *cpu_policy; |
1335 | 1375 | ||
1336 | pr_debug("suspending cpu %u\n", cpu); | 1376 | pr_debug("suspending cpu %u\n", cpu); |
1337 | 1377 | ||
1338 | /* If there's no policy for the boot CPU, we have nothing to do. */ | 1378 | /* If there's no policy for the boot CPU, we have nothing to do. */ |
1339 | cpu_policy = cpufreq_cpu_get(cpu); | 1379 | cpu_policy = cpufreq_cpu_get(cpu); |
1340 | if (!cpu_policy) | 1380 | if (!cpu_policy) |
1341 | return 0; | 1381 | return 0; |
1342 | 1382 | ||
1343 | if (cpufreq_driver->suspend) { | 1383 | if (cpufreq_driver->suspend) { |
1344 | ret = cpufreq_driver->suspend(cpu_policy); | 1384 | ret = cpufreq_driver->suspend(cpu_policy); |
1345 | if (ret) | 1385 | if (ret) |
1346 | printk(KERN_ERR "cpufreq: suspend failed in ->suspend " | 1386 | printk(KERN_ERR "cpufreq: suspend failed in ->suspend " |
1347 | "step on CPU %u\n", cpu_policy->cpu); | 1387 | "step on CPU %u\n", cpu_policy->cpu); |
1348 | } | 1388 | } |
1349 | 1389 | ||
1350 | cpufreq_cpu_put(cpu_policy); | 1390 | cpufreq_cpu_put(cpu_policy); |
1351 | return ret; | 1391 | return ret; |
1352 | } | 1392 | } |
1353 | 1393 | ||
1354 | /** | 1394 | /** |
1355 | * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU. | 1395 | * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU. |
1356 | * | 1396 | * |
1357 | * 1.) resume CPUfreq hardware support (cpufreq_driver->resume()) | 1397 | * 1.) resume CPUfreq hardware support (cpufreq_driver->resume()) |
1358 | * 2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are | 1398 | * 2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are |
1359 | * restored. It will verify that the current freq is in sync with | 1399 | * restored. It will verify that the current freq is in sync with |
1360 | * what we believe it to be. This is a bit later than when it | 1400 | * what we believe it to be. This is a bit later than when it |
1361 | * should be, but nonetheless it's better than calling | 1401 | * should be, but nonetheless it's better than calling |
1362 | * cpufreq_driver->get() here which might re-enable interrupts... | 1402 | * cpufreq_driver->get() here which might re-enable interrupts... |
1363 | * | 1403 | * |
1364 | * This function is only executed for the boot CPU. The other CPUs have not | 1404 | * This function is only executed for the boot CPU. The other CPUs have not |
1365 | * been turned on yet. | 1405 | * been turned on yet. |
1366 | */ | 1406 | */ |
1367 | static void cpufreq_bp_resume(void) | 1407 | static void cpufreq_bp_resume(void) |
1368 | { | 1408 | { |
1369 | int ret = 0; | 1409 | int ret = 0; |
1370 | 1410 | ||
1371 | int cpu = smp_processor_id(); | 1411 | int cpu = smp_processor_id(); |
1372 | struct cpufreq_policy *cpu_policy; | 1412 | struct cpufreq_policy *cpu_policy; |
1373 | 1413 | ||
1374 | pr_debug("resuming cpu %u\n", cpu); | 1414 | pr_debug("resuming cpu %u\n", cpu); |
1375 | 1415 | ||
1376 | /* If there's no policy for the boot CPU, we have nothing to do. */ | 1416 | /* If there's no policy for the boot CPU, we have nothing to do. */ |
1377 | cpu_policy = cpufreq_cpu_get(cpu); | 1417 | cpu_policy = cpufreq_cpu_get(cpu); |
1378 | if (!cpu_policy) | 1418 | if (!cpu_policy) |
1379 | return; | 1419 | return; |
1380 | 1420 | ||
1381 | if (cpufreq_driver->resume) { | 1421 | if (cpufreq_driver->resume) { |
1382 | ret = cpufreq_driver->resume(cpu_policy); | 1422 | ret = cpufreq_driver->resume(cpu_policy); |
1383 | if (ret) { | 1423 | if (ret) { |
1384 | printk(KERN_ERR "cpufreq: resume failed in ->resume " | 1424 | printk(KERN_ERR "cpufreq: resume failed in ->resume " |
1385 | "step on CPU %u\n", cpu_policy->cpu); | 1425 | "step on CPU %u\n", cpu_policy->cpu); |
1386 | goto fail; | 1426 | goto fail; |
1387 | } | 1427 | } |
1388 | } | 1428 | } |
1389 | 1429 | ||
1390 | schedule_work(&cpu_policy->update); | 1430 | schedule_work(&cpu_policy->update); |
1391 | 1431 | ||
1392 | fail: | 1432 | fail: |
1393 | cpufreq_cpu_put(cpu_policy); | 1433 | cpufreq_cpu_put(cpu_policy); |
1394 | } | 1434 | } |
1395 | 1435 | ||
1396 | static struct syscore_ops cpufreq_syscore_ops = { | 1436 | static struct syscore_ops cpufreq_syscore_ops = { |
1397 | .suspend = cpufreq_bp_suspend, | 1437 | .suspend = cpufreq_bp_suspend, |
1398 | .resume = cpufreq_bp_resume, | 1438 | .resume = cpufreq_bp_resume, |
1399 | }; | 1439 | }; |
1400 | 1440 | ||
1401 | /** | 1441 | /** |
1402 | * cpufreq_get_current_driver - return current driver's name | 1442 | * cpufreq_get_current_driver - return current driver's name |
1403 | * | 1443 | * |
1404 | * Return the name string of the currently loaded cpufreq driver | 1444 | * Return the name string of the currently loaded cpufreq driver |
1405 | * or NULL, if none. | 1445 | * or NULL, if none. |
1406 | */ | 1446 | */ |
1407 | const char *cpufreq_get_current_driver(void) | 1447 | const char *cpufreq_get_current_driver(void) |
1408 | { | 1448 | { |
1409 | if (cpufreq_driver) | 1449 | if (cpufreq_driver) |
1410 | return cpufreq_driver->name; | 1450 | return cpufreq_driver->name; |
1411 | 1451 | ||
1412 | return NULL; | 1452 | return NULL; |
1413 | } | 1453 | } |
1414 | EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); | 1454 | EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); |
1415 | 1455 | ||
1416 | /********************************************************************* | 1456 | /********************************************************************* |
1417 | * NOTIFIER LISTS INTERFACE * | 1457 | * NOTIFIER LISTS INTERFACE * |
1418 | *********************************************************************/ | 1458 | *********************************************************************/ |
1419 | 1459 | ||
1420 | /** | 1460 | /** |
1421 | * cpufreq_register_notifier - register a driver with cpufreq | 1461 | * cpufreq_register_notifier - register a driver with cpufreq |
1422 | * @nb: notifier function to register | 1462 | * @nb: notifier function to register |
1423 | * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER | 1463 | * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER |
1424 | * | 1464 | * |
1425 | * Add a driver to one of two lists: either a list of drivers that | 1465 | * Add a driver to one of two lists: either a list of drivers that |
1426 | * are notified about clock rate changes (once before and once after | 1466 | * are notified about clock rate changes (once before and once after |
1427 | * the transition), or a list of drivers that are notified about | 1467 | * the transition), or a list of drivers that are notified about |
1428 | * changes in cpufreq policy. | 1468 | * changes in cpufreq policy. |
1429 | * | 1469 | * |
1430 | * This function may sleep, and has the same return conditions as | 1470 | * This function may sleep, and has the same return conditions as |
1431 | * blocking_notifier_chain_register. | 1471 | * blocking_notifier_chain_register. |
1432 | */ | 1472 | */ |
1433 | int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) | 1473 | int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) |
1434 | { | 1474 | { |
1435 | int ret; | 1475 | int ret; |
1436 | 1476 | ||
1437 | if (cpufreq_disabled()) | 1477 | if (cpufreq_disabled()) |
1438 | return -EINVAL; | 1478 | return -EINVAL; |
1439 | 1479 | ||
1440 | WARN_ON(!init_cpufreq_transition_notifier_list_called); | 1480 | WARN_ON(!init_cpufreq_transition_notifier_list_called); |
1441 | 1481 | ||
1442 | switch (list) { | 1482 | switch (list) { |
1443 | case CPUFREQ_TRANSITION_NOTIFIER: | 1483 | case CPUFREQ_TRANSITION_NOTIFIER: |
1444 | ret = srcu_notifier_chain_register( | 1484 | ret = srcu_notifier_chain_register( |
1445 | &cpufreq_transition_notifier_list, nb); | 1485 | &cpufreq_transition_notifier_list, nb); |
1446 | break; | 1486 | break; |
1447 | case CPUFREQ_POLICY_NOTIFIER: | 1487 | case CPUFREQ_POLICY_NOTIFIER: |
1448 | ret = blocking_notifier_chain_register( | 1488 | ret = blocking_notifier_chain_register( |
1449 | &cpufreq_policy_notifier_list, nb); | 1489 | &cpufreq_policy_notifier_list, nb); |
1450 | break; | 1490 | break; |
1451 | default: | 1491 | default: |
1452 | ret = -EINVAL; | 1492 | ret = -EINVAL; |
1453 | } | 1493 | } |
1454 | 1494 | ||
1455 | return ret; | 1495 | return ret; |
1456 | } | 1496 | } |
1457 | EXPORT_SYMBOL(cpufreq_register_notifier); | 1497 | EXPORT_SYMBOL(cpufreq_register_notifier); |
1458 | 1498 | ||
1459 | 1499 | ||
1460 | /** | 1500 | /** |
1461 | * cpufreq_unregister_notifier - unregister a driver with cpufreq | 1501 | * cpufreq_unregister_notifier - unregister a driver with cpufreq |
1462 | * @nb: notifier block to be unregistered | 1502 | * @nb: notifier block to be unregistered |
1463 | * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER | 1503 | * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER |
1464 | * | 1504 | * |
1465 | * Remove a driver from the CPU frequency notifier list. | 1505 | * Remove a driver from the CPU frequency notifier list. |
1466 | * | 1506 | * |
1467 | * This function may sleep, and has the same return conditions as | 1507 | * This function may sleep, and has the same return conditions as |
1468 | * blocking_notifier_chain_unregister. | 1508 | * blocking_notifier_chain_unregister. |
1469 | */ | 1509 | */ |
1470 | int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) | 1510 | int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) |
1471 | { | 1511 | { |
1472 | int ret; | 1512 | int ret; |
1473 | 1513 | ||
1474 | if (cpufreq_disabled()) | 1514 | if (cpufreq_disabled()) |
1475 | return -EINVAL; | 1515 | return -EINVAL; |
1476 | 1516 | ||
1477 | switch (list) { | 1517 | switch (list) { |
1478 | case CPUFREQ_TRANSITION_NOTIFIER: | 1518 | case CPUFREQ_TRANSITION_NOTIFIER: |
1479 | ret = srcu_notifier_chain_unregister( | 1519 | ret = srcu_notifier_chain_unregister( |
1480 | &cpufreq_transition_notifier_list, nb); | 1520 | &cpufreq_transition_notifier_list, nb); |
1481 | break; | 1521 | break; |
1482 | case CPUFREQ_POLICY_NOTIFIER: | 1522 | case CPUFREQ_POLICY_NOTIFIER: |
1483 | ret = blocking_notifier_chain_unregister( | 1523 | ret = blocking_notifier_chain_unregister( |
1484 | &cpufreq_policy_notifier_list, nb); | 1524 | &cpufreq_policy_notifier_list, nb); |
1485 | break; | 1525 | break; |
1486 | default: | 1526 | default: |
1487 | ret = -EINVAL; | 1527 | ret = -EINVAL; |
1488 | } | 1528 | } |
1489 | 1529 | ||
1490 | return ret; | 1530 | return ret; |
1491 | } | 1531 | } |
1492 | EXPORT_SYMBOL(cpufreq_unregister_notifier); | 1532 | EXPORT_SYMBOL(cpufreq_unregister_notifier); |
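For transition notifiers the chain fires twice per frequency change (see the CPUFREQ_PRECHANGE/CPUFREQ_POSTCHANGE pair in cpufreq_out_of_sync() above), with a struct cpufreq_freqs * as the payload. A sketch of a client; the callback and its names are ours, not part of this diff:

    #include <linux/cpufreq.h>
    #include <linux/notifier.h>
    #include <linux/printk.h>

    static int demo_transition(struct notifier_block *nb, unsigned long val,
                               void *data)
    {
            struct cpufreq_freqs *freqs = data;

            if (val == CPUFREQ_POSTCHANGE)
                    pr_info("cpu%u: %u kHz -> %u kHz\n",
                            freqs->cpu, freqs->old, freqs->new);
            return NOTIFY_OK;
    }

    static struct notifier_block demo_nb = {
            .notifier_call = demo_transition,
    };

    /* pair these, e.g. in module init/exit:
     *   cpufreq_register_notifier(&demo_nb, CPUFREQ_TRANSITION_NOTIFIER);
     *   cpufreq_unregister_notifier(&demo_nb, CPUFREQ_TRANSITION_NOTIFIER);
     */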
1493 | 1533 | ||
1494 | 1534 | ||
1495 | /********************************************************************* | 1535 | /********************************************************************* |
1496 | * GOVERNORS * | 1536 | * GOVERNORS * |
1497 | *********************************************************************/ | 1537 | *********************************************************************/ |
1498 | 1538 | ||
1499 | 1539 | ||
1500 | int __cpufreq_driver_target(struct cpufreq_policy *policy, | 1540 | int __cpufreq_driver_target(struct cpufreq_policy *policy, |
1501 | unsigned int target_freq, | 1541 | unsigned int target_freq, |
1502 | unsigned int relation) | 1542 | unsigned int relation) |
1503 | { | 1543 | { |
1504 | int retval = -EINVAL; | 1544 | int retval = -EINVAL; |
1505 | unsigned int old_target_freq = target_freq; | 1545 | unsigned int old_target_freq = target_freq; |
1506 | 1546 | ||
1507 | if (cpufreq_disabled()) | 1547 | if (cpufreq_disabled()) |
1508 | return -ENODEV; | 1548 | return -ENODEV; |
1509 | 1549 | ||
1510 | /* Make sure that target_freq is within supported range */ | 1550 | /* Make sure that target_freq is within supported range */ |
1511 | if (target_freq > policy->max) | 1551 | if (target_freq > policy->max) |
1512 | target_freq = policy->max; | 1552 | target_freq = policy->max; |
1513 | if (target_freq < policy->min) | 1553 | if (target_freq < policy->min) |
1514 | target_freq = policy->min; | 1554 | target_freq = policy->min; |
1515 | 1555 | ||
1516 | pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", | 1556 | pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", |
1517 | policy->cpu, target_freq, relation, old_target_freq); | 1557 | policy->cpu, target_freq, relation, old_target_freq); |
1518 | 1558 | ||
1519 | if (target_freq == policy->cur) | 1559 | if (target_freq == policy->cur) |
1520 | return 0; | 1560 | return 0; |
1521 | 1561 | ||
1522 | if (cpufreq_driver->target) | 1562 | if (cpufreq_driver->target) |
1523 | retval = cpufreq_driver->target(policy, target_freq, relation); | 1563 | retval = cpufreq_driver->target(policy, target_freq, relation); |
1524 | 1564 | ||
1525 | return retval; | 1565 | return retval; |
1526 | } | 1566 | } |
1527 | EXPORT_SYMBOL_GPL(__cpufreq_driver_target); | 1567 | EXPORT_SYMBOL_GPL(__cpufreq_driver_target); |
1528 | 1568 | ||
1529 | int cpufreq_driver_target(struct cpufreq_policy *policy, | 1569 | int cpufreq_driver_target(struct cpufreq_policy *policy, |
1530 | unsigned int target_freq, | 1570 | unsigned int target_freq, |
1531 | unsigned int relation) | 1571 | unsigned int relation) |
1532 | { | 1572 | { |
1533 | int ret = -EINVAL; | 1573 | int ret = -EINVAL; |
1534 | 1574 | ||
1535 | policy = cpufreq_cpu_get(policy->cpu); | 1575 | policy = cpufreq_cpu_get(policy->cpu); |
1536 | if (!policy) | 1576 | if (!policy) |
1537 | goto no_policy; | 1577 | goto no_policy; |
1538 | 1578 | ||
1539 | if (unlikely(lock_policy_rwsem_write(policy->cpu))) | 1579 | if (unlikely(lock_policy_rwsem_write(policy->cpu))) |
1540 | goto fail; | 1580 | goto fail; |
1541 | 1581 | ||
1542 | ret = __cpufreq_driver_target(policy, target_freq, relation); | 1582 | ret = __cpufreq_driver_target(policy, target_freq, relation); |
1543 | 1583 | ||
1544 | unlock_policy_rwsem_write(policy->cpu); | 1584 | unlock_policy_rwsem_write(policy->cpu); |
1545 | 1585 | ||
1546 | fail: | 1586 | fail: |
1547 | cpufreq_cpu_put(policy); | 1587 | cpufreq_cpu_put(policy); |
1548 | no_policy: | 1588 | no_policy: |
1549 | return ret; | 1589 | return ret; |
1550 | } | 1590 | } |
1551 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); | 1591 | EXPORT_SYMBOL_GPL(cpufreq_driver_target); |
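cpufreq_driver_target() is the locked entry point a governor uses to request a frequency; __cpufreq_driver_target() clamps the request into [policy->min, policy->max] before handing it to the driver. The relation argument chooses the rounding: CPUFREQ_RELATION_L selects the lowest supported frequency at or above the target, CPUFREQ_RELATION_H the highest at or below it. A one-line illustrative use (hypothetical helper):

    /* Hypothetical: pin the policy to its maximum, as a governor might. */
    static int demo_go_max(struct cpufreq_policy *policy)
    {
            return cpufreq_driver_target(policy, policy->max,
                                         CPUFREQ_RELATION_H);
    }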
1552 | 1592 | ||
1553 | int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu) | 1593 | int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu) |
1554 | { | 1594 | { |
1555 | int ret = 0; | 1595 | int ret = 0; |
1556 | 1596 | ||
1557 | if (cpufreq_disabled()) | 1597 | if (cpufreq_disabled()) |
1558 | return ret; | 1598 | return ret; |
1559 | 1599 | ||
1560 | if (!cpufreq_driver->getavg) | 1600 | if (!cpufreq_driver->getavg) |
1561 | return 0; | 1601 | return 0; |
1562 | 1602 | ||
1563 | policy = cpufreq_cpu_get(policy->cpu); | 1603 | policy = cpufreq_cpu_get(policy->cpu); |
1564 | if (!policy) | 1604 | if (!policy) |
1565 | return -EINVAL; | 1605 | return -EINVAL; |
1566 | 1606 | ||
1567 | ret = cpufreq_driver->getavg(policy, cpu); | 1607 | ret = cpufreq_driver->getavg(policy, cpu); |
1568 | 1608 | ||
1569 | cpufreq_cpu_put(policy); | 1609 | cpufreq_cpu_put(policy); |
1570 | return ret; | 1610 | return ret; |
1571 | } | 1611 | } |
1572 | EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg); | 1612 | EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg); |
1573 | 1613 | ||
1574 | /* | 1614 | /* |
1575 | * when "event" is CPUFREQ_GOV_LIMITS | 1615 | * when "event" is CPUFREQ_GOV_LIMITS |
1576 | */ | 1616 | */ |
1577 | 1617 | ||
1578 | static int __cpufreq_governor(struct cpufreq_policy *policy, | 1618 | static int __cpufreq_governor(struct cpufreq_policy *policy, |
1579 | unsigned int event) | 1619 | unsigned int event) |
1580 | { | 1620 | { |
1581 | int ret; | 1621 | int ret; |
1582 | 1622 | ||
1583 | /* 'gov' must only be defined when the default governor is known to | 1623 | /* 'gov' must only be defined when the default governor is known to |
1584 | have latency restrictions, e.g. conservative or ondemand. | 1624 | have latency restrictions, e.g. conservative or ondemand. |
1585 | Kconfig already ensures that this is the case. | 1625 | Kconfig already ensures that this is the case. |
1586 | */ | 1626 | */ |
1587 | #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE | 1627 | #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE |
1588 | struct cpufreq_governor *gov = &cpufreq_gov_performance; | 1628 | struct cpufreq_governor *gov = &cpufreq_gov_performance; |
1589 | #else | 1629 | #else |
1590 | struct cpufreq_governor *gov = NULL; | 1630 | struct cpufreq_governor *gov = NULL; |
1591 | #endif | 1631 | #endif |
1592 | 1632 | ||
1593 | if (policy->governor->max_transition_latency && | 1633 | if (policy->governor->max_transition_latency && |
1594 | policy->cpuinfo.transition_latency > | 1634 | policy->cpuinfo.transition_latency > |
1595 | policy->governor->max_transition_latency) { | 1635 | policy->governor->max_transition_latency) { |
1596 | if (!gov) | 1636 | if (!gov) |
1597 | return -EINVAL; | 1637 | return -EINVAL; |
1598 | else { | 1638 | else { |
1599 | printk(KERN_WARNING "%s governor failed: hardware" | 1639 | printk(KERN_WARNING "%s governor failed: hardware" |
1600 | " transition latency too long, falling back" | 1640 | " transition latency too long, falling back" |
1601 | " to %s governor\n", | 1641 | " to %s governor\n", |
1602 | policy->governor->name, | 1642 | policy->governor->name, |
1603 | gov->name); | 1643 | gov->name); |
1604 | policy->governor = gov; | 1644 | policy->governor = gov; |
1605 | } | 1645 | } |
1606 | } | 1646 | } |
1607 | 1647 | ||
1608 | if (!try_module_get(policy->governor->owner)) | 1648 | if (!try_module_get(policy->governor->owner)) |
1609 | return -EINVAL; | 1649 | return -EINVAL; |
1610 | 1650 | ||
1611 | pr_debug("__cpufreq_governor for CPU %u, event %u\n", | 1651 | pr_debug("__cpufreq_governor for CPU %u, event %u\n", |
1612 | policy->cpu, event); | 1652 | policy->cpu, event); |
1613 | ret = policy->governor->governor(policy, event); | 1653 | ret = policy->governor->governor(policy, event); |
1614 | 1654 | ||
1615 | if (!ret) { | 1655 | if (!ret) { |
1616 | if (event == CPUFREQ_GOV_POLICY_INIT) | 1656 | if (event == CPUFREQ_GOV_POLICY_INIT) |
1617 | policy->governor->initialized++; | 1657 | policy->governor->initialized++; |
1618 | else if (event == CPUFREQ_GOV_POLICY_EXIT) | 1658 | else if (event == CPUFREQ_GOV_POLICY_EXIT) |
1619 | policy->governor->initialized--; | 1659 | policy->governor->initialized--; |
1620 | } | 1660 | } |
1621 | 1661 | ||
1622 | /* we keep one module reference alive for | 1662 | /* we keep one module reference alive for |
1623 | each CPU governed by this governor */ | 1663 | each CPU governed by this governor */ |
1624 | if ((event != CPUFREQ_GOV_START) || ret) | 1664 | if ((event != CPUFREQ_GOV_START) || ret) |
1625 | module_put(policy->governor->owner); | 1665 | module_put(policy->governor->owner); |
1626 | if ((event == CPUFREQ_GOV_STOP) && !ret) | 1666 | if ((event == CPUFREQ_GOV_STOP) && !ret) |
1627 | module_put(policy->governor->owner); | 1667 | module_put(policy->governor->owner); |
1628 | 1668 | ||
1629 | return ret; | 1669 | return ret; |
1630 | } | 1670 | } |
1631 | 1671 | ||
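The module refcounting above nets out to one reference held per successful CPUFREQ_GOV_START and dropped on successful CPUFREQ_GOV_STOP, so a governor module cannot be unloaded while it is driving some policy. A sketch of the event sequence the core pushes through this function over one governor attachment (illustrative; the real call sites are __cpufreq_set_policy() and the add/remove paths):

__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT); /* allocate per-policy tunables */
__cpufreq_governor(policy, CPUFREQ_GOV_START);       /* begin sampling, pin module */
__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);      /* apply new min/max limits */
__cpufreq_governor(policy, CPUFREQ_GOV_STOP);        /* stop sampling, unpin module */
__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); /* free per-policy tunables */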
1632 | 1672 | ||
1633 | int cpufreq_register_governor(struct cpufreq_governor *governor) | 1673 | int cpufreq_register_governor(struct cpufreq_governor *governor) |
1634 | { | 1674 | { |
1635 | int err; | 1675 | int err; |
1636 | 1676 | ||
1637 | if (!governor) | 1677 | if (!governor) |
1638 | return -EINVAL; | 1678 | return -EINVAL; |
1639 | 1679 | ||
1640 | if (cpufreq_disabled()) | 1680 | if (cpufreq_disabled()) |
1641 | return -ENODEV; | 1681 | return -ENODEV; |
1642 | 1682 | ||
1643 | mutex_lock(&cpufreq_governor_mutex); | 1683 | mutex_lock(&cpufreq_governor_mutex); |
1644 | 1684 | ||
1645 | governor->initialized = 0; | 1685 | governor->initialized = 0; |
1646 | err = -EBUSY; | 1686 | err = -EBUSY; |
1647 | if (__find_governor(governor->name) == NULL) { | 1687 | if (__find_governor(governor->name) == NULL) { |
1648 | err = 0; | 1688 | err = 0; |
1649 | list_add(&governor->governor_list, &cpufreq_governor_list); | 1689 | list_add(&governor->governor_list, &cpufreq_governor_list); |
1650 | } | 1690 | } |
1651 | 1691 | ||
1652 | mutex_unlock(&cpufreq_governor_mutex); | 1692 | mutex_unlock(&cpufreq_governor_mutex); |
1653 | return err; | 1693 | return err; |
1654 | } | 1694 | } |
1655 | EXPORT_SYMBOL_GPL(cpufreq_register_governor); | 1695 | EXPORT_SYMBOL_GPL(cpufreq_register_governor); |
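A minimal sketch of what a registrant of this interface looks like (all names below are illustrative, not from this patch):

#include <linux/cpufreq.h>
#include <linux/module.h>

static int example_governor_fn(struct cpufreq_policy *policy,
			       unsigned int event)
{
	/* Pin the policy to its maximum whenever limits change. */
	if (event == CPUFREQ_GOV_LIMITS)
		__cpufreq_driver_target(policy, policy->max,
					CPUFREQ_RELATION_H);
	return 0;
}

static struct cpufreq_governor example_gov = {
	.name     = "example",
	.governor = example_governor_fn,
	.owner    = THIS_MODULE,
};

static int __init example_gov_init(void)
{
	return cpufreq_register_governor(&example_gov);
}
module_init(example_gov_init);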
1656 | 1696 | ||
1657 | 1697 | ||
1658 | void cpufreq_unregister_governor(struct cpufreq_governor *governor) | 1698 | void cpufreq_unregister_governor(struct cpufreq_governor *governor) |
1659 | { | 1699 | { |
1660 | #ifdef CONFIG_HOTPLUG_CPU | 1700 | #ifdef CONFIG_HOTPLUG_CPU |
1661 | int cpu; | 1701 | int cpu; |
1662 | #endif | 1702 | #endif |
1663 | 1703 | ||
1664 | if (!governor) | 1704 | if (!governor) |
1665 | return; | 1705 | return; |
1666 | 1706 | ||
1667 | if (cpufreq_disabled()) | 1707 | if (cpufreq_disabled()) |
1668 | return; | 1708 | return; |
1669 | 1709 | ||
1670 | #ifdef CONFIG_HOTPLUG_CPU | 1710 | #ifdef CONFIG_HOTPLUG_CPU |
1671 | for_each_present_cpu(cpu) { | 1711 | for_each_present_cpu(cpu) { |
1672 | if (cpu_online(cpu)) | 1712 | if (cpu_online(cpu)) |
1673 | continue; | 1713 | continue; |
1674 | if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name)) | 1714 | if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name)) |
1675 | strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0"); | 1715 | strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0"); |
1676 | } | 1716 | } |
1677 | #endif | 1717 | #endif |
1678 | 1718 | ||
1679 | mutex_lock(&cpufreq_governor_mutex); | 1719 | mutex_lock(&cpufreq_governor_mutex); |
1680 | list_del(&governor->governor_list); | 1720 | list_del(&governor->governor_list); |
1681 | mutex_unlock(&cpufreq_governor_mutex); | 1721 | mutex_unlock(&cpufreq_governor_mutex); |
1682 | return; | 1722 | return; |
1683 | } | 1723 | } |
1684 | EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); | 1724 | EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); |
1685 | 1725 | ||
1686 | 1726 | ||
1687 | 1727 | ||
1688 | /********************************************************************* | 1728 | /********************************************************************* |
1689 | * POLICY INTERFACE * | 1729 | * POLICY INTERFACE * |
1690 | *********************************************************************/ | 1730 | *********************************************************************/ |
1691 | 1731 | ||
1692 | /** | 1732 | /** |
1693 | * cpufreq_get_policy - get the current cpufreq_policy | 1733 | * cpufreq_get_policy - get the current cpufreq_policy |
1694 | * @policy: struct cpufreq_policy into which the current cpufreq_policy | 1734 | * @policy: struct cpufreq_policy into which the current cpufreq_policy |
1695 | * is written | 1735 | * is written |
1696 | * | 1736 | * |
1697 | * Reads the current cpufreq policy. | 1737 | * Reads the current cpufreq policy. |
1698 | */ | 1738 | */ |
1699 | int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) | 1739 | int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) |
1700 | { | 1740 | { |
1701 | struct cpufreq_policy *cpu_policy; | 1741 | struct cpufreq_policy *cpu_policy; |
1702 | if (!policy) | 1742 | if (!policy) |
1703 | return -EINVAL; | 1743 | return -EINVAL; |
1704 | 1744 | ||
1705 | cpu_policy = cpufreq_cpu_get(cpu); | 1745 | cpu_policy = cpufreq_cpu_get(cpu); |
1706 | if (!cpu_policy) | 1746 | if (!cpu_policy) |
1707 | return -EINVAL; | 1747 | return -EINVAL; |
1708 | 1748 | ||
1709 | memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); | 1749 | memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); |
1710 | 1750 | ||
1711 | cpufreq_cpu_put(cpu_policy); | 1751 | cpufreq_cpu_put(cpu_policy); |
1712 | return 0; | 1752 | return 0; |
1713 | } | 1753 | } |
1714 | EXPORT_SYMBOL(cpufreq_get_policy); | 1754 | EXPORT_SYMBOL(cpufreq_get_policy); |
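A short sketch of a consumer of this accessor (the helper name is hypothetical):

static void example_show_policy(unsigned int cpu)
{
	struct cpufreq_policy pol;

	/* Copies the live policy into 'pol' and drops the reference. */
	if (!cpufreq_get_policy(&pol, cpu))
		pr_info("cpu%u: %u-%u kHz, governor %s\n", cpu,
			pol.min, pol.max,
			pol.governor ? pol.governor->name : "none");
}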
1715 | 1755 | ||
1716 | 1756 | ||
1717 | /* | 1757 | /* |
1718 | * data : current policy. | 1758 | * data : current policy. |
1719 | * policy : policy to be set. | 1759 | * policy : policy to be set. |
1720 | */ | 1760 | */ |
1721 | static int __cpufreq_set_policy(struct cpufreq_policy *data, | 1761 | static int __cpufreq_set_policy(struct cpufreq_policy *data, |
1722 | struct cpufreq_policy *policy) | 1762 | struct cpufreq_policy *policy) |
1723 | { | 1763 | { |
1724 | int ret = 0, failed = 1; | 1764 | int ret = 0, failed = 1; |
1725 | 1765 | ||
1726 | pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, | 1766 | pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, |
1727 | policy->min, policy->max); | 1767 | policy->min, policy->max); |
1728 | 1768 | ||
1729 | memcpy(&policy->cpuinfo, &data->cpuinfo, | 1769 | memcpy(&policy->cpuinfo, &data->cpuinfo, |
1730 | sizeof(struct cpufreq_cpuinfo)); | 1770 | sizeof(struct cpufreq_cpuinfo)); |
1731 | 1771 | ||
1732 | if (policy->min > data->max || policy->max < data->min) { | 1772 | if (policy->min > data->max || policy->max < data->min) { |
1733 | ret = -EINVAL; | 1773 | ret = -EINVAL; |
1734 | goto error_out; | 1774 | goto error_out; |
1735 | } | 1775 | } |
1736 | 1776 | ||
1737 | /* verify the cpu speed can be set within this limit */ | 1777 | /* verify the cpu speed can be set within this limit */ |
1738 | ret = cpufreq_driver->verify(policy); | 1778 | ret = cpufreq_driver->verify(policy); |
1739 | if (ret) | 1779 | if (ret) |
1740 | goto error_out; | 1780 | goto error_out; |
1741 | 1781 | ||
1742 | /* adjust if necessary - all reasons */ | 1782 | /* adjust if necessary - all reasons */ |
1743 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | 1783 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, |
1744 | CPUFREQ_ADJUST, policy); | 1784 | CPUFREQ_ADJUST, policy); |
1745 | 1785 | ||
1746 | /* adjust if necessary - hardware incompatibility*/ | 1786 | /* adjust if necessary - hardware incompatibility*/ |
1747 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | 1787 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, |
1748 | CPUFREQ_INCOMPATIBLE, policy); | 1788 | CPUFREQ_INCOMPATIBLE, policy); |
1749 | 1789 | ||
1750 | /* verify the cpu speed can be set within this limit, | 1790 | /* verify the cpu speed can be set within this limit, |
1751 | which might be different to the first one */ | 1791 | which might be different to the first one */ |
1752 | ret = cpufreq_driver->verify(policy); | 1792 | ret = cpufreq_driver->verify(policy); |
1753 | if (ret) | 1793 | if (ret) |
1754 | goto error_out; | 1794 | goto error_out; |
1755 | 1795 | ||
1756 | /* notification of the new policy */ | 1796 | /* notification of the new policy */ |
1757 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, | 1797 | blocking_notifier_call_chain(&cpufreq_policy_notifier_list, |
1758 | CPUFREQ_NOTIFY, policy); | 1798 | CPUFREQ_NOTIFY, policy); |
1759 | 1799 | ||
1760 | data->min = policy->min; | 1800 | data->min = policy->min; |
1761 | data->max = policy->max; | 1801 | data->max = policy->max; |
1762 | 1802 | ||
1763 | pr_debug("new min and max freqs are %u - %u kHz\n", | 1803 | pr_debug("new min and max freqs are %u - %u kHz\n", |
1764 | data->min, data->max); | 1804 | data->min, data->max); |
1765 | 1805 | ||
1766 | if (cpufreq_driver->setpolicy) { | 1806 | if (cpufreq_driver->setpolicy) { |
1767 | data->policy = policy->policy; | 1807 | data->policy = policy->policy; |
1768 | pr_debug("setting range\n"); | 1808 | pr_debug("setting range\n"); |
1769 | ret = cpufreq_driver->setpolicy(policy); | 1809 | ret = cpufreq_driver->setpolicy(policy); |
1770 | } else { | 1810 | } else { |
1771 | if (policy->governor != data->governor) { | 1811 | if (policy->governor != data->governor) { |
1772 | /* save old, working values */ | 1812 | /* save old, working values */ |
1773 | struct cpufreq_governor *old_gov = data->governor; | 1813 | struct cpufreq_governor *old_gov = data->governor; |
1774 | 1814 | ||
1775 | pr_debug("governor switch\n"); | 1815 | pr_debug("governor switch\n"); |
1776 | 1816 | ||
1777 | /* end old governor */ | 1817 | /* end old governor */ |
1778 | if (data->governor) { | 1818 | if (data->governor) { |
1779 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); | 1819 | __cpufreq_governor(data, CPUFREQ_GOV_STOP); |
1780 | unlock_policy_rwsem_write(policy->cpu); | 1820 | unlock_policy_rwsem_write(policy->cpu); |
1781 | __cpufreq_governor(data, | 1821 | __cpufreq_governor(data, |
1782 | CPUFREQ_GOV_POLICY_EXIT); | 1822 | CPUFREQ_GOV_POLICY_EXIT); |
1783 | lock_policy_rwsem_write(policy->cpu); | 1823 | lock_policy_rwsem_write(policy->cpu); |
1784 | } | 1824 | } |
1785 | 1825 | ||
1786 | /* start new governor */ | 1826 | /* start new governor */ |
1787 | data->governor = policy->governor; | 1827 | data->governor = policy->governor; |
1788 | if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) { | 1828 | if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) { |
1789 | if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) { | 1829 | if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) { |
1790 | failed = 0; | 1830 | failed = 0; |
1791 | } else { | 1831 | } else { |
1792 | unlock_policy_rwsem_write(policy->cpu); | 1832 | unlock_policy_rwsem_write(policy->cpu); |
1793 | __cpufreq_governor(data, | 1833 | __cpufreq_governor(data, |
1794 | CPUFREQ_GOV_POLICY_EXIT); | 1834 | CPUFREQ_GOV_POLICY_EXIT); |
1795 | lock_policy_rwsem_write(policy->cpu); | 1835 | lock_policy_rwsem_write(policy->cpu); |
1796 | } | 1836 | } |
1797 | } | 1837 | } |
1798 | 1838 | ||
1799 | if (failed) { | 1839 | if (failed) { |
1800 | /* new governor failed, so re-start old one */ | 1840 | /* new governor failed, so re-start old one */ |
1801 | pr_debug("starting governor %s failed\n", | 1841 | pr_debug("starting governor %s failed\n", |
1802 | data->governor->name); | 1842 | data->governor->name); |
1803 | if (old_gov) { | 1843 | if (old_gov) { |
1804 | data->governor = old_gov; | 1844 | data->governor = old_gov; |
1805 | __cpufreq_governor(data, | 1845 | __cpufreq_governor(data, |
1806 | CPUFREQ_GOV_POLICY_INIT); | 1846 | CPUFREQ_GOV_POLICY_INIT); |
1807 | __cpufreq_governor(data, | 1847 | __cpufreq_governor(data, |
1808 | CPUFREQ_GOV_START); | 1848 | CPUFREQ_GOV_START); |
1809 | } | 1849 | } |
1810 | ret = -EINVAL; | 1850 | ret = -EINVAL; |
1811 | goto error_out; | 1851 | goto error_out; |
1812 | } | 1852 | } |
1813 | /* might be a policy change, too, so fall through */ | 1853 | /* might be a policy change, too, so fall through */ |
1814 | } | 1854 | } |
1815 | pr_debug("governor: change or update limits\n"); | 1855 | pr_debug("governor: change or update limits\n"); |
1816 | __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); | 1856 | __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); |
1817 | } | 1857 | } |
1818 | 1858 | ||
1819 | error_out: | 1859 | error_out: |
1820 | return ret; | 1860 | return ret; |
1821 | } | 1861 | } |
1822 | 1862 | ||
1823 | /** | 1863 | /** |
1824 | * cpufreq_update_policy - re-evaluate an existing cpufreq policy | 1864 | * cpufreq_update_policy - re-evaluate an existing cpufreq policy |
1825 | * @cpu: CPU which shall be re-evaluated | 1865 | * @cpu: CPU which shall be re-evaluated |
1826 | * | 1866 | * |
1827 | * Useful for policy notifiers whose requirements change | 1867 | * Useful for policy notifiers whose requirements change |
1828 | * over time. | 1868 | * over time. |
1829 | */ | 1869 | */ |
1830 | int cpufreq_update_policy(unsigned int cpu) | 1870 | int cpufreq_update_policy(unsigned int cpu) |
1831 | { | 1871 | { |
1832 | struct cpufreq_policy *data = cpufreq_cpu_get(cpu); | 1872 | struct cpufreq_policy *data = cpufreq_cpu_get(cpu); |
1833 | struct cpufreq_policy policy; | 1873 | struct cpufreq_policy policy; |
1834 | int ret; | 1874 | int ret; |
1835 | 1875 | ||
1836 | if (!data) { | 1876 | if (!data) { |
1837 | ret = -ENODEV; | 1877 | ret = -ENODEV; |
1838 | goto no_policy; | 1878 | goto no_policy; |
1839 | } | 1879 | } |
1840 | 1880 | ||
1841 | if (unlikely(lock_policy_rwsem_write(cpu))) { | 1881 | if (unlikely(lock_policy_rwsem_write(cpu))) { |
1842 | ret = -EINVAL; | 1882 | ret = -EINVAL; |
1843 | goto fail; | 1883 | goto fail; |
1844 | } | 1884 | } |
1845 | 1885 | ||
1846 | pr_debug("updating policy for CPU %u\n", cpu); | 1886 | pr_debug("updating policy for CPU %u\n", cpu); |
1847 | memcpy(&policy, data, sizeof(struct cpufreq_policy)); | 1887 | memcpy(&policy, data, sizeof(struct cpufreq_policy)); |
1848 | policy.min = data->user_policy.min; | 1888 | policy.min = data->user_policy.min; |
1849 | policy.max = data->user_policy.max; | 1889 | policy.max = data->user_policy.max; |
1850 | policy.policy = data->user_policy.policy; | 1890 | policy.policy = data->user_policy.policy; |
1851 | policy.governor = data->user_policy.governor; | 1891 | policy.governor = data->user_policy.governor; |
1852 | 1892 | ||
1853 | /* BIOS might change freq behind our back | 1893 | /* BIOS might change freq behind our back |
1854 | -> ask driver for current freq and notify governors about a change */ | 1894 | -> ask driver for current freq and notify governors about a change */ |
1855 | if (cpufreq_driver->get) { | 1895 | if (cpufreq_driver->get) { |
1856 | policy.cur = cpufreq_driver->get(cpu); | 1896 | policy.cur = cpufreq_driver->get(cpu); |
1857 | if (!data->cur) { | 1897 | if (!data->cur) { |
1858 | pr_debug("Driver did not initialize current freq"); | 1898 | pr_debug("Driver did not initialize current freq"); |
1859 | data->cur = policy.cur; | 1899 | data->cur = policy.cur; |
1860 | } else { | 1900 | } else { |
1861 | if (data->cur != policy.cur && cpufreq_driver->target) | 1901 | if (data->cur != policy.cur && cpufreq_driver->target) |
1862 | cpufreq_out_of_sync(cpu, data->cur, | 1902 | cpufreq_out_of_sync(cpu, data->cur, |
1863 | policy.cur); | 1903 | policy.cur); |
1864 | } | 1904 | } |
1865 | } | 1905 | } |
1866 | 1906 | ||
1867 | ret = __cpufreq_set_policy(data, &policy); | 1907 | ret = __cpufreq_set_policy(data, &policy); |
1868 | 1908 | ||
1869 | unlock_policy_rwsem_write(cpu); | 1909 | unlock_policy_rwsem_write(cpu); |
1870 | 1910 | ||
1871 | fail: | 1911 | fail: |
1872 | cpufreq_cpu_put(data); | 1912 | cpufreq_cpu_put(data); |
1873 | no_policy: | 1913 | no_policy: |
1874 | return ret; | 1914 | return ret; |
1875 | } | 1915 | } |
1876 | EXPORT_SYMBOL(cpufreq_update_policy); | 1916 | EXPORT_SYMBOL(cpufreq_update_policy); |
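Because cpufreq_update_policy() rebuilds the policy from user_policy and re-runs __cpufreq_set_policy(), it is the hook external constraints (thermal, platform) use to make their CPUFREQ_ADJUST clamping take effect. A sketch of such a notifier, with a hypothetical 1.2 GHz cap:

static int example_policy_notify(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event == CPUFREQ_ADJUST)
		cpufreq_verify_within_limits(policy, 0, 1200000);

	return NOTIFY_OK;
}

static struct notifier_block example_policy_nb = {
	.notifier_call = example_policy_notify,
};

/* Register once with
 *	cpufreq_register_notifier(&example_policy_nb, CPUFREQ_POLICY_NOTIFIER);
 * then call cpufreq_update_policy(cpu) whenever the cap changes. */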
1877 | 1917 | ||
1878 | static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, | 1918 | static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, |
1879 | unsigned long action, void *hcpu) | 1919 | unsigned long action, void *hcpu) |
1880 | { | 1920 | { |
1881 | unsigned int cpu = (unsigned long)hcpu; | 1921 | unsigned int cpu = (unsigned long)hcpu; |
1882 | struct device *dev; | 1922 | struct device *dev; |
1883 | 1923 | ||
1884 | dev = get_cpu_device(cpu); | 1924 | dev = get_cpu_device(cpu); |
1885 | if (dev) { | 1925 | if (dev) { |
1886 | switch (action) { | 1926 | switch (action) { |
1887 | case CPU_ONLINE: | 1927 | case CPU_ONLINE: |
1888 | cpufreq_add_dev(dev, NULL); | 1928 | cpufreq_add_dev(dev, NULL); |
1889 | break; | 1929 | break; |
1890 | case CPU_DOWN_PREPARE: | 1930 | case CPU_DOWN_PREPARE: |
1891 | case CPU_UP_CANCELED_FROZEN: | 1931 | case CPU_UP_CANCELED_FROZEN: |
1892 | __cpufreq_remove_dev(dev, NULL); | 1932 | __cpufreq_remove_dev(dev, NULL); |
1893 | break; | 1933 | break; |
1894 | case CPU_DOWN_FAILED: | 1934 | case CPU_DOWN_FAILED: |
1895 | cpufreq_add_dev(dev, NULL); | 1935 | cpufreq_add_dev(dev, NULL); |
1896 | break; | 1936 | break; |
1897 | } | 1937 | } |
1898 | } | 1938 | } |
1899 | return NOTIFY_OK; | 1939 | return NOTIFY_OK; |
1900 | } | 1940 | } |
1901 | 1941 | ||
1902 | static struct notifier_block __refdata cpufreq_cpu_notifier = { | 1942 | static struct notifier_block __refdata cpufreq_cpu_notifier = { |
1903 | .notifier_call = cpufreq_cpu_callback, | 1943 | .notifier_call = cpufreq_cpu_callback, |
1904 | }; | 1944 | }; |
1905 | 1945 | ||
1906 | /********************************************************************* | 1946 | /********************************************************************* |
1907 | * REGISTER / UNREGISTER CPUFREQ DRIVER * | 1947 | * REGISTER / UNREGISTER CPUFREQ DRIVER * |
1908 | *********************************************************************/ | 1948 | *********************************************************************/ |
1909 | 1949 | ||
1910 | /** | 1950 | /** |
1911 | * cpufreq_register_driver - register a CPU Frequency driver | 1951 | * cpufreq_register_driver - register a CPU Frequency driver |
1912 | * @driver_data: A struct cpufreq_driver containing the values | 1952 | * @driver_data: A struct cpufreq_driver containing the values |
1913 | * submitted by the CPU Frequency driver. | 1953 | * submitted by the CPU Frequency driver. |
1914 | * | 1954 | * |
1915 | * Registers a CPU Frequency driver to this core code. This code | 1955 | * Registers a CPU Frequency driver to this core code. This code |
1916 | * returns zero on success, -EBUSY when another driver got here first | 1956 | * returns zero on success, -EBUSY when another driver got here first |
1917 | * (and isn't unregistered in the meantime). | 1957 | * (and isn't unregistered in the meantime). |
1918 | * | 1958 | * |
1919 | */ | 1959 | */ |
1920 | int cpufreq_register_driver(struct cpufreq_driver *driver_data) | 1960 | int cpufreq_register_driver(struct cpufreq_driver *driver_data) |
1921 | { | 1961 | { |
1922 | unsigned long flags; | 1962 | unsigned long flags; |
1923 | int ret; | 1963 | int ret; |
1924 | 1964 | ||
1925 | if (cpufreq_disabled()) | 1965 | if (cpufreq_disabled()) |
1926 | return -ENODEV; | 1966 | return -ENODEV; |
1927 | 1967 | ||
1928 | if (!driver_data || !driver_data->verify || !driver_data->init || | 1968 | if (!driver_data || !driver_data->verify || !driver_data->init || |
1929 | ((!driver_data->setpolicy) && (!driver_data->target))) | 1969 | ((!driver_data->setpolicy) && (!driver_data->target))) |
1930 | return -EINVAL; | 1970 | return -EINVAL; |
1931 | 1971 | ||
1932 | pr_debug("trying to register driver %s\n", driver_data->name); | 1972 | pr_debug("trying to register driver %s\n", driver_data->name); |
1933 | 1973 | ||
1934 | if (driver_data->setpolicy) | 1974 | if (driver_data->setpolicy) |
1935 | driver_data->flags |= CPUFREQ_CONST_LOOPS; | 1975 | driver_data->flags |= CPUFREQ_CONST_LOOPS; |
1936 | 1976 | ||
1937 | write_lock_irqsave(&cpufreq_driver_lock, flags); | 1977 | write_lock_irqsave(&cpufreq_driver_lock, flags); |
1938 | if (cpufreq_driver) { | 1978 | if (cpufreq_driver) { |
1939 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1979 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1940 | return -EBUSY; | 1980 | return -EBUSY; |
1941 | } | 1981 | } |
1942 | cpufreq_driver = driver_data; | 1982 | cpufreq_driver = driver_data; |
1943 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1983 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1944 | 1984 | ||
1945 | ret = subsys_interface_register(&cpufreq_interface); | 1985 | ret = subsys_interface_register(&cpufreq_interface); |
1946 | if (ret) | 1986 | if (ret) |
1947 | goto err_null_driver; | 1987 | goto err_null_driver; |
1948 | 1988 | ||
1949 | if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { | 1989 | if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { |
1950 | int i; | 1990 | int i; |
1951 | ret = -ENODEV; | 1991 | ret = -ENODEV; |
1952 | 1992 | ||
1953 | /* check for at least one working CPU */ | 1993 | /* check for at least one working CPU */ |
1954 | for (i = 0; i < nr_cpu_ids; i++) | 1994 | for (i = 0; i < nr_cpu_ids; i++) |
1955 | if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) { | 1995 | if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) { |
1956 | ret = 0; | 1996 | ret = 0; |
1957 | break; | 1997 | break; |
1958 | } | 1998 | } |
1959 | 1999 | ||
1960 | /* if all ->init() calls failed, unregister */ | 2000 | /* if all ->init() calls failed, unregister */ |
1961 | if (ret) { | 2001 | if (ret) { |
1962 | pr_debug("no CPU initialized for driver %s\n", | 2002 | pr_debug("no CPU initialized for driver %s\n", |
1963 | driver_data->name); | 2003 | driver_data->name); |
1964 | goto err_if_unreg; | 2004 | goto err_if_unreg; |
1965 | } | 2005 | } |
1966 | } | 2006 | } |
1967 | 2007 | ||
1968 | register_hotcpu_notifier(&cpufreq_cpu_notifier); | 2008 | register_hotcpu_notifier(&cpufreq_cpu_notifier); |
1969 | pr_debug("driver %s up and running\n", driver_data->name); | 2009 | pr_debug("driver %s up and running\n", driver_data->name); |
1970 | 2010 | ||
1971 | return 0; | 2011 | return 0; |
1972 | err_if_unreg: | 2012 | err_if_unreg: |
1973 | subsys_interface_unregister(&cpufreq_interface); | 2013 | subsys_interface_unregister(&cpufreq_interface); |
1974 | err_null_driver: | 2014 | err_null_driver: |
1975 | write_lock_irqsave(&cpufreq_driver_lock, flags); | 2015 | write_lock_irqsave(&cpufreq_driver_lock, flags); |
1976 | cpufreq_driver = NULL; | 2016 | cpufreq_driver = NULL; |
1977 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 2017 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
1978 | return ret; | 2018 | return ret; |
1979 | } | 2019 | } |
1980 | EXPORT_SYMBOL_GPL(cpufreq_register_driver); | 2020 | EXPORT_SYMBOL_GPL(cpufreq_register_driver); |
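The validation at the top of cpufreq_register_driver() spells out the minimal driver contract: ->init and ->verify are mandatory, plus at least one of ->target or ->setpolicy. A sketch of such a driver (callback bodies omitted and all names hypothetical):

static int example_cpu_init(struct cpufreq_policy *policy);
static int example_verify(struct cpufreq_policy *policy);
static int example_target(struct cpufreq_policy *policy,
			  unsigned int target_freq, unsigned int relation);

static struct cpufreq_driver example_driver = {
	.name	= "example",
	.flags	= CPUFREQ_STICKY,	/* keep registered even if no CPU inits */
	.init	= example_cpu_init,
	.verify	= example_verify,
	.target	= example_target,
};

static int __init example_driver_init(void)
{
	return cpufreq_register_driver(&example_driver);
}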
1981 | 2021 | ||
1982 | 2022 | ||
1983 | /** | 2023 | /** |
1984 | * cpufreq_unregister_driver - unregister the current CPUFreq driver | 2024 | * cpufreq_unregister_driver - unregister the current CPUFreq driver |
1985 | * | 2025 | * |
1986 | * Unregister the current CPUFreq driver. Only call this if you have | 2026 | * Unregister the current CPUFreq driver. Only call this if you have |
1987 | * the right to do so, i.e. if you have succeeded in initialising before! | 2027 | * the right to do so, i.e. if you have succeeded in initialising before! |
1988 | * Returns zero if successful, and -EINVAL if the cpufreq_driver is | 2028 | * Returns zero if successful, and -EINVAL if the cpufreq_driver is |
1989 | * currently not initialised. | 2029 | * currently not initialised. |
1990 | */ | 2030 | */ |
1991 | int cpufreq_unregister_driver(struct cpufreq_driver *driver) | 2031 | int cpufreq_unregister_driver(struct cpufreq_driver *driver) |
1992 | { | 2032 | { |
1993 | unsigned long flags; | 2033 | unsigned long flags; |
1994 | 2034 | ||
1995 | if (!cpufreq_driver || (driver != cpufreq_driver)) | 2035 | if (!cpufreq_driver || (driver != cpufreq_driver)) |
1996 | return -EINVAL; | 2036 | return -EINVAL; |
1997 | 2037 | ||
1998 | pr_debug("unregistering driver %s\n", driver->name); | 2038 | pr_debug("unregistering driver %s\n", driver->name); |
1999 | 2039 | ||
2000 | subsys_interface_unregister(&cpufreq_interface); | 2040 | subsys_interface_unregister(&cpufreq_interface); |
2001 | unregister_hotcpu_notifier(&cpufreq_cpu_notifier); | 2041 | unregister_hotcpu_notifier(&cpufreq_cpu_notifier); |
2002 | 2042 | ||
2003 | write_lock_irqsave(&cpufreq_driver_lock, flags); | 2043 | write_lock_irqsave(&cpufreq_driver_lock, flags); |
2004 | cpufreq_driver = NULL; | 2044 | cpufreq_driver = NULL; |
2005 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 2045 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
2006 | 2046 | ||
2007 | return 0; | 2047 | return 0; |
2008 | } | 2048 | } |
2009 | EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); | 2049 | EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); |
2010 | 2050 | ||
2011 | static int __init cpufreq_core_init(void) | 2051 | static int __init cpufreq_core_init(void) |
2012 | { | 2052 | { |
2013 | int cpu; | 2053 | int cpu; |
2014 | 2054 | ||
2015 | if (cpufreq_disabled()) | 2055 | if (cpufreq_disabled()) |
2016 | return -ENODEV; | 2056 | return -ENODEV; |
2017 | 2057 | ||
2018 | for_each_possible_cpu(cpu) { | 2058 | for_each_possible_cpu(cpu) { |
2019 | per_cpu(cpufreq_policy_cpu, cpu) = -1; | 2059 | per_cpu(cpufreq_policy_cpu, cpu) = -1; |
2020 | init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); | 2060 | init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); |
2021 | } | 2061 | } |
2022 | 2062 | ||
2023 | cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj); | 2063 | cpufreq_global_kobject = kobject_create(); |
2024 | BUG_ON(!cpufreq_global_kobject); | 2064 | BUG_ON(!cpufreq_global_kobject); |
2025 | register_syscore_ops(&cpufreq_syscore_ops); | 2065 | register_syscore_ops(&cpufreq_syscore_ops); |
2026 | 2066 | ||
2027 | return 0; | 2067 | return 0; |
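The kobject_create() call on the right-hand side is the heart of this patch: the global kobject is still allocated at core init, but no longer kobject_add()ed, so the empty cpu/cpufreq directory stops appearing unconditionally. Users that need it now take and drop it on demand; a sketch of the pattern (roughly what the new cpufreq_sysfs_create_file() helper wraps):

static int example_add_global_file(const struct attribute *attr)
{
	int ret;

	ret = cpufreq_get_global_kobject();	/* kobject_add() on first user */
	if (ret)
		return ret;

	ret = sysfs_create_file(cpufreq_global_kobject, attr);
	if (ret)
		cpufreq_put_global_kobject();	/* kobject_del() on last user */

	return ret;
}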
drivers/cpufreq/cpufreq_governor.c
1 | /* | 1 | /* |
2 | * drivers/cpufreq/cpufreq_governor.c | 2 | * drivers/cpufreq/cpufreq_governor.c |
3 | * | 3 | * |
4 | * CPUFREQ governors common code | 4 | * CPUFREQ governors common code |
5 | * | 5 | * |
6 | * Copyright (C) 2001 Russell King | 6 | * Copyright (C) 2001 Russell King |
7 | * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. | 7 | * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. |
8 | * (C) 2003 Jun Nakajima <jun.nakajima@intel.com> | 8 | * (C) 2003 Jun Nakajima <jun.nakajima@intel.com> |
9 | * (C) 2009 Alexander Clouter <alex@digriz.org.uk> | 9 | * (C) 2009 Alexander Clouter <alex@digriz.org.uk> |
10 | * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org> | 10 | * (c) 2012 Viresh Kumar <viresh.kumar@linaro.org> |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License version 2 as | 13 | * it under the terms of the GNU General Public License version 2 as |
14 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 17 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
18 | 18 | ||
19 | #include <asm/cputime.h> | 19 | #include <asm/cputime.h> |
20 | #include <linux/cpufreq.h> | 20 | #include <linux/cpufreq.h> |
21 | #include <linux/cpumask.h> | 21 | #include <linux/cpumask.h> |
22 | #include <linux/export.h> | 22 | #include <linux/export.h> |
23 | #include <linux/kernel_stat.h> | 23 | #include <linux/kernel_stat.h> |
24 | #include <linux/mutex.h> | 24 | #include <linux/mutex.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/workqueue.h> | 27 | #include <linux/workqueue.h> |
28 | 28 | ||
29 | #include "cpufreq_governor.h" | 29 | #include "cpufreq_governor.h" |
30 | 30 | ||
31 | static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data) | 31 | static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data) |
32 | { | 32 | { |
33 | if (have_governor_per_policy()) | 33 | if (have_governor_per_policy()) |
34 | return dbs_data->cdata->attr_group_gov_pol; | 34 | return dbs_data->cdata->attr_group_gov_pol; |
35 | else | 35 | else |
36 | return dbs_data->cdata->attr_group_gov_sys; | 36 | return dbs_data->cdata->attr_group_gov_sys; |
37 | } | 37 | } |
38 | 38 | ||
39 | void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) | 39 | void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) |
40 | { | 40 | { |
41 | struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); | 41 | struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); |
42 | struct od_dbs_tuners *od_tuners = dbs_data->tuners; | 42 | struct od_dbs_tuners *od_tuners = dbs_data->tuners; |
43 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; | 43 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; |
44 | struct cpufreq_policy *policy; | 44 | struct cpufreq_policy *policy; |
45 | unsigned int max_load = 0; | 45 | unsigned int max_load = 0; |
46 | unsigned int ignore_nice; | 46 | unsigned int ignore_nice; |
47 | unsigned int j; | 47 | unsigned int j; |
48 | 48 | ||
49 | if (dbs_data->cdata->governor == GOV_ONDEMAND) | 49 | if (dbs_data->cdata->governor == GOV_ONDEMAND) |
50 | ignore_nice = od_tuners->ignore_nice; | 50 | ignore_nice = od_tuners->ignore_nice; |
51 | else | 51 | else |
52 | ignore_nice = cs_tuners->ignore_nice; | 52 | ignore_nice = cs_tuners->ignore_nice; |
53 | 53 | ||
54 | policy = cdbs->cur_policy; | 54 | policy = cdbs->cur_policy; |
55 | 55 | ||
56 | /* Get Absolute Load (in terms of freq for ondemand gov) */ | 56 | /* Get Absolute Load (in terms of freq for ondemand gov) */ |
57 | for_each_cpu(j, policy->cpus) { | 57 | for_each_cpu(j, policy->cpus) { |
58 | struct cpu_dbs_common_info *j_cdbs; | 58 | struct cpu_dbs_common_info *j_cdbs; |
59 | u64 cur_wall_time, cur_idle_time; | 59 | u64 cur_wall_time, cur_idle_time; |
60 | unsigned int idle_time, wall_time; | 60 | unsigned int idle_time, wall_time; |
61 | unsigned int load; | 61 | unsigned int load; |
62 | int io_busy = 0; | 62 | int io_busy = 0; |
63 | 63 | ||
64 | j_cdbs = dbs_data->cdata->get_cpu_cdbs(j); | 64 | j_cdbs = dbs_data->cdata->get_cpu_cdbs(j); |
65 | 65 | ||
66 | /* | 66 | /* |
67 | * For the purpose of ondemand, waiting for disk IO is | 67 | * For the purpose of ondemand, waiting for disk IO is |
68 | * an indication that you're performance critical, and | 68 | * an indication that you're performance critical, and |
69 | * not that the system is actually idle. So do not add | 69 | * not that the system is actually idle. So do not add |
70 | * the iowait time to the cpu idle time. | 70 | * the iowait time to the cpu idle time. |
71 | */ | 71 | */ |
72 | if (dbs_data->cdata->governor == GOV_ONDEMAND) | 72 | if (dbs_data->cdata->governor == GOV_ONDEMAND) |
73 | io_busy = od_tuners->io_is_busy; | 73 | io_busy = od_tuners->io_is_busy; |
74 | cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy); | 74 | cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy); |
75 | 75 | ||
76 | wall_time = (unsigned int) | 76 | wall_time = (unsigned int) |
77 | (cur_wall_time - j_cdbs->prev_cpu_wall); | 77 | (cur_wall_time - j_cdbs->prev_cpu_wall); |
78 | j_cdbs->prev_cpu_wall = cur_wall_time; | 78 | j_cdbs->prev_cpu_wall = cur_wall_time; |
79 | 79 | ||
80 | idle_time = (unsigned int) | 80 | idle_time = (unsigned int) |
81 | (cur_idle_time - j_cdbs->prev_cpu_idle); | 81 | (cur_idle_time - j_cdbs->prev_cpu_idle); |
82 | j_cdbs->prev_cpu_idle = cur_idle_time; | 82 | j_cdbs->prev_cpu_idle = cur_idle_time; |
83 | 83 | ||
84 | if (ignore_nice) { | 84 | if (ignore_nice) { |
85 | u64 cur_nice; | 85 | u64 cur_nice; |
86 | unsigned long cur_nice_jiffies; | 86 | unsigned long cur_nice_jiffies; |
87 | 87 | ||
88 | cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - | 88 | cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - |
89 | cdbs->prev_cpu_nice; | 89 | cdbs->prev_cpu_nice; |
90 | /* | 90 | /* |
91 | * Assumption: nice time between sampling periods will | 91 | * Assumption: nice time between sampling periods will |
92 | * be less than 2^32 jiffies for 32 bit sys | 92 | * be less than 2^32 jiffies for 32 bit sys |
93 | */ | 93 | */ |
94 | cur_nice_jiffies = (unsigned long) | 94 | cur_nice_jiffies = (unsigned long) |
95 | cputime64_to_jiffies64(cur_nice); | 95 | cputime64_to_jiffies64(cur_nice); |
96 | 96 | ||
97 | cdbs->prev_cpu_nice = | 97 | cdbs->prev_cpu_nice = |
98 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | 98 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
99 | idle_time += jiffies_to_usecs(cur_nice_jiffies); | 99 | idle_time += jiffies_to_usecs(cur_nice_jiffies); |
100 | } | 100 | } |
101 | 101 | ||
102 | if (unlikely(!wall_time || wall_time < idle_time)) | 102 | if (unlikely(!wall_time || wall_time < idle_time)) |
103 | continue; | 103 | continue; |
104 | 104 | ||
105 | load = 100 * (wall_time - idle_time) / wall_time; | 105 | load = 100 * (wall_time - idle_time) / wall_time; |
106 | 106 | ||
107 | if (dbs_data->cdata->governor == GOV_ONDEMAND) { | 107 | if (dbs_data->cdata->governor == GOV_ONDEMAND) { |
108 | int freq_avg = __cpufreq_driver_getavg(policy, j); | 108 | int freq_avg = __cpufreq_driver_getavg(policy, j); |
109 | if (freq_avg <= 0) | 109 | if (freq_avg <= 0) |
110 | freq_avg = policy->cur; | 110 | freq_avg = policy->cur; |
111 | 111 | ||
112 | load *= freq_avg; | 112 | load *= freq_avg; |
113 | } | 113 | } |
114 | 114 | ||
115 | if (load > max_load) | 115 | if (load > max_load) |
116 | max_load = load; | 116 | max_load = load; |
117 | } | 117 | } |
118 | 118 | ||
119 | dbs_data->cdata->gov_check_cpu(cpu, max_load); | 119 | dbs_data->cdata->gov_check_cpu(cpu, max_load); |
120 | } | 120 | } |
121 | EXPORT_SYMBOL_GPL(dbs_check_cpu); | 121 | EXPORT_SYMBOL_GPL(dbs_check_cpu); |
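A worked example of the computation above: with wall_time = 100000 us and idle_time = 25000 us since the previous sample, load = 100 * (100000 - 25000) / 100000 = 75. Under ondemand that percentage is then scaled by the average frequency, so freq_avg = 800000 kHz yields an absolute load of 75 * 800000 = 60000000; the maximum of these per-CPU values across policy->cpus is what gov_check_cpu() finally sees.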
122 | 122 | ||
123 | static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data, | 123 | static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data, |
124 | unsigned int delay) | 124 | unsigned int delay) |
125 | { | 125 | { |
126 | struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); | 126 | struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); |
127 | 127 | ||
128 | mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay); | 128 | mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay); |
129 | } | 129 | } |
130 | 130 | ||
131 | void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, | 131 | void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy, |
132 | unsigned int delay, bool all_cpus) | 132 | unsigned int delay, bool all_cpus) |
133 | { | 133 | { |
134 | int i; | 134 | int i; |
135 | 135 | ||
136 | if (!all_cpus) { | 136 | if (!all_cpus) { |
137 | __gov_queue_work(smp_processor_id(), dbs_data, delay); | 137 | __gov_queue_work(smp_processor_id(), dbs_data, delay); |
138 | } else { | 138 | } else { |
139 | for_each_cpu(i, policy->cpus) | 139 | for_each_cpu(i, policy->cpus) |
140 | __gov_queue_work(i, dbs_data, delay); | 140 | __gov_queue_work(i, dbs_data, delay); |
141 | } | 141 | } |
142 | } | 142 | } |
143 | EXPORT_SYMBOL_GPL(gov_queue_work); | 143 | EXPORT_SYMBOL_GPL(gov_queue_work); |
144 | 144 | ||
145 | static inline void gov_cancel_work(struct dbs_data *dbs_data, | 145 | static inline void gov_cancel_work(struct dbs_data *dbs_data, |
146 | struct cpufreq_policy *policy) | 146 | struct cpufreq_policy *policy) |
147 | { | 147 | { |
148 | struct cpu_dbs_common_info *cdbs; | 148 | struct cpu_dbs_common_info *cdbs; |
149 | int i; | 149 | int i; |
150 | 150 | ||
151 | for_each_cpu(i, policy->cpus) { | 151 | for_each_cpu(i, policy->cpus) { |
152 | cdbs = dbs_data->cdata->get_cpu_cdbs(i); | 152 | cdbs = dbs_data->cdata->get_cpu_cdbs(i); |
153 | cancel_delayed_work_sync(&cdbs->work); | 153 | cancel_delayed_work_sync(&cdbs->work); |
154 | } | 154 | } |
155 | } | 155 | } |
156 | 156 | ||
157 | /* Returns true if the cpu load needs to be evaluated again */ | 157 | /* Returns true if the cpu load needs to be evaluated again */ |
158 | bool need_load_eval(struct cpu_dbs_common_info *cdbs, | 158 | bool need_load_eval(struct cpu_dbs_common_info *cdbs, |
159 | unsigned int sampling_rate) | 159 | unsigned int sampling_rate) |
160 | { | 160 | { |
161 | if (policy_is_shared(cdbs->cur_policy)) { | 161 | if (policy_is_shared(cdbs->cur_policy)) { |
162 | ktime_t time_now = ktime_get(); | 162 | ktime_t time_now = ktime_get(); |
163 | s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp); | 163 | s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp); |
164 | 164 | ||
165 | /* Do nothing if we recently have sampled */ | 165 | /* Do nothing if we recently have sampled */ |
166 | if (delta_us < (s64)(sampling_rate / 2)) | 166 | if (delta_us < (s64)(sampling_rate / 2)) |
167 | return false; | 167 | return false; |
168 | else | 168 | else |
169 | cdbs->time_stamp = time_now; | 169 | cdbs->time_stamp = time_now; |
170 | } | 170 | } |
171 | 171 | ||
172 | return true; | 172 | return true; |
173 | } | 173 | } |
174 | EXPORT_SYMBOL_GPL(need_load_eval); | 174 | EXPORT_SYMBOL_GPL(need_load_eval); |
175 | 175 | ||
176 | static void set_sampling_rate(struct dbs_data *dbs_data, | 176 | static void set_sampling_rate(struct dbs_data *dbs_data, |
177 | unsigned int sampling_rate) | 177 | unsigned int sampling_rate) |
178 | { | 178 | { |
179 | if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { | 179 | if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { |
180 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; | 180 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; |
181 | cs_tuners->sampling_rate = sampling_rate; | 181 | cs_tuners->sampling_rate = sampling_rate; |
182 | } else { | 182 | } else { |
183 | struct od_dbs_tuners *od_tuners = dbs_data->tuners; | 183 | struct od_dbs_tuners *od_tuners = dbs_data->tuners; |
184 | od_tuners->sampling_rate = sampling_rate; | 184 | od_tuners->sampling_rate = sampling_rate; |
185 | } | 185 | } |
186 | } | 186 | } |
187 | 187 | ||
188 | int cpufreq_governor_dbs(struct cpufreq_policy *policy, | 188 | int cpufreq_governor_dbs(struct cpufreq_policy *policy, |
189 | struct common_dbs_data *cdata, unsigned int event) | 189 | struct common_dbs_data *cdata, unsigned int event) |
190 | { | 190 | { |
191 | struct dbs_data *dbs_data; | 191 | struct dbs_data *dbs_data; |
192 | struct od_cpu_dbs_info_s *od_dbs_info = NULL; | 192 | struct od_cpu_dbs_info_s *od_dbs_info = NULL; |
193 | struct cs_cpu_dbs_info_s *cs_dbs_info = NULL; | 193 | struct cs_cpu_dbs_info_s *cs_dbs_info = NULL; |
194 | struct od_ops *od_ops = NULL; | 194 | struct od_ops *od_ops = NULL; |
195 | struct od_dbs_tuners *od_tuners = NULL; | 195 | struct od_dbs_tuners *od_tuners = NULL; |
196 | struct cs_dbs_tuners *cs_tuners = NULL; | 196 | struct cs_dbs_tuners *cs_tuners = NULL; |
197 | struct cpu_dbs_common_info *cpu_cdbs; | 197 | struct cpu_dbs_common_info *cpu_cdbs; |
198 | unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu; | 198 | unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu; |
199 | int io_busy = 0; | 199 | int io_busy = 0; |
200 | int rc; | 200 | int rc; |
201 | 201 | ||
202 | if (have_governor_per_policy()) | 202 | if (have_governor_per_policy()) |
203 | dbs_data = policy->governor_data; | 203 | dbs_data = policy->governor_data; |
204 | else | 204 | else |
205 | dbs_data = cdata->gdbs_data; | 205 | dbs_data = cdata->gdbs_data; |
206 | 206 | ||
207 | WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)); | 207 | WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT)); |
208 | 208 | ||
209 | switch (event) { | 209 | switch (event) { |
210 | case CPUFREQ_GOV_POLICY_INIT: | 210 | case CPUFREQ_GOV_POLICY_INIT: |
211 | if (have_governor_per_policy()) { | 211 | if (have_governor_per_policy()) { |
212 | WARN_ON(dbs_data); | 212 | WARN_ON(dbs_data); |
213 | } else if (dbs_data) { | 213 | } else if (dbs_data) { |
214 | dbs_data->usage_count++; | 214 | dbs_data->usage_count++; |
215 | policy->governor_data = dbs_data; | 215 | policy->governor_data = dbs_data; |
216 | return 0; | 216 | return 0; |
217 | } | 217 | } |
218 | 218 | ||
219 | dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL); | 219 | dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL); |
220 | if (!dbs_data) { | 220 | if (!dbs_data) { |
221 | pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__); | 221 | pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__); |
222 | return -ENOMEM; | 222 | return -ENOMEM; |
223 | } | 223 | } |
224 | 224 | ||
225 | dbs_data->cdata = cdata; | 225 | dbs_data->cdata = cdata; |
226 | dbs_data->usage_count = 1; | 226 | dbs_data->usage_count = 1; |
227 | rc = cdata->init(dbs_data); | 227 | rc = cdata->init(dbs_data); |
228 | if (rc) { | 228 | if (rc) { |
229 | pr_err("%s: POLICY_INIT: init() failed\n", __func__); | 229 | pr_err("%s: POLICY_INIT: init() failed\n", __func__); |
230 | kfree(dbs_data); | 230 | kfree(dbs_data); |
231 | return rc; | 231 | return rc; |
232 | } | 232 | } |
233 | 233 | ||
234 | if (!have_governor_per_policy()) | ||
235 | WARN_ON(cpufreq_get_global_kobject()); | ||
236 | |||
234 | rc = sysfs_create_group(get_governor_parent_kobj(policy), | 237 | rc = sysfs_create_group(get_governor_parent_kobj(policy), |
235 | get_sysfs_attr(dbs_data)); | 238 | get_sysfs_attr(dbs_data)); |
236 | if (rc) { | 239 | if (rc) { |
237 | cdata->exit(dbs_data); | 240 | cdata->exit(dbs_data); |
238 | kfree(dbs_data); | 241 | kfree(dbs_data); |
239 | return rc; | 242 | return rc; |
240 | } | 243 | } |
241 | 244 | ||
242 | policy->governor_data = dbs_data; | 245 | policy->governor_data = dbs_data; |
243 | 246 | ||
244 | /* policy latency is in ns. Convert it to us first */ | 247 | /* policy latency is in ns. Convert it to us first */ |
245 | latency = policy->cpuinfo.transition_latency / 1000; | 248 | latency = policy->cpuinfo.transition_latency / 1000; |
246 | if (latency == 0) | 249 | if (latency == 0) |
247 | latency = 1; | 250 | latency = 1; |
248 | 251 | ||
249 | /* Bring kernel and HW constraints together */ | 252 | /* Bring kernel and HW constraints together */ |
250 | dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, | 253 | dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, |
251 | MIN_LATENCY_MULTIPLIER * latency); | 254 | MIN_LATENCY_MULTIPLIER * latency); |
252 | set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, | 255 | set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, |
253 | latency * LATENCY_MULTIPLIER)); | 256 | latency * LATENCY_MULTIPLIER)); |
254 | 257 | ||
255 | if ((cdata->governor == GOV_CONSERVATIVE) && | 258 | if ((cdata->governor == GOV_CONSERVATIVE) && |
256 | (!policy->governor->initialized)) { | 259 | (!policy->governor->initialized)) { |
257 | struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; | 260 | struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; |
258 | 261 | ||
259 | cpufreq_register_notifier(cs_ops->notifier_block, | 262 | cpufreq_register_notifier(cs_ops->notifier_block, |
260 | CPUFREQ_TRANSITION_NOTIFIER); | 263 | CPUFREQ_TRANSITION_NOTIFIER); |
261 | } | 264 | } |
262 | 265 | ||
263 | if (!have_governor_per_policy()) | 266 | if (!have_governor_per_policy()) |
264 | cdata->gdbs_data = dbs_data; | 267 | cdata->gdbs_data = dbs_data; |
265 | 268 | ||
266 | return 0; | 269 | return 0; |
267 | case CPUFREQ_GOV_POLICY_EXIT: | 270 | case CPUFREQ_GOV_POLICY_EXIT: |
268 | if (!--dbs_data->usage_count) { | 271 | if (!--dbs_data->usage_count) { |
269 | sysfs_remove_group(get_governor_parent_kobj(policy), | 272 | sysfs_remove_group(get_governor_parent_kobj(policy), |
270 | get_sysfs_attr(dbs_data)); | 273 | get_sysfs_attr(dbs_data)); |
274 | |||
275 | if (!have_governor_per_policy()) | ||
276 | cpufreq_put_global_kobject(); | ||
271 | 277 | ||
272 | if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) && | 278 | if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) && |
273 | (policy->governor->initialized == 1)) { | 279 | (policy->governor->initialized == 1)) { |
274 | struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; | 280 | struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; |
275 | 281 | ||
276 | cpufreq_unregister_notifier(cs_ops->notifier_block, | 282 | cpufreq_unregister_notifier(cs_ops->notifier_block, |
277 | CPUFREQ_TRANSITION_NOTIFIER); | 283 | CPUFREQ_TRANSITION_NOTIFIER); |
278 | } | 284 | } |
279 | 285 | ||
280 | cdata->exit(dbs_data); | 286 | cdata->exit(dbs_data); |
281 | kfree(dbs_data); | 287 | kfree(dbs_data); |
282 | cdata->gdbs_data = NULL; | 288 | cdata->gdbs_data = NULL; |
283 | } | 289 | } |
284 | 290 | ||
285 | policy->governor_data = NULL; | 291 | policy->governor_data = NULL; |
286 | return 0; | 292 | return 0; |
287 | } | 293 | } |
288 | 294 | ||
289 | cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); | 295 | cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu); |
290 | 296 | ||
291 | if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { | 297 | if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { |
292 | cs_tuners = dbs_data->tuners; | 298 | cs_tuners = dbs_data->tuners; |
293 | cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); | 299 | cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); |
294 | sampling_rate = cs_tuners->sampling_rate; | 300 | sampling_rate = cs_tuners->sampling_rate; |
295 | ignore_nice = cs_tuners->ignore_nice; | 301 | ignore_nice = cs_tuners->ignore_nice; |
296 | } else { | 302 | } else { |
297 | od_tuners = dbs_data->tuners; | 303 | od_tuners = dbs_data->tuners; |
298 | od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); | 304 | od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); |
299 | sampling_rate = od_tuners->sampling_rate; | 305 | sampling_rate = od_tuners->sampling_rate; |
300 | ignore_nice = od_tuners->ignore_nice; | 306 | ignore_nice = od_tuners->ignore_nice; |
301 | od_ops = dbs_data->cdata->gov_ops; | 307 | od_ops = dbs_data->cdata->gov_ops; |
302 | io_busy = od_tuners->io_is_busy; | 308 | io_busy = od_tuners->io_is_busy; |
303 | } | 309 | } |
304 | 310 | ||
305 | switch (event) { | 311 | switch (event) { |
306 | case CPUFREQ_GOV_START: | 312 | case CPUFREQ_GOV_START: |
307 | if (!policy->cur) | 313 | if (!policy->cur) |
308 | return -EINVAL; | 314 | return -EINVAL; |
309 | 315 | ||
310 | mutex_lock(&dbs_data->mutex); | 316 | mutex_lock(&dbs_data->mutex); |
311 | 317 | ||
312 | for_each_cpu(j, policy->cpus) { | 318 | for_each_cpu(j, policy->cpus) { |
313 | struct cpu_dbs_common_info *j_cdbs = | 319 | struct cpu_dbs_common_info *j_cdbs = |
314 | dbs_data->cdata->get_cpu_cdbs(j); | 320 | dbs_data->cdata->get_cpu_cdbs(j); |
315 | 321 | ||
316 | j_cdbs->cpu = j; | 322 | j_cdbs->cpu = j; |
317 | j_cdbs->cur_policy = policy; | 323 | j_cdbs->cur_policy = policy; |
318 | j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, | 324 | j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, |
319 | &j_cdbs->prev_cpu_wall, io_busy); | 325 | &j_cdbs->prev_cpu_wall, io_busy); |
320 | if (ignore_nice) | 326 | if (ignore_nice) |
321 | j_cdbs->prev_cpu_nice = | 327 | j_cdbs->prev_cpu_nice = |
322 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | 328 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
323 | 329 | ||
324 | mutex_init(&j_cdbs->timer_mutex); | 330 | mutex_init(&j_cdbs->timer_mutex); |
325 | INIT_DEFERRABLE_WORK(&j_cdbs->work, | 331 | INIT_DEFERRABLE_WORK(&j_cdbs->work, |
326 | dbs_data->cdata->gov_dbs_timer); | 332 | dbs_data->cdata->gov_dbs_timer); |
327 | } | 333 | } |
328 | 334 | ||
329 | /* | 335 | /* |
330 | * conservative does not implement micro like ondemand | 336 | * conservative does not implement micro like ondemand |
331 | * governor, thus we are bound to jiffes/HZ | 337 | * governor, thus we are bound to jiffes/HZ |
332 | */ | 338 | */ |
333 | if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { | 339 | if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { |
334 | cs_dbs_info->down_skip = 0; | 340 | cs_dbs_info->down_skip = 0; |
335 | cs_dbs_info->enable = 1; | 341 | cs_dbs_info->enable = 1; |
336 | cs_dbs_info->requested_freq = policy->cur; | 342 | cs_dbs_info->requested_freq = policy->cur; |
337 | } else { | 343 | } else { |
338 | od_dbs_info->rate_mult = 1; | 344 | od_dbs_info->rate_mult = 1; |
339 | od_dbs_info->sample_type = OD_NORMAL_SAMPLE; | 345 | od_dbs_info->sample_type = OD_NORMAL_SAMPLE; |
340 | od_ops->powersave_bias_init_cpu(cpu); | 346 | od_ops->powersave_bias_init_cpu(cpu); |
341 | } | 347 | } |
342 | 348 | ||
343 | mutex_unlock(&dbs_data->mutex); | 349 | mutex_unlock(&dbs_data->mutex); |
344 | 350 | ||
345 | /* Initialize timer time stamp */ | 351 | /* Initialize timer time stamp */ |
346 | cpu_cdbs->time_stamp = ktime_get(); | 352 | cpu_cdbs->time_stamp = ktime_get(); |
347 | 353 | ||
348 | gov_queue_work(dbs_data, policy, | 354 | gov_queue_work(dbs_data, policy, |
349 | delay_for_sampling_rate(sampling_rate), true); | 355 | delay_for_sampling_rate(sampling_rate), true); |
350 | break; | 356 | break; |
351 | 357 | ||
352 | case CPUFREQ_GOV_STOP: | 358 | case CPUFREQ_GOV_STOP: |
353 | if (dbs_data->cdata->governor == GOV_CONSERVATIVE) | 359 | if (dbs_data->cdata->governor == GOV_CONSERVATIVE) |
354 | cs_dbs_info->enable = 0; | 360 | cs_dbs_info->enable = 0; |
355 | 361 | ||
356 | gov_cancel_work(dbs_data, policy); | 362 | gov_cancel_work(dbs_data, policy); |
357 | 363 | ||
358 | mutex_lock(&dbs_data->mutex); | 364 | mutex_lock(&dbs_data->mutex); |
359 | mutex_destroy(&cpu_cdbs->timer_mutex); | 365 | mutex_destroy(&cpu_cdbs->timer_mutex); |
360 | 366 | ||
361 | mutex_unlock(&dbs_data->mutex); | 367 | mutex_unlock(&dbs_data->mutex); |
362 | 368 | ||
363 | break; | 369 | break; |
364 | 370 | ||
365 | case CPUFREQ_GOV_LIMITS: | 371 | case CPUFREQ_GOV_LIMITS: |
366 | mutex_lock(&cpu_cdbs->timer_mutex); | 372 | mutex_lock(&cpu_cdbs->timer_mutex); |
367 | if (policy->max < cpu_cdbs->cur_policy->cur) | 373 | if (policy->max < cpu_cdbs->cur_policy->cur) |
368 | __cpufreq_driver_target(cpu_cdbs->cur_policy, | 374 | __cpufreq_driver_target(cpu_cdbs->cur_policy, |
369 | policy->max, CPUFREQ_RELATION_H); | 375 | policy->max, CPUFREQ_RELATION_H); |
370 | else if (policy->min > cpu_cdbs->cur_policy->cur) | 376 | else if (policy->min > cpu_cdbs->cur_policy->cur) |
371 | __cpufreq_driver_target(cpu_cdbs->cur_policy, | 377 | __cpufreq_driver_target(cpu_cdbs->cur_policy, |
372 | policy->min, CPUFREQ_RELATION_L); | 378 | policy->min, CPUFREQ_RELATION_L); |
373 | dbs_check_cpu(dbs_data, cpu); | 379 | dbs_check_cpu(dbs_data, cpu); |
374 | mutex_unlock(&cpu_cdbs->timer_mutex); | 380 | mutex_unlock(&cpu_cdbs->timer_mutex); |
375 | break; | 381 | break; |
376 | } | 382 | } |
377 | return 0; | 383 | return 0; |
378 | } | 384 | } |
379 | EXPORT_SYMBOL_GPL(cpufreq_governor_dbs); | 385 | EXPORT_SYMBOL_GPL(cpufreq_governor_dbs); |
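For context, each dbs-based governor funnels its ->governor callback into this common entry point with its own common_dbs_data. A sketch modelled on ondemand as of this series (od_dbs_cdata lives in cpufreq_ondemand.c; treat the exact names as illustrative):

static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
}

static struct cpufreq_governor cpufreq_gov_ondemand = {
	.name			= "ondemand",
	.governor		= od_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};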
380 | 386 |
include/linux/cpufreq.h
1 | /* | 1 | /* |
2 | * linux/include/linux/cpufreq.h | 2 | * linux/include/linux/cpufreq.h |
3 | * | 3 | * |
4 | * Copyright (C) 2001 Russell King | 4 | * Copyright (C) 2001 Russell King |
5 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> | 5 | * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #ifndef _LINUX_CPUFREQ_H | 11 | #ifndef _LINUX_CPUFREQ_H |
12 | #define _LINUX_CPUFREQ_H | 12 | #define _LINUX_CPUFREQ_H |
13 | 13 | ||
14 | #include <asm/cputime.h> | 14 | #include <asm/cputime.h> |
15 | #include <linux/mutex.h> | 15 | #include <linux/mutex.h> |
16 | #include <linux/notifier.h> | 16 | #include <linux/notifier.h> |
17 | #include <linux/threads.h> | 17 | #include <linux/threads.h> |
18 | #include <linux/kobject.h> | 18 | #include <linux/kobject.h> |
19 | #include <linux/sysfs.h> | 19 | #include <linux/sysfs.h> |
20 | #include <linux/completion.h> | 20 | #include <linux/completion.h> |
21 | #include <linux/workqueue.h> | 21 | #include <linux/workqueue.h> |
22 | #include <linux/cpumask.h> | 22 | #include <linux/cpumask.h> |
23 | #include <asm/div64.h> | 23 | #include <asm/div64.h> |
24 | 24 | ||
25 | #define CPUFREQ_NAME_LEN 16 | 25 | #define CPUFREQ_NAME_LEN 16 |
26 | /* Print length for names. Extra 1 space for accommodating '\n' in prints */ | 26 | /* Print length for names. Extra 1 space for accommodating '\n' in prints */ |
27 | #define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1) | 27 | #define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1) |
28 | 28 | ||
29 | 29 | ||
30 | /********************************************************************* | 30 | /********************************************************************* |
31 | * CPUFREQ NOTIFIER INTERFACE * | 31 | * CPUFREQ NOTIFIER INTERFACE * |
32 | *********************************************************************/ | 32 | *********************************************************************/ |
33 | 33 | ||
34 | #define CPUFREQ_TRANSITION_NOTIFIER (0) | 34 | #define CPUFREQ_TRANSITION_NOTIFIER (0) |
35 | #define CPUFREQ_POLICY_NOTIFIER (1) | 35 | #define CPUFREQ_POLICY_NOTIFIER (1) |
36 | 36 | ||
37 | #ifdef CONFIG_CPU_FREQ | 37 | #ifdef CONFIG_CPU_FREQ |
38 | int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); | 38 | int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); |
39 | int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); | 39 | int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list); |
40 | extern void disable_cpufreq(void); | 40 | extern void disable_cpufreq(void); |
41 | #else /* CONFIG_CPU_FREQ */ | 41 | #else /* CONFIG_CPU_FREQ */ |
42 | static inline int cpufreq_register_notifier(struct notifier_block *nb, | 42 | static inline int cpufreq_register_notifier(struct notifier_block *nb, |
43 | unsigned int list) | 43 | unsigned int list) |
44 | { | 44 | { |
45 | return 0; | 45 | return 0; |
46 | } | 46 | } |
47 | static inline int cpufreq_unregister_notifier(struct notifier_block *nb, | 47 | static inline int cpufreq_unregister_notifier(struct notifier_block *nb, |
48 | unsigned int list) | 48 | unsigned int list) |
49 | { | 49 | { |
50 | return 0; | 50 | return 0; |
51 | } | 51 | } |
52 | static inline void disable_cpufreq(void) { } | 52 | static inline void disable_cpufreq(void) { } |
53 | #endif /* CONFIG_CPU_FREQ */ | 53 | #endif /* CONFIG_CPU_FREQ */ |
54 | 54 | ||
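Registration becomes a no-op stub when CONFIG_CPU_FREQ is off, so callers need no #ifdef of their own. A hedged sketch of a transition notifier using only interfaces declared in this header (the callback and module names are hypothetical; struct cpufreq_freqs is defined further below):

	#include <linux/cpufreq.h>
	#include <linux/module.h>

	static int freq_trans_cb(struct notifier_block *nb,
				 unsigned long state, void *data)
	{
		struct cpufreq_freqs *freqs = data;

		if (state == CPUFREQ_POSTCHANGE)
			pr_info("cpu%u: %u kHz -> %u kHz\n",
				freqs->cpu, freqs->old, freqs->new);
		return NOTIFY_OK;
	}

	static struct notifier_block freq_trans_nb = {
		.notifier_call = freq_trans_cb,
	};

	static int __init freq_log_init(void)
	{
		return cpufreq_register_notifier(&freq_trans_nb,
						 CPUFREQ_TRANSITION_NOTIFIER);
	}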
55 | /* if (cpufreq_driver->target) exists, the ->governor decides what frequency | 55 | /* if (cpufreq_driver->target) exists, the ->governor decides what frequency |
56 | * within the limits is used. If (cpufreq_driver->setpolicy) exists, these | 56 | * within the limits is used. If (cpufreq_driver->setpolicy) exists, these |
57 | * two generic policies are available: | 57 | * two generic policies are available: |
58 | */ | 58 | */ |
59 | 59 | ||
60 | #define CPUFREQ_POLICY_POWERSAVE (1) | 60 | #define CPUFREQ_POLICY_POWERSAVE (1) |
61 | #define CPUFREQ_POLICY_PERFORMANCE (2) | 61 | #define CPUFREQ_POLICY_PERFORMANCE (2) |
62 | 62 | ||
63 | /* Frequency values here are CPU kHz so that hardware which doesn't run | 63 | /* Frequency values here are CPU kHz so that hardware which doesn't run |
64 | * with some frequencies can complain without having to guess what per | 64 | * with some frequencies can complain without having to guess what per |
65 | * cent / per mille means. | 65 | * cent / per mille means. |
66 | * Maximum transition latency is in nanoseconds - if it's unknown, | 66 | * Maximum transition latency is in nanoseconds - if it's unknown, |
67 | * CPUFREQ_ETERNAL shall be used. | 67 | * CPUFREQ_ETERNAL shall be used. |
68 | */ | 68 | */ |
69 | 69 | ||
70 | struct cpufreq_governor; | 70 | struct cpufreq_governor; |
71 | 71 | ||
72 | /* /sys/devices/system/cpu/cpufreq: entry point for global variables */ | 72 | /* /sys/devices/system/cpu/cpufreq: entry point for global variables */ |
73 | extern struct kobject *cpufreq_global_kobject; | 73 | extern struct kobject *cpufreq_global_kobject; |
74 | int cpufreq_get_global_kobject(void); | ||
75 | void cpufreq_put_global_kobject(void); | ||
76 | int cpufreq_sysfs_create_file(const struct attribute *attr); | ||
77 | void cpufreq_sysfs_remove_file(const struct attribute *attr); | ||
74 | 78 | ||
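These four declarations are the point of this commit: cpufreq_global_kobject is no longer populated unconditionally, but created on first use and reference-counted, so the empty /sys/devices/system/cpu/cpufreq directory disappears when nobody needs it. Callers pair the get/put and create/remove helpers, roughly as in this sketch ("boost" is a hypothetical global_attr as defined later in this header):

	static int example_expose_boost(void)
	{
		int ret;

		ret = cpufreq_get_global_kobject();  /* creates dir on 1st use */
		if (ret)
			return ret;

		ret = cpufreq_sysfs_create_file(&boost.attr);
		if (ret)
			cpufreq_put_global_kobject();  /* last put drops the dir */
		return ret;
	}

	static void example_remove_boost(void)
	{
		cpufreq_sysfs_remove_file(&boost.attr);
		cpufreq_put_global_kobject();
	}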
75 | #define CPUFREQ_ETERNAL (-1) | 79 | #define CPUFREQ_ETERNAL (-1) |
76 | struct cpufreq_cpuinfo { | 80 | struct cpufreq_cpuinfo { |
77 | unsigned int max_freq; | 81 | unsigned int max_freq; |
78 | unsigned int min_freq; | 82 | unsigned int min_freq; |
79 | 83 | ||
80 | /* in 10^(-9) s = nanoseconds */ | 84 | /* in 10^(-9) s = nanoseconds */ |
81 | unsigned int transition_latency; | 85 | unsigned int transition_latency; |
82 | }; | 86 | }; |
83 | 87 | ||
84 | struct cpufreq_real_policy { | 88 | struct cpufreq_real_policy { |
85 | unsigned int min; /* in kHz */ | 89 | unsigned int min; /* in kHz */ |
86 | unsigned int max; /* in kHz */ | 90 | unsigned int max; /* in kHz */ |
87 | unsigned int policy; /* see above */ | 91 | unsigned int policy; /* see above */ |
88 | struct cpufreq_governor *governor; /* see below */ | 92 | struct cpufreq_governor *governor; /* see below */ |
89 | }; | 93 | }; |
90 | 94 | ||
91 | struct cpufreq_policy { | 95 | struct cpufreq_policy { |
92 | /* CPUs sharing clock, require sw coordination */ | 96 | /* CPUs sharing clock, require sw coordination */ |
93 | cpumask_var_t cpus; /* Online CPUs only */ | 97 | cpumask_var_t cpus; /* Online CPUs only */ |
94 | cpumask_var_t related_cpus; /* Online + Offline CPUs */ | 98 | cpumask_var_t related_cpus; /* Online + Offline CPUs */ |
95 | 99 | ||
96 | unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs | 100 | unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs |
97 | should set cpufreq */ | 101 | should set cpufreq */ |
98 | unsigned int cpu; /* cpu nr of CPU managing this policy */ | 102 | unsigned int cpu; /* cpu nr of CPU managing this policy */ |
99 | unsigned int last_cpu; /* cpu nr of previous CPU that managed | 103 | unsigned int last_cpu; /* cpu nr of previous CPU that managed |
100 | * this policy */ | 104 | * this policy */ |
101 | struct cpufreq_cpuinfo cpuinfo;/* see above */ | 105 | struct cpufreq_cpuinfo cpuinfo;/* see above */ |
102 | 106 | ||
103 | unsigned int min; /* in kHz */ | 107 | unsigned int min; /* in kHz */ |
104 | unsigned int max; /* in kHz */ | 108 | unsigned int max; /* in kHz */ |
105 | unsigned int cur; /* in kHz, only needed if cpufreq | 109 | unsigned int cur; /* in kHz, only needed if cpufreq |
106 | * governors are used */ | 110 | * governors are used */ |
107 | unsigned int policy; /* see above */ | 111 | unsigned int policy; /* see above */ |
108 | struct cpufreq_governor *governor; /* see below */ | 112 | struct cpufreq_governor *governor; /* see below */ |
109 | void *governor_data; | 113 | void *governor_data; |
110 | 114 | ||
111 | struct work_struct update; /* if update_policy() needs to be | 115 | struct work_struct update; /* if update_policy() needs to be |
112 | * called, but you're in IRQ context */ | 116 | * called, but you're in IRQ context */ |
113 | 117 | ||
114 | struct cpufreq_real_policy user_policy; | 118 | struct cpufreq_real_policy user_policy; |
115 | 119 | ||
116 | struct kobject kobj; | 120 | struct kobject kobj; |
117 | struct completion kobj_unregister; | 121 | struct completion kobj_unregister; |
118 | }; | 122 | }; |
119 | 123 | ||
120 | #define CPUFREQ_ADJUST (0) | 124 | #define CPUFREQ_ADJUST (0) |
121 | #define CPUFREQ_INCOMPATIBLE (1) | 125 | #define CPUFREQ_INCOMPATIBLE (1) |
122 | #define CPUFREQ_NOTIFY (2) | 126 | #define CPUFREQ_NOTIFY (2) |
123 | #define CPUFREQ_START (3) | 127 | #define CPUFREQ_START (3) |
124 | #define CPUFREQ_UPDATE_POLICY_CPU (4) | 128 | #define CPUFREQ_UPDATE_POLICY_CPU (4) |
125 | 129 | ||
126 | /* Only for ACPI */ | 130 | /* Only for ACPI */ |
127 | #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */ | 131 | #define CPUFREQ_SHARED_TYPE_NONE (0) /* None */ |
128 | #define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */ | 132 | #define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */ |
129 | #define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */ | 133 | #define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */ |
130 | #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ | 134 | #define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/ |
131 | 135 | ||
132 | static inline bool policy_is_shared(struct cpufreq_policy *policy) | 136 | static inline bool policy_is_shared(struct cpufreq_policy *policy) |
133 | { | 137 | { |
134 | return cpumask_weight(policy->cpus) > 1; | 138 | return cpumask_weight(policy->cpus) > 1; |
135 | } | 139 | } |
136 | 140 | ||
137 | /******************** cpufreq transition notifiers *******************/ | 141 | /******************** cpufreq transition notifiers *******************/ |
138 | 142 | ||
139 | #define CPUFREQ_PRECHANGE (0) | 143 | #define CPUFREQ_PRECHANGE (0) |
140 | #define CPUFREQ_POSTCHANGE (1) | 144 | #define CPUFREQ_POSTCHANGE (1) |
141 | #define CPUFREQ_RESUMECHANGE (8) | 145 | #define CPUFREQ_RESUMECHANGE (8) |
142 | #define CPUFREQ_SUSPENDCHANGE (9) | 146 | #define CPUFREQ_SUSPENDCHANGE (9) |
143 | 147 | ||
144 | struct cpufreq_freqs { | 148 | struct cpufreq_freqs { |
145 | unsigned int cpu; /* cpu nr */ | 149 | unsigned int cpu; /* cpu nr */ |
146 | unsigned int old; | 150 | unsigned int old; |
147 | unsigned int new; | 151 | unsigned int new; |
148 | u8 flags; /* flags of cpufreq_driver, see below. */ | 152 | u8 flags; /* flags of cpufreq_driver, see below. */ |
149 | }; | 153 | }; |
150 | 154 | ||
151 | 155 | ||
152 | /** | 156 | /** |
153 | * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch safe) | 157 | * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch safe) |
154 | * @old: old value | 158 | * @old: old value |
155 | * @div: divisor | 159 | * @div: divisor |
156 | * @mult: multiplier | 160 | * @mult: multiplier |
157 | * | 161 | * |
158 | * | 162 | * |
159 | * new = old * mult / div | 163 | * new = old * mult / div |
160 | */ | 164 | */ |
161 | static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mult) | 165 | static inline unsigned long cpufreq_scale(unsigned long old, u_int div, u_int mult) |
162 | { | 166 | { |
163 | #if BITS_PER_LONG == 32 | 167 | #if BITS_PER_LONG == 32 |
164 | 168 | ||
165 | u64 result = ((u64) old) * ((u64) mult); | 169 | u64 result = ((u64) old) * ((u64) mult); |
166 | do_div(result, div); | 170 | do_div(result, div); |
167 | return (unsigned long) result; | 171 | return (unsigned long) result; |
168 | 172 | ||
169 | #elif BITS_PER_LONG == 64 | 173 | #elif BITS_PER_LONG == 64 |
170 | 174 | ||
171 | unsigned long result = old * ((u64) mult); | 175 | unsigned long result = old * ((u64) mult); |
172 | result /= div; | 176 | result /= div; |
173 | return result; | 177 | return result; |
174 | 178 | ||
175 | #endif | 179 | #endif |
176 | }; | 180 | }; |
177 | 181 | ||
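A worked example of the helper: rescaling loops_per_jiffy when the clock drops from 1000000 kHz to 800000 kHz (the numbers are illustrative only; on 32-bit the intermediate product is kept in 64 bits to avoid overflow):

	unsigned long old_lpj = 4996000;
	unsigned long new_lpj = cpufreq_scale(old_lpj, 1000000, 800000);
	/* new = 4996000 * 800000 / 1000000 = 3996800 */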
178 | /********************************************************************* | 182 | /********************************************************************* |
179 | * CPUFREQ GOVERNORS * | 183 | * CPUFREQ GOVERNORS * |
180 | *********************************************************************/ | 184 | *********************************************************************/ |
181 | 185 | ||
182 | #define CPUFREQ_GOV_START 1 | 186 | #define CPUFREQ_GOV_START 1 |
183 | #define CPUFREQ_GOV_STOP 2 | 187 | #define CPUFREQ_GOV_STOP 2 |
184 | #define CPUFREQ_GOV_LIMITS 3 | 188 | #define CPUFREQ_GOV_LIMITS 3 |
185 | #define CPUFREQ_GOV_POLICY_INIT 4 | 189 | #define CPUFREQ_GOV_POLICY_INIT 4 |
186 | #define CPUFREQ_GOV_POLICY_EXIT 5 | 190 | #define CPUFREQ_GOV_POLICY_EXIT 5 |
187 | 191 | ||
188 | struct cpufreq_governor { | 192 | struct cpufreq_governor { |
189 | char name[CPUFREQ_NAME_LEN]; | 193 | char name[CPUFREQ_NAME_LEN]; |
190 | int initialized; | 194 | int initialized; |
191 | int (*governor) (struct cpufreq_policy *policy, | 195 | int (*governor) (struct cpufreq_policy *policy, |
192 | unsigned int event); | 196 | unsigned int event); |
193 | ssize_t (*show_setspeed) (struct cpufreq_policy *policy, | 197 | ssize_t (*show_setspeed) (struct cpufreq_policy *policy, |
194 | char *buf); | 198 | char *buf); |
195 | int (*store_setspeed) (struct cpufreq_policy *policy, | 199 | int (*store_setspeed) (struct cpufreq_policy *policy, |
196 | unsigned int freq); | 200 | unsigned int freq); |
197 | unsigned int max_transition_latency; /* HW must be able to switch to | 201 | unsigned int max_transition_latency; /* HW must be able to switch to |
198 | next freq faster than this value in nanoseconds or we | 202 | next freq faster than this value in nanoseconds or we |
199 | will fall back to the performance governor */ | 203 | will fall back to the performance governor */ |
200 | struct list_head governor_list; | 204 | struct list_head governor_list; |
201 | struct module *owner; | 205 | struct module *owner; |
202 | }; | 206 | }; |
203 | 207 | ||
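A governor is a named object whose ->governor callback multiplexes the event codes above. A minimal, hypothetical sketch (not an in-tree governor, assuming <linux/module.h> for THIS_MODULE) that simply pins each policy to its maximum:

	static int noop_governor(struct cpufreq_policy *policy,
				 unsigned int event)
	{
		switch (event) {
		case CPUFREQ_GOV_START:
		case CPUFREQ_GOV_LIMITS:
			return __cpufreq_driver_target(policy, policy->max,
						       CPUFREQ_RELATION_H);
		default:	/* STOP, POLICY_INIT, POLICY_EXIT */
			return 0;
		}
	}

	static struct cpufreq_governor cpufreq_gov_noop = {
		.name		= "noop",
		.governor	= noop_governor,
		.owner		= THIS_MODULE,
	};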
204 | /* | 208 | /* |
205 | * Pass a target to the cpufreq driver. | 209 | * Pass a target to the cpufreq driver. |
206 | */ | 210 | */ |
207 | extern int cpufreq_driver_target(struct cpufreq_policy *policy, | 211 | extern int cpufreq_driver_target(struct cpufreq_policy *policy, |
208 | unsigned int target_freq, | 212 | unsigned int target_freq, |
209 | unsigned int relation); | 213 | unsigned int relation); |
210 | extern int __cpufreq_driver_target(struct cpufreq_policy *policy, | 214 | extern int __cpufreq_driver_target(struct cpufreq_policy *policy, |
211 | unsigned int target_freq, | 215 | unsigned int target_freq, |
212 | unsigned int relation); | 216 | unsigned int relation); |
213 | 217 | ||
214 | 218 | ||
215 | extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy, | 219 | extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy, |
216 | unsigned int cpu); | 220 | unsigned int cpu); |
217 | 221 | ||
218 | int cpufreq_register_governor(struct cpufreq_governor *governor); | 222 | int cpufreq_register_governor(struct cpufreq_governor *governor); |
219 | void cpufreq_unregister_governor(struct cpufreq_governor *governor); | 223 | void cpufreq_unregister_governor(struct cpufreq_governor *governor); |
220 | 224 | ||
221 | 225 | ||
222 | /********************************************************************* | 226 | /********************************************************************* |
223 | * CPUFREQ DRIVER INTERFACE * | 227 | * CPUFREQ DRIVER INTERFACE * |
224 | *********************************************************************/ | 228 | *********************************************************************/ |
225 | 229 | ||
226 | #define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */ | 230 | #define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */ |
227 | #define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */ | 231 | #define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */ |
228 | 232 | ||
229 | struct freq_attr; | 233 | struct freq_attr; |
230 | 234 | ||
231 | struct cpufreq_driver { | 235 | struct cpufreq_driver { |
232 | struct module *owner; | 236 | struct module *owner; |
233 | char name[CPUFREQ_NAME_LEN]; | 237 | char name[CPUFREQ_NAME_LEN]; |
234 | u8 flags; | 238 | u8 flags; |
235 | /* | 239 | /* |
236 | * This should be set by platforms having multiple clock-domains, i.e. | 240 | * This should be set by platforms having multiple clock-domains, i.e. |
237 | * supporting multiple policies. With this set, per-governor sysfs | 241 | * supporting multiple policies. With this set, per-governor sysfs |
238 | * directories are created in the cpu/cpu<num>/cpufreq/ directory, so | 242 | * directories are created in the cpu/cpu<num>/cpufreq/ directory, so |
239 | * different clusters can use the same governor with different tunables. | 243 | * different clusters can use the same governor with different tunables. |
240 | */ | 244 | */ |
241 | bool have_governor_per_policy; | 245 | bool have_governor_per_policy; |
242 | 246 | ||
243 | /* needed by all drivers */ | 247 | /* needed by all drivers */ |
244 | int (*init) (struct cpufreq_policy *policy); | 248 | int (*init) (struct cpufreq_policy *policy); |
245 | int (*verify) (struct cpufreq_policy *policy); | 249 | int (*verify) (struct cpufreq_policy *policy); |
246 | 250 | ||
247 | /* define one out of two */ | 251 | /* define one out of two */ |
248 | int (*setpolicy) (struct cpufreq_policy *policy); | 252 | int (*setpolicy) (struct cpufreq_policy *policy); |
249 | int (*target) (struct cpufreq_policy *policy, | 253 | int (*target) (struct cpufreq_policy *policy, |
250 | unsigned int target_freq, | 254 | unsigned int target_freq, |
251 | unsigned int relation); | 255 | unsigned int relation); |
252 | 256 | ||
253 | /* should be defined, if possible */ | 257 | /* should be defined, if possible */ |
254 | unsigned int (*get) (unsigned int cpu); | 258 | unsigned int (*get) (unsigned int cpu); |
255 | 259 | ||
256 | /* optional */ | 260 | /* optional */ |
257 | unsigned int (*getavg) (struct cpufreq_policy *policy, | 261 | unsigned int (*getavg) (struct cpufreq_policy *policy, |
258 | unsigned int cpu); | 262 | unsigned int cpu); |
259 | int (*bios_limit) (int cpu, unsigned int *limit); | 263 | int (*bios_limit) (int cpu, unsigned int *limit); |
260 | 264 | ||
261 | int (*exit) (struct cpufreq_policy *policy); | 265 | int (*exit) (struct cpufreq_policy *policy); |
262 | int (*suspend) (struct cpufreq_policy *policy); | 266 | int (*suspend) (struct cpufreq_policy *policy); |
263 | int (*resume) (struct cpufreq_policy *policy); | 267 | int (*resume) (struct cpufreq_policy *policy); |
264 | struct freq_attr **attr; | 268 | struct freq_attr **attr; |
265 | }; | 269 | }; |
266 | 270 | ||
267 | /* flags */ | 271 | /* flags */ |
268 | 272 | ||
269 | #define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if | 273 | #define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if |
270 | * all ->init() calls failed */ | 274 | * all ->init() calls failed */ |
271 | #define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel | 275 | #define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel |
272 | * "constants" aren't affected by | 276 | * "constants" aren't affected by |
273 | * frequency transitions */ | 277 | * frequency transitions */ |
274 | #define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed | 278 | #define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed |
275 | * mismatches */ | 279 | * mismatches */ |
276 | 280 | ||
277 | int cpufreq_register_driver(struct cpufreq_driver *driver_data); | 281 | int cpufreq_register_driver(struct cpufreq_driver *driver_data); |
278 | int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); | 282 | int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); |
279 | 283 | ||
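A minimal, hypothetical driver skeleton wiring up only the members the comments above mark as required, plus registration (foo_cpufreq_init and foo_cpufreq_target are assumed to exist; foo_cpufreq_verify is sketched after cpufreq_verify_within_limits below):

	static struct cpufreq_driver foo_cpufreq_driver = {
		.owner	= THIS_MODULE,
		.name	= "foo-cpufreq",
		.init	= foo_cpufreq_init,
		.verify	= foo_cpufreq_verify,
		.target	= foo_cpufreq_target,
	};

	static int __init foo_cpufreq_module_init(void)
	{
		return cpufreq_register_driver(&foo_cpufreq_driver);
	}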
280 | 284 | ||
281 | void cpufreq_notify_transition(struct cpufreq_policy *policy, | 285 | void cpufreq_notify_transition(struct cpufreq_policy *policy, |
282 | struct cpufreq_freqs *freqs, unsigned int state); | 286 | struct cpufreq_freqs *freqs, unsigned int state); |
283 | 287 | ||
284 | static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) | 288 | static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned int min, unsigned int max) |
285 | { | 289 | { |
286 | if (policy->min < min) | 290 | if (policy->min < min) |
287 | policy->min = min; | 291 | policy->min = min; |
288 | if (policy->max < min) | 292 | if (policy->max < min) |
289 | policy->max = min; | 293 | policy->max = min; |
290 | if (policy->min > max) | 294 | if (policy->min > max) |
291 | policy->min = max; | 295 | policy->min = max; |
292 | if (policy->max > max) | 296 | if (policy->max > max) |
293 | policy->max = max; | 297 | policy->max = max; |
294 | if (policy->min > policy->max) | 298 | if (policy->min > policy->max) |
295 | policy->min = policy->max; | 299 | policy->min = policy->max; |
296 | return; | 300 | return; |
297 | } | 301 | } |
298 | 302 | ||
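Drivers typically call this helper from their ->verify hook to clamp a requested policy into the hardware's cpuinfo bounds; a sketch, matching the hypothetical driver skeleton above:

	static int foo_cpufreq_verify(struct cpufreq_policy *policy)
	{
		cpufreq_verify_within_limits(policy,
					     policy->cpuinfo.min_freq,
					     policy->cpuinfo.max_freq);
		return 0;
	}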
299 | struct freq_attr { | 303 | struct freq_attr { |
300 | struct attribute attr; | 304 | struct attribute attr; |
301 | ssize_t (*show)(struct cpufreq_policy *, char *); | 305 | ssize_t (*show)(struct cpufreq_policy *, char *); |
302 | ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count); | 306 | ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count); |
303 | }; | 307 | }; |
304 | 308 | ||
305 | #define cpufreq_freq_attr_ro(_name) \ | 309 | #define cpufreq_freq_attr_ro(_name) \ |
306 | static struct freq_attr _name = \ | 310 | static struct freq_attr _name = \ |
307 | __ATTR(_name, 0444, show_##_name, NULL) | 311 | __ATTR(_name, 0444, show_##_name, NULL) |
308 | 312 | ||
309 | #define cpufreq_freq_attr_ro_perm(_name, _perm) \ | 313 | #define cpufreq_freq_attr_ro_perm(_name, _perm) \ |
310 | static struct freq_attr _name = \ | 314 | static struct freq_attr _name = \ |
311 | __ATTR(_name, _perm, show_##_name, NULL) | 315 | __ATTR(_name, _perm, show_##_name, NULL) |
312 | 316 | ||
313 | #define cpufreq_freq_attr_rw(_name) \ | 317 | #define cpufreq_freq_attr_rw(_name) \ |
314 | static struct freq_attr _name = \ | 318 | static struct freq_attr _name = \ |
315 | __ATTR(_name, 0644, show_##_name, store_##_name) | 319 | __ATTR(_name, 0644, show_##_name, store_##_name) |
316 | 320 | ||
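Each macro expects show_<name> (and store_<name> for the rw variant) to be defined before it is used; for example, a hypothetical read-only per-policy attribute:

	static ssize_t show_example_cur(struct cpufreq_policy *policy, char *buf)
	{
		return sprintf(buf, "%u\n", policy->cur);
	}
	cpufreq_freq_attr_ro(example_cur);
	/* expands to:
	 *   static struct freq_attr example_cur =
	 *	__ATTR(example_cur, 0444, show_example_cur, NULL);
	 */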
317 | struct global_attr { | 321 | struct global_attr { |
318 | struct attribute attr; | 322 | struct attribute attr; |
319 | ssize_t (*show)(struct kobject *kobj, | 323 | ssize_t (*show)(struct kobject *kobj, |
320 | struct attribute *attr, char *buf); | 324 | struct attribute *attr, char *buf); |
321 | ssize_t (*store)(struct kobject *a, struct attribute *b, | 325 | ssize_t (*store)(struct kobject *a, struct attribute *b, |
322 | const char *c, size_t count); | 326 | const char *c, size_t count); |
323 | }; | 327 | }; |
324 | 328 | ||
325 | #define define_one_global_ro(_name) \ | 329 | #define define_one_global_ro(_name) \ |
326 | static struct global_attr _name = \ | 330 | static struct global_attr _name = \ |
327 | __ATTR(_name, 0444, show_##_name, NULL) | 331 | __ATTR(_name, 0444, show_##_name, NULL) |
328 | 332 | ||
329 | #define define_one_global_rw(_name) \ | 333 | #define define_one_global_rw(_name) \ |
330 | static struct global_attr _name = \ | 334 | static struct global_attr _name = \ |
331 | __ATTR(_name, 0644, show_##_name, store_##_name) | 335 | __ATTR(_name, 0644, show_##_name, store_##_name) |
332 | 336 | ||
333 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); | 337 | struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu); |
334 | void cpufreq_cpu_put(struct cpufreq_policy *data); | 338 | void cpufreq_cpu_put(struct cpufreq_policy *data); |
335 | const char *cpufreq_get_current_driver(void); | 339 | const char *cpufreq_get_current_driver(void); |
336 | 340 | ||
337 | /********************************************************************* | 341 | /********************************************************************* |
338 | * CPUFREQ 2.6. INTERFACE * | 342 | * CPUFREQ 2.6. INTERFACE * |
339 | *********************************************************************/ | 343 | *********************************************************************/ |
340 | u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); | 344 | u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy); |
341 | int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); | 345 | int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); |
342 | int cpufreq_update_policy(unsigned int cpu); | 346 | int cpufreq_update_policy(unsigned int cpu); |
343 | bool have_governor_per_policy(void); | 347 | bool have_governor_per_policy(void); |
344 | struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); | 348 | struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); |
345 | 349 | ||
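have_governor_per_policy() and get_governor_parent_kobj() let a governor follow the driver's have_governor_per_policy flag: tunables land either under the per-policy cpu/cpu<n>/cpufreq/ kobject or under the global directory, which is exactly why the global directory no longer needs to exist unconditionally. A sketch of a governor attaching its tunables ("dbs_attr_group" stands in for a real attribute_group):

	static int example_init_tunables(struct cpufreq_policy *policy)
	{
		struct kobject *parent = get_governor_parent_kobj(policy);

		/* per-policy governors: cpu/cpu<n>/cpufreq/;
		 * otherwise: the global cpufreq kobject */
		return sysfs_create_group(parent, &dbs_attr_group);
	}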
346 | #ifdef CONFIG_CPU_FREQ | 350 | #ifdef CONFIG_CPU_FREQ |
347 | /* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */ | 351 | /* query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it */ |
348 | unsigned int cpufreq_get(unsigned int cpu); | 352 | unsigned int cpufreq_get(unsigned int cpu); |
349 | #else | 353 | #else |
350 | static inline unsigned int cpufreq_get(unsigned int cpu) | 354 | static inline unsigned int cpufreq_get(unsigned int cpu) |
351 | { | 355 | { |
352 | return 0; | 356 | return 0; |
353 | } | 357 | } |
354 | #endif | 358 | #endif |
355 | 359 | ||
356 | /* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */ | 360 | /* query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it */ |
357 | #ifdef CONFIG_CPU_FREQ | 361 | #ifdef CONFIG_CPU_FREQ |
358 | unsigned int cpufreq_quick_get(unsigned int cpu); | 362 | unsigned int cpufreq_quick_get(unsigned int cpu); |
359 | unsigned int cpufreq_quick_get_max(unsigned int cpu); | 363 | unsigned int cpufreq_quick_get_max(unsigned int cpu); |
360 | #else | 364 | #else |
361 | static inline unsigned int cpufreq_quick_get(unsigned int cpu) | 365 | static inline unsigned int cpufreq_quick_get(unsigned int cpu) |
362 | { | 366 | { |
363 | return 0; | 367 | return 0; |
364 | } | 368 | } |
365 | static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) | 369 | static inline unsigned int cpufreq_quick_get_max(unsigned int cpu) |
366 | { | 370 | { |
367 | return 0; | 371 | return 0; |
368 | } | 372 | } |
369 | #endif | 373 | #endif |
370 | 374 | ||
371 | 375 | ||
372 | /********************************************************************* | 376 | /********************************************************************* |
373 | * CPUFREQ DEFAULT GOVERNOR * | 377 | * CPUFREQ DEFAULT GOVERNOR * |
374 | *********************************************************************/ | 378 | *********************************************************************/ |
375 | 379 | ||
376 | 380 | ||
377 | /* | 381 | /* |
378 | Performance governor is the fallback governor if any other gov fails to | 382 | Performance governor is the fallback governor if any other gov fails to |
379 | auto-load due to latency restrictions | 383 | auto-load due to latency restrictions |
380 | */ | 384 | */ |
381 | #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE | 385 | #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE |
382 | extern struct cpufreq_governor cpufreq_gov_performance; | 386 | extern struct cpufreq_governor cpufreq_gov_performance; |
383 | #endif | 387 | #endif |
384 | #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE | 388 | #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE |
385 | #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_performance) | 389 | #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_performance) |
386 | #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE) | 390 | #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE) |
387 | extern struct cpufreq_governor cpufreq_gov_powersave; | 391 | extern struct cpufreq_governor cpufreq_gov_powersave; |
388 | #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_powersave) | 392 | #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_powersave) |
389 | #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE) | 393 | #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE) |
390 | extern struct cpufreq_governor cpufreq_gov_userspace; | 394 | extern struct cpufreq_governor cpufreq_gov_userspace; |
391 | #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_userspace) | 395 | #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_userspace) |
392 | #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND) | 396 | #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND) |
393 | extern struct cpufreq_governor cpufreq_gov_ondemand; | 397 | extern struct cpufreq_governor cpufreq_gov_ondemand; |
394 | #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemand) | 398 | #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemand) |
395 | #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE) | 399 | #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE) |
396 | extern struct cpufreq_governor cpufreq_gov_conservative; | 400 | extern struct cpufreq_governor cpufreq_gov_conservative; |
397 | #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative) | 401 | #define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative) |
398 | #endif | 402 | #endif |
399 | 403 | ||
400 | 404 | ||
401 | /********************************************************************* | 405 | /********************************************************************* |
402 | * FREQUENCY TABLE HELPERS * | 406 | * FREQUENCY TABLE HELPERS * |
403 | *********************************************************************/ | 407 | *********************************************************************/ |
404 | 408 | ||
405 | #define CPUFREQ_ENTRY_INVALID ~0 | 409 | #define CPUFREQ_ENTRY_INVALID ~0 |
406 | #define CPUFREQ_TABLE_END ~1 | 410 | #define CPUFREQ_TABLE_END ~1 |
407 | 411 | ||
408 | struct cpufreq_frequency_table { | 412 | struct cpufreq_frequency_table { |
409 | unsigned int index; /* any */ | 413 | unsigned int index; /* any */ |
410 | unsigned int frequency; /* kHz - doesn't need to be in ascending | 414 | unsigned int frequency; /* kHz - doesn't need to be in ascending |
411 | * order */ | 415 | * order */ |
412 | }; | 416 | }; |
413 | 417 | ||
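Tables are plain arrays terminated by a CPUFREQ_TABLE_END entry; states a given board cannot use are kept in place but marked CPUFREQ_ENTRY_INVALID. A hypothetical example:

	static struct cpufreq_frequency_table foo_freq_table[] = {
		{ .index = 0, .frequency = 200000 },		/* 200 MHz */
		{ .index = 1, .frequency = CPUFREQ_ENTRY_INVALID },
		{ .index = 2, .frequency = 800000 },		/* 800 MHz */
		{ .index = 3, .frequency = CPUFREQ_TABLE_END },
	};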
414 | int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, | 418 | int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, |
415 | struct cpufreq_frequency_table *table); | 419 | struct cpufreq_frequency_table *table); |
416 | 420 | ||
417 | int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, | 421 | int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, |
418 | struct cpufreq_frequency_table *table); | 422 | struct cpufreq_frequency_table *table); |
419 | 423 | ||
420 | int cpufreq_frequency_table_target(struct cpufreq_policy *policy, | 424 | int cpufreq_frequency_table_target(struct cpufreq_policy *policy, |
421 | struct cpufreq_frequency_table *table, | 425 | struct cpufreq_frequency_table *table, |
422 | unsigned int target_freq, | 426 | unsigned int target_freq, |
423 | unsigned int relation, | 427 | unsigned int relation, |
424 | unsigned int *index); | 428 | unsigned int *index); |
425 | 429 | ||
426 | /* the following 3 functions are for cpufreq core use only */ | 430 | /* the following 3 functions are for cpufreq core use only */ |
427 | struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); | 431 | struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); |
428 | 432 | ||
429 | /* the following are really really optional */ | 433 | /* the following are really really optional */ |
430 | extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; | 434 | extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; |
431 | 435 | ||
432 | void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, | 436 | void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table, |
433 | unsigned int cpu); | 437 | unsigned int cpu); |
434 | void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy); | 438 | void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy); |
435 | 439 | ||
436 | void cpufreq_frequency_table_put_attr(unsigned int cpu); | 440 | void cpufreq_frequency_table_put_attr(unsigned int cpu); |
437 | #endif /* _LINUX_CPUFREQ_H */ | 441 | #endif /* _LINUX_CPUFREQ_H */ |
438 | 442 |