Commit 5302c3fb2e62f4ca5e43e060491ba299f58c5231

Authored by Srivatsa S. Bhat
Committed by Rafael J. Wysocki
1 parent 8414809c6a

cpufreq: Perform light-weight init/teardown during suspend/resume

Now that we have the infrastructure to perform a light-weight init/teardown,
use it in the cpufreq CPU hotplug notifier when invoked from the
suspend/resume path.

This also ensures that the file permissions of the cpufreq sysfs files are
preserved across suspend/resume, which is what commit a66b2e (cpufreq:
Preserve sysfs files across suspend/resume) originally intended, before it
had to be reverted due to other problems.

Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
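
The hotplug notifier can tell a suspend/resume-initiated hotplug operation
apart from a regular one by the CPU_TASKS_FROZEN bit in the notifier action.
A minimal sketch of the dispatch this commit relies on, assuming
three-argument __cpufreq_add_dev()/__cpufreq_remove_dev() signatures that
take a "frozen" flag (__cpufreq_remove_dev() is not visible in the excerpt
below, so its signature here is an assumption):

static int cpufreq_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct device *dev = get_cpu_device(cpu);
        /* CPU_TASKS_FROZEN is set by the hotplug core when the
         * operation is driven by suspend/resume, i.e. while tasks
         * are frozen. */
        bool frozen = action & CPU_TASKS_FROZEN;

        if (dev) {
                switch (action & ~CPU_TASKS_FROZEN) {
                case CPU_ONLINE:
                        __cpufreq_add_dev(dev, NULL, frozen);
                        break;
                case CPU_DOWN_PREPARE:
                        __cpufreq_remove_dev(dev, NULL, frozen);
                        break;
                }
        }
        return NOTIFY_OK;
}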

Showing 2 changed files with 11 additions and 9 deletions

drivers/cpufreq/cpufreq.c
/*
 * linux/drivers/cpufreq/cpufreq.c
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *           (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *           Added handling for CPU hotplug
 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *           Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cputime.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/syscore_ops.h>

#include <trace/events/power.h>

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
static DEFINE_MUTEX(cpufreq_governor_lock);

#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in cpufreq hotplug path should not
 *   take this sem as top level hotplug notifier handler takes this.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)                                    \
static int lock_policy_rwsem_##mode(int cpu)                            \
{                                                                       \
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
        BUG_ON(policy_cpu == -1);                                       \
        down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));            \
                                                                        \
        return 0;                                                       \
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

#define unlock_policy_rwsem(mode, cpu)                                  \
static void unlock_policy_rwsem_##mode(int cpu)                         \
{                                                                       \
        int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);              \
        BUG_ON(policy_cpu == -1);                                       \
        up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));              \
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
                unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
        init_cpufreq_transition_notifier_list_called = true;
        return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return cpufreq_driver->have_governor_per_policy;
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = cputime_to_usecs(cur_wall_time);

        return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
        struct cpufreq_policy *data;
        unsigned long flags;

        if (cpu >= nr_cpu_ids)
                goto err_out;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (!cpufreq_driver)
                goto err_out_unlock;

        if (!try_module_get(cpufreq_driver->owner))
                goto err_out_unlock;

        /* get the CPU */
        data = per_cpu(cpufreq_cpu_data, cpu);

        if (!data)
                goto err_out_put_module;

        if (!sysfs && !kobject_get(&data->kobj))
                goto err_out_put_module;

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
        return data;

err_out_put_module:
        module_put(cpufreq_driver->owner);
err_out_unlock:
        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
        return NULL;
}

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        if (cpufreq_disabled())
                return NULL;

        return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
        return __cpufreq_cpu_get(cpu, true);
}

static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
        if (!sysfs)
                kobject_put(&data->kobj);
        module_put(cpufreq_driver->owner);
}

void cpufreq_cpu_put(struct cpufreq_policy *data)
{
        if (cpufreq_disabled())
                return;

        __cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
        __cpufreq_cpu_put(data, true);
}

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; "
                        "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
        }
        if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
            (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu "
                        "for frequency %u kHz\n", loops_per_jiffy, ci->new);
        }
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
        return;
}
#endif

static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                state, freqs->new);

        switch (state) {

        case CPUFREQ_PRECHANGE:
                if (WARN(policy->transition_ongoing ==
                                        cpumask_weight(policy->cpus),
                                "In middle of another frequency transition\n"))
                        return;

                policy->transition_ongoing++;

                /* detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
                        if ((policy) && (policy->cpu == freqs->cpu) &&
                            (policy->cur) && (policy->cur != freqs->old)) {
                                pr_debug("Warning: CPU frequency is"
                                        " %u, cpufreq assumed %u kHz.\n",
                                        freqs->old, policy->cur);
                                freqs->old = policy->cur;
                        }
                }
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_PRECHANGE, freqs);
                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                if (WARN(!policy->transition_ongoing,
                                "No frequency transition in progress\n"))
                        return;

                policy->transition_ongoing--;

                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
                        (unsigned long)freqs->cpu);
                trace_cpu_frequency(freqs->new, freqs->cpu);
                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                CPUFREQ_POSTCHANGE, freqs);
                if (likely(policy) && likely(policy->cpu == freqs->cpu))
                        policy->cur = freqs->new;
                break;
        }
}

/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, unsigned int state)
{
        for_each_cpu(freqs->cpu, policy->cpus)
                __cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);


/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        list_for_each_entry(t, &cpufreq_governor_list, governor_list)
                if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
{
        int err = -EINVAL;

        if (!cpufreq_driver)
                goto out;

        if (cpufreq_driver->setpolicy) {
                if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_PERFORMANCE;
                        err = 0;
                } else if (!strnicmp(str_governor, "powersave",
                                        CPUFREQ_NAME_LEN)) {
                        *policy = CPUFREQ_POLICY_POWERSAVE;
                        err = 0;
                }
        } else if (cpufreq_driver->target) {
                struct cpufreq_governor *t;

                mutex_lock(&cpufreq_governor_mutex);

                t = __find_governor(str_governor);

                if (t == NULL) {
                        int ret;

                        mutex_unlock(&cpufreq_governor_mutex);
                        ret = request_module("cpufreq_%s", str_governor);
                        mutex_lock(&cpufreq_governor_mutex);

                        if (ret == 0)
                                t = __find_governor(str_governor);
                }

                if (t != NULL) {
                        *governor = t;
                        err = 0;
                }

                mutex_unlock(&cpufreq_governor_mutex);
        }
out:
        return err;
}

/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);

static int __cpufreq_set_policy(struct cpufreq_policy *data,
                                struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        unsigned int ret;                                               \
        struct cpufreq_policy new_policy;                               \
                                                                        \
        ret = cpufreq_get_policy(&new_policy, policy->cpu);             \
        if (ret)                                                        \
                return -EINVAL;                                         \
                                                                        \
        ret = sscanf(buf, "%u", &new_policy.object);                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        ret = __cpufreq_set_policy(policy, &new_policy);                \
        policy->user_policy.object = policy->object;                    \
                                                                        \
        return ret ? ret : count;                                       \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy->cpu);
        if (!cur_freq)
                return sprintf(buf, "<unknown>");
        return sprintf(buf, "%u\n", cur_freq);
}

/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}

/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int ret;
        char str_governor[16];
        struct cpufreq_policy new_policy;

        ret = cpufreq_get_policy(&new_policy, policy->cpu);
        if (ret)
                return ret;

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_parse_governor(str_governor, &new_policy.policy,
                                                &new_policy.governor))
                return -EINVAL;

        /*
         * Do not use cpufreq_set_policy here or the user_policy.max
         * will be wrongly overridden
         */
        ret = __cpufreq_set_policy(policy, &new_policy);

        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret)
                return ret;
        else
                return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!cpufreq_driver->target) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;
        if (cpufreq_driver->bios_limit) {
                ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
                if (!ret)
                        return sprintf(buf, "%u\n", limit);
        }
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get_sysfs(policy->cpu);
        if (!policy)
                goto no_policy;

        if (lock_policy_rwsem_read(policy->cpu) < 0)
                goto fail;

        if (fattr->show)
                ret = fattr->show(policy, buf);
        else
                ret = -EIO;

        unlock_policy_rwsem_read(policy->cpu);
fail:
        cpufreq_cpu_put_sysfs(policy);
no_policy:
        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get_sysfs(policy->cpu);
        if (!policy)
                goto no_policy;

        if (lock_policy_rwsem_write(policy->cpu) < 0)
                goto fail;

        if (fattr->store)
                ret = fattr->store(policy, buf, count);
        else
                ret = -EIO;

        unlock_policy_rwsem_write(policy->cpu);
fail:
        cpufreq_cpu_put_sysfs(policy);
no_policy:
        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

static int cpufreq_global_kobject_usage;

int cpufreq_get_global_kobject(void)
{
        if (!cpufreq_global_kobject_usage++)
                return kobject_add(cpufreq_global_kobject,
                                &cpu_subsys.dev_root->kobj, "%s", "cpufreq");

        return 0;
}
EXPORT_SYMBOL(cpufreq_get_global_kobject);

void cpufreq_put_global_kobject(void)
{
        if (!--cpufreq_global_kobject_usage)
                kobject_del(cpufreq_global_kobject);
}
EXPORT_SYMBOL(cpufreq_put_global_kobject);

int cpufreq_sysfs_create_file(const struct attribute *attr)
{
        int ret = cpufreq_get_global_kobject();

        if (!ret) {
                ret = sysfs_create_file(cpufreq_global_kobject, attr);
                if (ret)
                        cpufreq_put_global_kobject();
        }

        return ret;
}
EXPORT_SYMBOL(cpufreq_sysfs_create_file);

void cpufreq_sysfs_remove_file(const struct attribute *attr)
{
        sysfs_remove_file(cpufreq_global_kobject, attr);
        cpufreq_put_global_kobject();
}
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
                                   struct cpufreq_policy *policy)
{
        unsigned int j;
        int ret = 0;

        for_each_cpu(j, policy->cpus) {
                struct cpufreq_policy *managed_policy;
                struct device *cpu_dev;

                if (j == cpu)
                        continue;

                pr_debug("CPU %u already managed, adding link\n", j);
                managed_policy = cpufreq_cpu_get(cpu);
                cpu_dev = get_cpu_device(j);
                ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
                                        "cpufreq");
                if (ret) {
                        cpufreq_cpu_put(managed_policy);
                        return ret;
                }
        }
        return ret;
}

static int cpufreq_add_dev_interface(unsigned int cpu,
                                     struct cpufreq_policy *policy,
                                     struct device *dev)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* prepare interface data */
        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
                                   &dev->kobj, "cpufreq");
        if (ret)
                return ret;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while ((drv_attr) && (*drv_attr)) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        goto err_out_kobj_put;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (cpufreq_driver->target) {
                ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
                if (ret)
                        goto err_out_kobj_put;
        }
        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        goto err_out_kobj_put;
        }

        ret = cpufreq_add_dev_symlink(cpu, policy);
        if (ret)
                goto err_out_kobj_put;

        return ret;

err_out_kobj_put:
        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);
        return ret;
}

static void cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_policy new_policy;
        int ret = 0;

        memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
        /* assure that the starting sequence is run in __cpufreq_set_policy */
        policy->governor = NULL;

        /* set default policy */
        ret = __cpufreq_set_policy(policy, &new_policy);
        policy->user_policy.policy = policy->policy;
        policy->user_policy.governor = policy->governor;

        if (ret) {
                pr_debug("setting policy failed\n");
                if (cpufreq_driver->exit)
                        cpufreq_driver->exit(policy);
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
                                  struct device *dev, bool frozen)
{
        struct cpufreq_policy *policy;
        int ret = 0, has_target = !!cpufreq_driver->target;
        unsigned long flags;

        policy = cpufreq_cpu_get(sibling);
        WARN_ON(!policy);

        if (has_target)
                __cpufreq_governor(policy, CPUFREQ_GOV_STOP);

        lock_policy_rwsem_write(sibling);

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        cpumask_set_cpu(cpu, policy->cpus);
        per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
        per_cpu(cpufreq_cpu_data, cpu) = policy;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        unlock_policy_rwsem_write(sibling);

        if (has_target) {
                __cpufreq_governor(policy, CPUFREQ_GOV_START);
                __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
        }

        /* Don't touch sysfs links during light-weight init */
        if (frozen) {
                /* Drop the extra refcount that we took above */
                cpufreq_cpu_put(policy);
                return 0;
        }

        ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
        if (ret)
                cpufreq_cpu_put(policy);

        return ret;
}
#endif

static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        unsigned long flags;

        write_lock_irqsave(&cpufreq_driver_lock, flags);

        policy = per_cpu(cpufreq_cpu_data_fallback, cpu);

        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        return policy;
}

static struct cpufreq_policy *cpufreq_policy_alloc(void)
{
        struct cpufreq_policy *policy;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        return policy;

err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}
994 994
995 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, 995 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
996 bool frozen) 996 bool frozen)
997 { 997 {
998 unsigned int j, cpu = dev->id; 998 unsigned int j, cpu = dev->id;
999 int ret = -ENOMEM; 999 int ret = -ENOMEM;
1000 struct cpufreq_policy *policy; 1000 struct cpufreq_policy *policy;
1001 unsigned long flags; 1001 unsigned long flags;
1002 #ifdef CONFIG_HOTPLUG_CPU 1002 #ifdef CONFIG_HOTPLUG_CPU
1003 struct cpufreq_governor *gov; 1003 struct cpufreq_governor *gov;
1004 int sibling; 1004 int sibling;
1005 #endif 1005 #endif
1006 1006
1007 if (cpu_is_offline(cpu)) 1007 if (cpu_is_offline(cpu))
1008 return 0; 1008 return 0;
1009 1009
1010 pr_debug("adding CPU %u\n", cpu); 1010 pr_debug("adding CPU %u\n", cpu);
1011 1011
1012 #ifdef CONFIG_SMP 1012 #ifdef CONFIG_SMP
1013 /* check whether a different CPU already registered this 1013 /* check whether a different CPU already registered this
1014 * CPU because it is in the same boat. */ 1014 * CPU because it is in the same boat. */
1015 policy = cpufreq_cpu_get(cpu); 1015 policy = cpufreq_cpu_get(cpu);
1016 if (unlikely(policy)) { 1016 if (unlikely(policy)) {
1017 cpufreq_cpu_put(policy); 1017 cpufreq_cpu_put(policy);
1018 return 0; 1018 return 0;
1019 } 1019 }
1020 1020
1021 #ifdef CONFIG_HOTPLUG_CPU 1021 #ifdef CONFIG_HOTPLUG_CPU
1022 /* Check if this cpu was hot-unplugged earlier and has siblings */ 1022 /* Check if this cpu was hot-unplugged earlier and has siblings */
1023 read_lock_irqsave(&cpufreq_driver_lock, flags); 1023 read_lock_irqsave(&cpufreq_driver_lock, flags);
1024 for_each_online_cpu(sibling) { 1024 for_each_online_cpu(sibling) {
1025 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling); 1025 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
1026 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) { 1026 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
1027 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1027 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1028 return cpufreq_add_policy_cpu(cpu, sibling, dev, 1028 return cpufreq_add_policy_cpu(cpu, sibling, dev,
1029 frozen); 1029 frozen);
1030 } 1030 }
1031 } 1031 }
1032 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1032 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1033 #endif 1033 #endif
1034 #endif 1034 #endif
1035 1035
1036 if (!try_module_get(cpufreq_driver->owner)) { 1036 if (!try_module_get(cpufreq_driver->owner)) {
1037 ret = -EINVAL; 1037 ret = -EINVAL;
1038 goto module_out; 1038 goto module_out;
1039 } 1039 }
1040 1040
1041 if (frozen) 1041 if (frozen)
1042 /* Restore the saved policy when doing light-weight init */ 1042 /* Restore the saved policy when doing light-weight init */
1043 policy = cpufreq_policy_restore(cpu); 1043 policy = cpufreq_policy_restore(cpu);
1044 else 1044 else
1045 policy = cpufreq_policy_alloc(); 1045 policy = cpufreq_policy_alloc();
1046 1046
1047 if (!policy) 1047 if (!policy)
1048 goto nomem_out; 1048 goto nomem_out;
1049 1049
1050 policy->cpu = cpu; 1050 policy->cpu = cpu;
1051 policy->governor = CPUFREQ_DEFAULT_GOVERNOR; 1051 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1052 cpumask_copy(policy->cpus, cpumask_of(cpu)); 1052 cpumask_copy(policy->cpus, cpumask_of(cpu));
1053 1053
1054 /* Initially set CPU itself as the policy_cpu */ 1054 /* Initially set CPU itself as the policy_cpu */
1055 per_cpu(cpufreq_policy_cpu, cpu) = cpu; 1055 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
1056 1056
1057 init_completion(&policy->kobj_unregister); 1057 init_completion(&policy->kobj_unregister);
1058 INIT_WORK(&policy->update, handle_update); 1058 INIT_WORK(&policy->update, handle_update);
1059 1059
1060 /* call driver. From then on the driver must be able 1060 /* call driver. From then on the driver must be able
1061 * to accept all calls to ->verify and ->setpolicy for this CPU 1061 * to accept all calls to ->verify and ->setpolicy for this CPU
1062 */ 1062 */
1063 ret = cpufreq_driver->init(policy); 1063 ret = cpufreq_driver->init(policy);
1064 if (ret) { 1064 if (ret) {
1065 pr_debug("initialization failed\n"); 1065 pr_debug("initialization failed\n");
1066 goto err_set_policy_cpu; 1066 goto err_set_policy_cpu;
1067 } 1067 }
1068 1068
1069 /* related cpus should at least have policy->cpus */ 1069 /* related cpus should at least have policy->cpus */
1070 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 1070 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1071 1071
1072 /* 1072 /*
1073 * affected cpus must always be the ones that are online. We aren't 1073 * affected cpus must always be the ones that are online. We aren't
1074 * managing offline cpus here. 1074 * managing offline cpus here.
1075 */ 1075 */
1076 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); 1076 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1077 1077
1078 policy->user_policy.min = policy->min; 1078 policy->user_policy.min = policy->min;
1079 policy->user_policy.max = policy->max; 1079 policy->user_policy.max = policy->max;
1080 1080
1081 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1081 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1082 CPUFREQ_START, policy); 1082 CPUFREQ_START, policy);
1083 1083
1084 #ifdef CONFIG_HOTPLUG_CPU 1084 #ifdef CONFIG_HOTPLUG_CPU
1085 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu)); 1085 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1086 if (gov) { 1086 if (gov) {
1087 policy->governor = gov; 1087 policy->governor = gov;
1088 pr_debug("Restoring governor %s for cpu %d\n", 1088 pr_debug("Restoring governor %s for cpu %d\n",
1089 policy->governor->name, cpu); 1089 policy->governor->name, cpu);
1090 } 1090 }
1091 #endif 1091 #endif
1092 1092
1093 write_lock_irqsave(&cpufreq_driver_lock, flags); 1093 write_lock_irqsave(&cpufreq_driver_lock, flags);
1094 for_each_cpu(j, policy->cpus) { 1094 for_each_cpu(j, policy->cpus) {
1095 per_cpu(cpufreq_cpu_data, j) = policy; 1095 per_cpu(cpufreq_cpu_data, j) = policy;
1096 per_cpu(cpufreq_policy_cpu, j) = policy->cpu; 1096 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
1097 } 1097 }
1098 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1098 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1099 1099
1100 if (!frozen) { 1100 if (!frozen) {
1101 ret = cpufreq_add_dev_interface(cpu, policy, dev); 1101 ret = cpufreq_add_dev_interface(cpu, policy, dev);
1102 if (ret) 1102 if (ret)
1103 goto err_out_unregister; 1103 goto err_out_unregister;
1104 } 1104 }
1105 1105
1106 cpufreq_init_policy(policy); 1106 cpufreq_init_policy(policy);
1107 1107
1108 kobject_uevent(&policy->kobj, KOBJ_ADD); 1108 kobject_uevent(&policy->kobj, KOBJ_ADD);
1109 module_put(cpufreq_driver->owner); 1109 module_put(cpufreq_driver->owner);
1110 pr_debug("initialization complete\n"); 1110 pr_debug("initialization complete\n");
1111 1111
1112 return 0; 1112 return 0;
1113 1113
1114 err_out_unregister: 1114 err_out_unregister:
1115 write_lock_irqsave(&cpufreq_driver_lock, flags); 1115 write_lock_irqsave(&cpufreq_driver_lock, flags);
1116 for_each_cpu(j, policy->cpus) { 1116 for_each_cpu(j, policy->cpus) {
1117 per_cpu(cpufreq_cpu_data, j) = NULL; 1117 per_cpu(cpufreq_cpu_data, j) = NULL;
1118 if (j != cpu) 1118 if (j != cpu)
1119 per_cpu(cpufreq_policy_cpu, j) = -1; 1119 per_cpu(cpufreq_policy_cpu, j) = -1;
1120 } 1120 }
1121 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1121 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1122 1122
1123 kobject_put(&policy->kobj); 1123 kobject_put(&policy->kobj);
1124 wait_for_completion(&policy->kobj_unregister); 1124 wait_for_completion(&policy->kobj_unregister);
1125 1125
1126 err_set_policy_cpu: 1126 err_set_policy_cpu:
1127 per_cpu(cpufreq_policy_cpu, cpu) = -1; 1127 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1128 cpufreq_policy_free(policy); 1128 cpufreq_policy_free(policy);
1129 nomem_out: 1129 nomem_out:
1130 module_put(cpufreq_driver->owner); 1130 module_put(cpufreq_driver->owner);
1131 module_out: 1131 module_out:
1132 return ret; 1132 return ret;
1133 } 1133 }
1134 1134
1135 /** 1135 /**
1136 * cpufreq_add_dev - add a CPU device 1136 * cpufreq_add_dev - add a CPU device
1137 * 1137 *
1138 * Adds the cpufreq interface for a CPU device. 1138 * Adds the cpufreq interface for a CPU device.
1139 * 1139 *
1140 * The Oracle says: try running cpufreq registration/unregistration concurrently 1140 * The Oracle says: try running cpufreq registration/unregistration concurrently
1141 * with cpu hotplugging and all hell will break loose. Tried to clean this 1141 * with cpu hotplugging and all hell will break loose. Tried to clean this
1142 * mess up, but more thorough testing is needed. - Mathieu 1142 * mess up, but more thorough testing is needed. - Mathieu
1143 */ 1143 */
1144 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) 1144 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1145 { 1145 {
1146 return __cpufreq_add_dev(dev, sif, false); 1146 return __cpufreq_add_dev(dev, sif, false);
1147 } 1147 }
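
Editor's note: the "frozen" flag threaded through __cpufreq_add_dev() and
__cpufreq_remove_dev() is meant to be driven from the CPU hotplug notifier.
As a hedged, illustrative sketch only (the callback name and dispatch below
are assumptions, not this file's actual notifier), it can be derived from
the CPU_TASKS_FROZEN bit that suspend/resume sets on hotplug actions:

	#include <linux/cpu.h>
	#include <linux/notifier.h>

	/* Hypothetical notifier sketch -- not the actual callback in this
	 * file. CPU_TASKS_FROZEN is set on hotplug actions issued during
	 * suspend/resume, which selects the light-weight init/teardown path.
	 */
	static int example_cpu_callback(struct notifier_block *nfb,
					unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;
		struct device *dev = get_cpu_device(cpu);
		bool frozen = action & CPU_TASKS_FROZEN;

		if (dev) {
			switch (action & ~CPU_TASKS_FROZEN) {
			case CPU_ONLINE:
				__cpufreq_add_dev(dev, NULL, frozen);
				break;
			case CPU_DOWN_PREPARE:
				__cpufreq_remove_dev(dev, NULL, frozen);
				break;
			}
		}
		return NOTIFY_OK;
	}
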
1148 1148
1149 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu) 1149 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1150 { 1150 {
1151 int j; 1151 int j;
1152 1152
1153 policy->last_cpu = policy->cpu; 1153 policy->last_cpu = policy->cpu;
1154 policy->cpu = cpu; 1154 policy->cpu = cpu;
1155 1155
1156 for_each_cpu(j, policy->cpus) 1156 for_each_cpu(j, policy->cpus)
1157 per_cpu(cpufreq_policy_cpu, j) = cpu; 1157 per_cpu(cpufreq_policy_cpu, j) = cpu;
1158 1158
1159 #ifdef CONFIG_CPU_FREQ_TABLE 1159 #ifdef CONFIG_CPU_FREQ_TABLE
1160 cpufreq_frequency_table_update_policy_cpu(policy); 1160 cpufreq_frequency_table_update_policy_cpu(policy);
1161 #endif 1161 #endif
1162 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1162 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1163 CPUFREQ_UPDATE_POLICY_CPU, policy); 1163 CPUFREQ_UPDATE_POLICY_CPU, policy);
1164 } 1164 }
1165 1165
1166 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *data, 1166 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *data,
1167 unsigned int old_cpu, bool frozen) 1167 unsigned int old_cpu, bool frozen)
1168 { 1168 {
1169 struct device *cpu_dev; 1169 struct device *cpu_dev;
1170 unsigned long flags; 1170 unsigned long flags;
1171 int ret; 1171 int ret;
1172 1172
1173 /* first sibling now owns the new sysfs dir */ 1173 /* first sibling now owns the new sysfs dir */
1174 cpu_dev = get_cpu_device(cpumask_first(data->cpus)); 1174 cpu_dev = get_cpu_device(cpumask_first(data->cpus));
1175 1175
1176 /* Don't touch sysfs files during light-weight tear-down */ 1176 /* Don't touch sysfs files during light-weight tear-down */
1177 if (frozen) 1177 if (frozen)
1178 return cpu_dev->id; 1178 return cpu_dev->id;
1179 1179
1180 sysfs_remove_link(&cpu_dev->kobj, "cpufreq"); 1180 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1181 ret = kobject_move(&data->kobj, &cpu_dev->kobj); 1181 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1182 if (ret) { 1182 if (ret) {
1183 pr_err("%s: Failed to move kobj: %d", __func__, ret); 1183 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1184 1184
1185 WARN_ON(lock_policy_rwsem_write(old_cpu)); 1185 WARN_ON(lock_policy_rwsem_write(old_cpu));
1186 cpumask_set_cpu(old_cpu, data->cpus); 1186 cpumask_set_cpu(old_cpu, data->cpus);
1187 1187
1188 write_lock_irqsave(&cpufreq_driver_lock, flags); 1188 write_lock_irqsave(&cpufreq_driver_lock, flags);
1189 per_cpu(cpufreq_cpu_data, old_cpu) = data; 1189 per_cpu(cpufreq_cpu_data, old_cpu) = data;
1190 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1190 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1191 1191
1192 unlock_policy_rwsem_write(old_cpu); 1192 unlock_policy_rwsem_write(old_cpu);
1193 1193
1194 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj, 1194 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1195 "cpufreq"); 1195 "cpufreq");
1196 1196
1197 return -EINVAL; 1197 return -EINVAL;
1198 } 1198 }
1199 1199
1200 return cpu_dev->id; 1200 return cpu_dev->id;
1201 } 1201 }
1202 1202
1203 /** 1203 /**
1204 * __cpufreq_remove_dev - remove a CPU device 1204 * __cpufreq_remove_dev - remove a CPU device
1205 * 1205 *
1206 * Removes the cpufreq interface for a CPU device. 1206 * Removes the cpufreq interface for a CPU device.
1207 * Caller should already have policy_rwsem in write mode for this CPU. 1207 * Caller should already have policy_rwsem in write mode for this CPU.
1208 * This routine releases the rwsem before returning. 1208 * This routine releases the rwsem before returning.
1209 */ 1209 */
1210 static int __cpufreq_remove_dev(struct device *dev, 1210 static int __cpufreq_remove_dev(struct device *dev,
1211 struct subsys_interface *sif, bool frozen) 1211 struct subsys_interface *sif, bool frozen)
1212 { 1212 {
1213 unsigned int cpu = dev->id, cpus; 1213 unsigned int cpu = dev->id, cpus;
1214 int new_cpu; 1214 int new_cpu;
1215 unsigned long flags; 1215 unsigned long flags;
1216 struct cpufreq_policy *data; 1216 struct cpufreq_policy *data;
1217 struct kobject *kobj; 1217 struct kobject *kobj;
1218 struct completion *cmp; 1218 struct completion *cmp;
1219 1219
1220 pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 1220 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1221 1221
1222 write_lock_irqsave(&cpufreq_driver_lock, flags); 1222 write_lock_irqsave(&cpufreq_driver_lock, flags);
1223 1223
1224 data = per_cpu(cpufreq_cpu_data, cpu); 1224 data = per_cpu(cpufreq_cpu_data, cpu);
1225 per_cpu(cpufreq_cpu_data, cpu) = NULL; 1225 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1226 1226
1227 /* Save the policy somewhere when doing a light-weight tear-down */ 1227 /* Save the policy somewhere when doing a light-weight tear-down */
1228 if (frozen) 1228 if (frozen)
1229 per_cpu(cpufreq_cpu_data_fallback, cpu) = data; 1229 per_cpu(cpufreq_cpu_data_fallback, cpu) = data;
1230 1230
1231 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1231 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1232 1232
1233 if (!data) { 1233 if (!data) {
1234 pr_debug("%s: No cpu_data found\n", __func__); 1234 pr_debug("%s: No cpu_data found\n", __func__);
1235 return -EINVAL; 1235 return -EINVAL;
1236 } 1236 }
1237 1237
1238 if (cpufreq_driver->target) 1238 if (cpufreq_driver->target)
1239 __cpufreq_governor(data, CPUFREQ_GOV_STOP); 1239 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1240 1240
1241 #ifdef CONFIG_HOTPLUG_CPU 1241 #ifdef CONFIG_HOTPLUG_CPU
1242 if (!cpufreq_driver->setpolicy) 1242 if (!cpufreq_driver->setpolicy)
1243 strncpy(per_cpu(cpufreq_cpu_governor, cpu), 1243 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1244 data->governor->name, CPUFREQ_NAME_LEN); 1244 data->governor->name, CPUFREQ_NAME_LEN);
1245 #endif 1245 #endif
1246 1246
1247 WARN_ON(lock_policy_rwsem_write(cpu)); 1247 WARN_ON(lock_policy_rwsem_write(cpu));
1248 cpus = cpumask_weight(data->cpus); 1248 cpus = cpumask_weight(data->cpus);
1249 1249
1250 if (cpus > 1) 1250 if (cpus > 1)
1251 cpumask_clear_cpu(cpu, data->cpus); 1251 cpumask_clear_cpu(cpu, data->cpus);
1252 unlock_policy_rwsem_write(cpu); 1252 unlock_policy_rwsem_write(cpu);
1253 1253
1254 if (cpu != data->cpu && !frozen) { 1254 if (cpu != data->cpu && !frozen) {
1255 sysfs_remove_link(&dev->kobj, "cpufreq"); 1255 sysfs_remove_link(&dev->kobj, "cpufreq");
1256 } else if (cpus > 1) { 1256 } else if (cpus > 1) {
1257 1257
1258 new_cpu = cpufreq_nominate_new_policy_cpu(data, cpu, frozen); 1258 new_cpu = cpufreq_nominate_new_policy_cpu(data, cpu, frozen);
1259 if (new_cpu >= 0) { 1259 if (new_cpu >= 0) {
1260 WARN_ON(lock_policy_rwsem_write(cpu)); 1260 WARN_ON(lock_policy_rwsem_write(cpu));
1261 update_policy_cpu(data, new_cpu); 1261 update_policy_cpu(data, new_cpu);
1262 unlock_policy_rwsem_write(cpu); 1262 unlock_policy_rwsem_write(cpu);
1263 1263
1264 if (!frozen) { 1264 if (!frozen) {
1265 pr_debug("%s: policy Kobject moved to cpu: %d " 1265 pr_debug("%s: policy Kobject moved to cpu: %d "
1266 "from: %d\n",__func__, new_cpu, cpu); 1266 "from: %d\n",__func__, new_cpu, cpu);
1267 } 1267 }
1268 } 1268 }
1269 } 1269 }
1270 1270
1271 /* If cpu is last user of policy, free policy */ 1271 /* If cpu is last user of policy, free policy */
1272 if (cpus == 1) { 1272 if (cpus == 1) {
1273 if (cpufreq_driver->target) 1273 if (cpufreq_driver->target)
1274 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); 1274 __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
1275 1275
1276 if (!frozen) { 1276 if (!frozen) {
1277 lock_policy_rwsem_read(cpu); 1277 lock_policy_rwsem_read(cpu);
1278 kobj = &data->kobj; 1278 kobj = &data->kobj;
1279 cmp = &data->kobj_unregister; 1279 cmp = &data->kobj_unregister;
1280 unlock_policy_rwsem_read(cpu); 1280 unlock_policy_rwsem_read(cpu);
1281 kobject_put(kobj); 1281 kobject_put(kobj);
1282 1282
1283 /* 1283 /*
1284 * We need to make sure that the underlying kobj is 1284 * We need to make sure that the underlying kobj is
1285 * actually not referenced anymore by anybody before we 1285 * actually not referenced anymore by anybody before we
1286 * proceed with unloading. 1286 * proceed with unloading.
1287 */ 1287 */
1288 pr_debug("waiting for dropping of refcount\n"); 1288 pr_debug("waiting for dropping of refcount\n");
1289 wait_for_completion(cmp); 1289 wait_for_completion(cmp);
1290 pr_debug("wait complete\n"); 1290 pr_debug("wait complete\n");
1291 } 1291 }
1292 1292
1293 /* 1293 /*
1294 * Perform the ->exit() even during light-weight tear-down, 1294 * Perform the ->exit() even during light-weight tear-down,
1295 * since this is a core component, and is essential for the 1295 * since this is a core component, and is essential for the
1296 * subsequent light-weight ->init() to succeed. 1296 * subsequent light-weight ->init() to succeed.
1297 */ 1297 */
1298 if (cpufreq_driver->exit) 1298 if (cpufreq_driver->exit)
1299 cpufreq_driver->exit(data); 1299 cpufreq_driver->exit(data);
1300 1300
1301 if (!frozen) 1301 if (!frozen)
1302 cpufreq_policy_free(data); 1302 cpufreq_policy_free(data);
1303 } else { 1303 } else {
1304 1304
1305 if (!frozen) { 1305 if (!frozen) {
1306 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); 1306 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1307 cpufreq_cpu_put(data); 1307 cpufreq_cpu_put(data);
1308 } 1308 }
1309 1309
1310 if (cpufreq_driver->target) { 1310 if (cpufreq_driver->target) {
1311 __cpufreq_governor(data, CPUFREQ_GOV_START); 1311 __cpufreq_governor(data, CPUFREQ_GOV_START);
1312 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); 1312 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1313 } 1313 }
1314 } 1314 }
1315 1315
1316 per_cpu(cpufreq_policy_cpu, cpu) = -1; 1316 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1317 return 0; 1317 return 0;
1318 } 1318 }
1319 1319
1320 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 1320 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1321 { 1321 {
1322 unsigned int cpu = dev->id; 1322 unsigned int cpu = dev->id;
1323 int retval; 1323 int retval;
1324 1324
1325 if (cpu_is_offline(cpu)) 1325 if (cpu_is_offline(cpu))
1326 return 0; 1326 return 0;
1327 1327
1328 retval = __cpufreq_remove_dev(dev, sif, false); 1328 retval = __cpufreq_remove_dev(dev, sif, false);
1329 return retval; 1329 return retval;
1330 } 1330 }
1331 1331
1332 static void handle_update(struct work_struct *work) 1332 static void handle_update(struct work_struct *work)
1333 { 1333 {
1334 struct cpufreq_policy *policy = 1334 struct cpufreq_policy *policy =
1335 container_of(work, struct cpufreq_policy, update); 1335 container_of(work, struct cpufreq_policy, update);
1336 unsigned int cpu = policy->cpu; 1336 unsigned int cpu = policy->cpu;
1337 pr_debug("handle_update for cpu %u called\n", cpu); 1337 pr_debug("handle_update for cpu %u called\n", cpu);
1338 cpufreq_update_policy(cpu); 1338 cpufreq_update_policy(cpu);
1339 } 1339 }
1340 1340
1341 /** 1341 /**
1342 * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're 1342 * cpufreq_out_of_sync - If the actual and saved CPU frequencies differ, we're
1343 * in deep trouble. 1343 * in deep trouble.
1344 * @cpu: cpu number 1344 * @cpu: cpu number
1345 * @old_freq: CPU frequency the kernel thinks the CPU runs at 1345 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1346 * @new_freq: CPU frequency the CPU actually runs at 1346 * @new_freq: CPU frequency the CPU actually runs at
1347 * 1347 *
1348 * We adjust to current frequency first, and need to clean up later. 1348 * We adjust to current frequency first, and need to clean up later.
1349 * So either call cpufreq_update_policy() or schedule handle_update(). 1349 * So either call cpufreq_update_policy() or schedule handle_update().
1350 */ 1350 */
1351 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq, 1351 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1352 unsigned int new_freq) 1352 unsigned int new_freq)
1353 { 1353 {
1354 struct cpufreq_policy *policy; 1354 struct cpufreq_policy *policy;
1355 struct cpufreq_freqs freqs; 1355 struct cpufreq_freqs freqs;
1356 unsigned long flags; 1356 unsigned long flags;
1357 1357
1358 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing " 1358 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1359 "core thinks of %u, is %u kHz.\n", old_freq, new_freq); 1359 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1360 1360
1361 freqs.old = old_freq; 1361 freqs.old = old_freq;
1362 freqs.new = new_freq; 1362 freqs.new = new_freq;
1363 1363
1364 read_lock_irqsave(&cpufreq_driver_lock, flags); 1364 read_lock_irqsave(&cpufreq_driver_lock, flags);
1365 policy = per_cpu(cpufreq_cpu_data, cpu); 1365 policy = per_cpu(cpufreq_cpu_data, cpu);
1366 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1366 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1367 1367
1368 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); 1368 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1369 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); 1369 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1370 } 1370 }
1371 1371
1372 /** 1372 /**
1373 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur 1373 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1374 * @cpu: CPU number 1374 * @cpu: CPU number
1375 * 1375 *
1376 * This is the last known freq, without actually getting it from the driver. 1376 * This is the last known freq, without actually getting it from the driver.
1377 * Return value will be the same as what is shown in scaling_cur_freq in sysfs. 1377 * Return value will be the same as what is shown in scaling_cur_freq in sysfs.
1378 */ 1378 */
1379 unsigned int cpufreq_quick_get(unsigned int cpu) 1379 unsigned int cpufreq_quick_get(unsigned int cpu)
1380 { 1380 {
1381 struct cpufreq_policy *policy; 1381 struct cpufreq_policy *policy;
1382 unsigned int ret_freq = 0; 1382 unsigned int ret_freq = 0;
1383 1383
1384 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) 1384 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1385 return cpufreq_driver->get(cpu); 1385 return cpufreq_driver->get(cpu);
1386 1386
1387 policy = cpufreq_cpu_get(cpu); 1387 policy = cpufreq_cpu_get(cpu);
1388 if (policy) { 1388 if (policy) {
1389 ret_freq = policy->cur; 1389 ret_freq = policy->cur;
1390 cpufreq_cpu_put(policy); 1390 cpufreq_cpu_put(policy);
1391 } 1391 }
1392 1392
1393 return ret_freq; 1393 return ret_freq;
1394 } 1394 }
1395 EXPORT_SYMBOL(cpufreq_quick_get); 1395 EXPORT_SYMBOL(cpufreq_quick_get);
1396 1396
1397 /** 1397 /**
1398 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU 1398 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1399 * @cpu: CPU number 1399 * @cpu: CPU number
1400 * 1400 *
1401 * Just return the max possible frequency for a given CPU. 1401 * Just return the max possible frequency for a given CPU.
1402 */ 1402 */
1403 unsigned int cpufreq_quick_get_max(unsigned int cpu) 1403 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1404 { 1404 {
1405 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 1405 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1406 unsigned int ret_freq = 0; 1406 unsigned int ret_freq = 0;
1407 1407
1408 if (policy) { 1408 if (policy) {
1409 ret_freq = policy->max; 1409 ret_freq = policy->max;
1410 cpufreq_cpu_put(policy); 1410 cpufreq_cpu_put(policy);
1411 } 1411 }
1412 1412
1413 return ret_freq; 1413 return ret_freq;
1414 } 1414 }
1415 EXPORT_SYMBOL(cpufreq_quick_get_max); 1415 EXPORT_SYMBOL(cpufreq_quick_get_max);
1416 1416
1417 static unsigned int __cpufreq_get(unsigned int cpu) 1417 static unsigned int __cpufreq_get(unsigned int cpu)
1418 { 1418 {
1419 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); 1419 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1420 unsigned int ret_freq = 0; 1420 unsigned int ret_freq = 0;
1421 1421
1422 if (!cpufreq_driver->get) 1422 if (!cpufreq_driver->get)
1423 return ret_freq; 1423 return ret_freq;
1424 1424
1425 ret_freq = cpufreq_driver->get(cpu); 1425 ret_freq = cpufreq_driver->get(cpu);
1426 1426
1427 if (ret_freq && policy->cur && 1427 if (ret_freq && policy->cur &&
1428 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) { 1428 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1429 /* verify no discrepancy between actual and 1429 /* verify no discrepancy between actual and
1430 saved value exists */ 1430 saved value exists */
1431 if (unlikely(ret_freq != policy->cur)) { 1431 if (unlikely(ret_freq != policy->cur)) {
1432 cpufreq_out_of_sync(cpu, policy->cur, ret_freq); 1432 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
1433 schedule_work(&policy->update); 1433 schedule_work(&policy->update);
1434 } 1434 }
1435 } 1435 }
1436 1436
1437 return ret_freq; 1437 return ret_freq;
1438 } 1438 }
1439 1439
1440 /** 1440 /**
1441 * cpufreq_get - get the current CPU frequency (in kHz) 1441 * cpufreq_get - get the current CPU frequency (in kHz)
1442 * @cpu: CPU number 1442 * @cpu: CPU number
1443 * 1443 *
1444 * Get the current (static) CPU frequency 1444 * Get the current (static) CPU frequency
1445 */ 1445 */
1446 unsigned int cpufreq_get(unsigned int cpu) 1446 unsigned int cpufreq_get(unsigned int cpu)
1447 { 1447 {
1448 unsigned int ret_freq = 0; 1448 unsigned int ret_freq = 0;
1449 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 1449 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1450 1450
1451 if (!policy) 1451 if (!policy)
1452 goto out; 1452 goto out;
1453 1453
1454 if (unlikely(lock_policy_rwsem_read(cpu))) 1454 if (unlikely(lock_policy_rwsem_read(cpu)))
1455 goto out_policy; 1455 goto out_policy;
1456 1456
1457 ret_freq = __cpufreq_get(cpu); 1457 ret_freq = __cpufreq_get(cpu);
1458 1458
1459 unlock_policy_rwsem_read(cpu); 1459 unlock_policy_rwsem_read(cpu);
1460 1460
1461 out_policy: 1461 out_policy:
1462 cpufreq_cpu_put(policy); 1462 cpufreq_cpu_put(policy);
1463 out: 1463 out:
1464 return ret_freq; 1464 return ret_freq;
1465 } 1465 }
1466 EXPORT_SYMBOL(cpufreq_get); 1466 EXPORT_SYMBOL(cpufreq_get);
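
Usage note: the difference between the getters above is whether the driver
is consulted. A minimal, illustrative sketch (the function name is
hypothetical):

	#include <linux/cpufreq.h>
	#include <linux/kernel.h>

	static void example_report_freq(void)
	{
		unsigned int hw = cpufreq_get(0);		/* may query the driver */
		unsigned int cached = cpufreq_quick_get(0);	/* last known policy->cur */

		/* both return 0 if no policy exists for the CPU */
		pr_info("cpu0: %u kHz (cached: %u kHz)\n", hw, cached);
	}
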
1467 1467
1468 static struct subsys_interface cpufreq_interface = { 1468 static struct subsys_interface cpufreq_interface = {
1469 .name = "cpufreq", 1469 .name = "cpufreq",
1470 .subsys = &cpu_subsys, 1470 .subsys = &cpu_subsys,
1471 .add_dev = cpufreq_add_dev, 1471 .add_dev = cpufreq_add_dev,
1472 .remove_dev = cpufreq_remove_dev, 1472 .remove_dev = cpufreq_remove_dev,
1473 }; 1473 };
1474 1474
1475 /** 1475 /**
1476 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend. 1476 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1477 * 1477 *
1478 * This function is only executed for the boot processor. The other CPUs 1478 * This function is only executed for the boot processor. The other CPUs
1479 * have been put offline by means of CPU hotplug. 1479 * have been put offline by means of CPU hotplug.
1480 */ 1480 */
1481 static int cpufreq_bp_suspend(void) 1481 static int cpufreq_bp_suspend(void)
1482 { 1482 {
1483 int ret = 0; 1483 int ret = 0;
1484 1484
1485 int cpu = smp_processor_id(); 1485 int cpu = smp_processor_id();
1486 struct cpufreq_policy *cpu_policy; 1486 struct cpufreq_policy *cpu_policy;
1487 1487
1488 pr_debug("suspending cpu %u\n", cpu); 1488 pr_debug("suspending cpu %u\n", cpu);
1489 1489
1490 /* If there's no policy for the boot CPU, we have nothing to do. */ 1490 /* If there's no policy for the boot CPU, we have nothing to do. */
1491 cpu_policy = cpufreq_cpu_get(cpu); 1491 cpu_policy = cpufreq_cpu_get(cpu);
1492 if (!cpu_policy) 1492 if (!cpu_policy)
1493 return 0; 1493 return 0;
1494 1494
1495 if (cpufreq_driver->suspend) { 1495 if (cpufreq_driver->suspend) {
1496 ret = cpufreq_driver->suspend(cpu_policy); 1496 ret = cpufreq_driver->suspend(cpu_policy);
1497 if (ret) 1497 if (ret)
1498 printk(KERN_ERR "cpufreq: suspend failed in ->suspend " 1498 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1499 "step on CPU %u\n", cpu_policy->cpu); 1499 "step on CPU %u\n", cpu_policy->cpu);
1500 } 1500 }
1501 1501
1502 cpufreq_cpu_put(cpu_policy); 1502 cpufreq_cpu_put(cpu_policy);
1503 return ret; 1503 return ret;
1504 } 1504 }
1505 1505
1506 /** 1506 /**
1507 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU. 1507 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1508 * 1508 *
1509 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume()) 1509 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1510 * 2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are 1510 * 2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are
1511 * restored. It will verify that the current freq is in sync with 1511 * restored. It will verify that the current freq is in sync with
1512 * what we believe it to be. This is a bit later than when it 1512 * what we believe it to be. This is a bit later than when it
1513 * should be, but nonetheless it's better than calling 1513 * should be, but nonetheless it's better than calling
1514 * cpufreq_driver->get() here which might re-enable interrupts... 1514 * cpufreq_driver->get() here which might re-enable interrupts...
1515 * 1515 *
1516 * This function is only executed for the boot CPU. The other CPUs have not 1516 * This function is only executed for the boot CPU. The other CPUs have not
1517 * been turned on yet. 1517 * been turned on yet.
1518 */ 1518 */
1519 static void cpufreq_bp_resume(void) 1519 static void cpufreq_bp_resume(void)
1520 { 1520 {
1521 int ret = 0; 1521 int ret = 0;
1522 1522
1523 int cpu = smp_processor_id(); 1523 int cpu = smp_processor_id();
1524 struct cpufreq_policy *cpu_policy; 1524 struct cpufreq_policy *cpu_policy;
1525 1525
1526 pr_debug("resuming cpu %u\n", cpu); 1526 pr_debug("resuming cpu %u\n", cpu);
1527 1527
1528 /* If there's no policy for the boot CPU, we have nothing to do. */ 1528 /* If there's no policy for the boot CPU, we have nothing to do. */
1529 cpu_policy = cpufreq_cpu_get(cpu); 1529 cpu_policy = cpufreq_cpu_get(cpu);
1530 if (!cpu_policy) 1530 if (!cpu_policy)
1531 return; 1531 return;
1532 1532
1533 if (cpufreq_driver->resume) { 1533 if (cpufreq_driver->resume) {
1534 ret = cpufreq_driver->resume(cpu_policy); 1534 ret = cpufreq_driver->resume(cpu_policy);
1535 if (ret) { 1535 if (ret) {
1536 printk(KERN_ERR "cpufreq: resume failed in ->resume " 1536 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1537 "step on CPU %u\n", cpu_policy->cpu); 1537 "step on CPU %u\n", cpu_policy->cpu);
1538 goto fail; 1538 goto fail;
1539 } 1539 }
1540 } 1540 }
1541 1541
1542 schedule_work(&cpu_policy->update); 1542 schedule_work(&cpu_policy->update);
1543 1543
1544 fail: 1544 fail:
1545 cpufreq_cpu_put(cpu_policy); 1545 cpufreq_cpu_put(cpu_policy);
1546 } 1546 }
1547 1547
1548 static struct syscore_ops cpufreq_syscore_ops = { 1548 static struct syscore_ops cpufreq_syscore_ops = {
1549 .suspend = cpufreq_bp_suspend, 1549 .suspend = cpufreq_bp_suspend,
1550 .resume = cpufreq_bp_resume, 1550 .resume = cpufreq_bp_resume,
1551 }; 1551 };
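
These syscore ops only take effect once registered; the registration site
lies outside this excerpt, but with the standard API it amounts to the
following (the initcall name is illustrative; register_syscore_ops()
returns void and cannot fail):

	static int __init example_syscore_register(void)
	{
		register_syscore_ops(&cpufreq_syscore_ops);
		return 0;
	}
	core_initcall(example_syscore_register);
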
1552 1552
1553 /** 1553 /**
1554 * cpufreq_get_current_driver - return current driver's name 1554 * cpufreq_get_current_driver - return current driver's name
1555 * 1555 *
1556 * Return the name string of the currently loaded cpufreq driver 1556 * Return the name string of the currently loaded cpufreq driver
1557 * or NULL, if none. 1557 * or NULL, if none.
1558 */ 1558 */
1559 const char *cpufreq_get_current_driver(void) 1559 const char *cpufreq_get_current_driver(void)
1560 { 1560 {
1561 if (cpufreq_driver) 1561 if (cpufreq_driver)
1562 return cpufreq_driver->name; 1562 return cpufreq_driver->name;
1563 1563
1564 return NULL; 1564 return NULL;
1565 } 1565 }
1566 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); 1566 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1567 1567
1568 /********************************************************************* 1568 /*********************************************************************
1569 * NOTIFIER LISTS INTERFACE * 1569 * NOTIFIER LISTS INTERFACE *
1570 *********************************************************************/ 1570 *********************************************************************/
1571 1571
1572 /** 1572 /**
1573 * cpufreq_register_notifier - register a driver with cpufreq 1573 * cpufreq_register_notifier - register a driver with cpufreq
1574 * @nb: notifier function to register 1574 * @nb: notifier function to register
1575 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 1575 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1576 * 1576 *
1577 * Add a driver to one of two lists: either a list of drivers that 1577 * Add a driver to one of two lists: either a list of drivers that
1578 * are notified about clock rate changes (once before and once after 1578 * are notified about clock rate changes (once before and once after
1579 * the transition), or a list of drivers that are notified about 1579 * the transition), or a list of drivers that are notified about
1580 * changes in cpufreq policy. 1580 * changes in cpufreq policy.
1581 * 1581 *
1582 * This function may sleep, and has the same return conditions as 1582 * This function may sleep, and has the same return conditions as
1583 * blocking_notifier_chain_register. 1583 * blocking_notifier_chain_register.
1584 */ 1584 */
1585 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list) 1585 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1586 { 1586 {
1587 int ret; 1587 int ret;
1588 1588
1589 if (cpufreq_disabled()) 1589 if (cpufreq_disabled())
1590 return -EINVAL; 1590 return -EINVAL;
1591 1591
1592 WARN_ON(!init_cpufreq_transition_notifier_list_called); 1592 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1593 1593
1594 switch (list) { 1594 switch (list) {
1595 case CPUFREQ_TRANSITION_NOTIFIER: 1595 case CPUFREQ_TRANSITION_NOTIFIER:
1596 ret = srcu_notifier_chain_register( 1596 ret = srcu_notifier_chain_register(
1597 &cpufreq_transition_notifier_list, nb); 1597 &cpufreq_transition_notifier_list, nb);
1598 break; 1598 break;
1599 case CPUFREQ_POLICY_NOTIFIER: 1599 case CPUFREQ_POLICY_NOTIFIER:
1600 ret = blocking_notifier_chain_register( 1600 ret = blocking_notifier_chain_register(
1601 &cpufreq_policy_notifier_list, nb); 1601 &cpufreq_policy_notifier_list, nb);
1602 break; 1602 break;
1603 default: 1603 default:
1604 ret = -EINVAL; 1604 ret = -EINVAL;
1605 } 1605 }
1606 1606
1607 return ret; 1607 return ret;
1608 } 1608 }
1609 EXPORT_SYMBOL(cpufreq_register_notifier); 1609 EXPORT_SYMBOL(cpufreq_register_notifier);
1610 1610
1611 /** 1611 /**
1612 * cpufreq_unregister_notifier - unregister a driver with cpufreq 1612 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1613 * @nb: notifier block to be unregistered 1613 * @nb: notifier block to be unregistered
1614 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER 1614 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1615 * 1615 *
1616 * Remove a driver from the CPU frequency notifier list. 1616 * Remove a driver from the CPU frequency notifier list.
1617 * 1617 *
1618 * This function may sleep, and has the same return conditions as 1618 * This function may sleep, and has the same return conditions as
1619 * blocking_notifier_chain_unregister. 1619 * blocking_notifier_chain_unregister.
1620 */ 1620 */
1621 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list) 1621 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1622 { 1622 {
1623 int ret; 1623 int ret;
1624 1624
1625 if (cpufreq_disabled()) 1625 if (cpufreq_disabled())
1626 return -EINVAL; 1626 return -EINVAL;
1627 1627
1628 switch (list) { 1628 switch (list) {
1629 case CPUFREQ_TRANSITION_NOTIFIER: 1629 case CPUFREQ_TRANSITION_NOTIFIER:
1630 ret = srcu_notifier_chain_unregister( 1630 ret = srcu_notifier_chain_unregister(
1631 &cpufreq_transition_notifier_list, nb); 1631 &cpufreq_transition_notifier_list, nb);
1632 break; 1632 break;
1633 case CPUFREQ_POLICY_NOTIFIER: 1633 case CPUFREQ_POLICY_NOTIFIER:
1634 ret = blocking_notifier_chain_unregister( 1634 ret = blocking_notifier_chain_unregister(
1635 &cpufreq_policy_notifier_list, nb); 1635 &cpufreq_policy_notifier_list, nb);
1636 break; 1636 break;
1637 default: 1637 default:
1638 ret = -EINVAL; 1638 ret = -EINVAL;
1639 } 1639 }
1640 1640
1641 return ret; 1641 return ret;
1642 } 1642 }
1643 EXPORT_SYMBOL(cpufreq_unregister_notifier); 1643 EXPORT_SYMBOL(cpufreq_unregister_notifier);
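
For reference, a small self-contained sketch of the notifier API documented
above; the callback and block names are illustrative. Transition notifiers
fire once with CPUFREQ_PRECHANGE and once with CPUFREQ_POSTCHANGE around
each frequency change:

	#include <linux/cpufreq.h>
	#include <linux/kernel.h>
	#include <linux/notifier.h>

	static int example_trans_cb(struct notifier_block *nb,
				    unsigned long val, void *data)
	{
		struct cpufreq_freqs *freqs = data;

		if (val == CPUFREQ_POSTCHANGE)
			pr_info("cpu%u: %u -> %u kHz\n",
				freqs->cpu, freqs->old, freqs->new);
		return NOTIFY_OK;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_trans_cb,
	};

	/* register:   cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
	 * unregister: cpufreq_unregister_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
	 */
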
1644 1644
1645 1645
1646 /********************************************************************* 1646 /*********************************************************************
1647 * GOVERNORS * 1647 * GOVERNORS *
1648 *********************************************************************/ 1648 *********************************************************************/
1649 1649
1650 int __cpufreq_driver_target(struct cpufreq_policy *policy, 1650 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1651 unsigned int target_freq, 1651 unsigned int target_freq,
1652 unsigned int relation) 1652 unsigned int relation)
1653 { 1653 {
1654 int retval = -EINVAL; 1654 int retval = -EINVAL;
1655 unsigned int old_target_freq = target_freq; 1655 unsigned int old_target_freq = target_freq;
1656 1656
1657 if (cpufreq_disabled()) 1657 if (cpufreq_disabled())
1658 return -ENODEV; 1658 return -ENODEV;
1659 if (policy->transition_ongoing) 1659 if (policy->transition_ongoing)
1660 return -EBUSY; 1660 return -EBUSY;
1661 1661
1662 /* Make sure that target_freq is within supported range */ 1662 /* Make sure that target_freq is within supported range */
1663 if (target_freq > policy->max) 1663 if (target_freq > policy->max)
1664 target_freq = policy->max; 1664 target_freq = policy->max;
1665 if (target_freq < policy->min) 1665 if (target_freq < policy->min)
1666 target_freq = policy->min; 1666 target_freq = policy->min;
1667 1667
1668 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n", 1668 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1669 policy->cpu, target_freq, relation, old_target_freq); 1669 policy->cpu, target_freq, relation, old_target_freq);
1670 1670
1671 if (target_freq == policy->cur) 1671 if (target_freq == policy->cur)
1672 return 0; 1672 return 0;
1673 1673
1674 if (cpufreq_driver->target) 1674 if (cpufreq_driver->target)
1675 retval = cpufreq_driver->target(policy, target_freq, relation); 1675 retval = cpufreq_driver->target(policy, target_freq, relation);
1676 1676
1677 return retval; 1677 return retval;
1678 } 1678 }
1679 EXPORT_SYMBOL_GPL(__cpufreq_driver_target); 1679 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1680 1680
1681 int cpufreq_driver_target(struct cpufreq_policy *policy, 1681 int cpufreq_driver_target(struct cpufreq_policy *policy,
1682 unsigned int target_freq, 1682 unsigned int target_freq,
1683 unsigned int relation) 1683 unsigned int relation)
1684 { 1684 {
1685 int ret = -EINVAL; 1685 int ret = -EINVAL;
1686 1686
1687 if (unlikely(lock_policy_rwsem_write(policy->cpu))) 1687 if (unlikely(lock_policy_rwsem_write(policy->cpu)))
1688 goto fail; 1688 goto fail;
1689 1689
1690 ret = __cpufreq_driver_target(policy, target_freq, relation); 1690 ret = __cpufreq_driver_target(policy, target_freq, relation);
1691 1691
1692 unlock_policy_rwsem_write(policy->cpu); 1692 unlock_policy_rwsem_write(policy->cpu);
1693 1693
1694 fail: 1694 fail:
1695 return ret; 1695 return ret;
1696 } 1696 }
1697 EXPORT_SYMBOL_GPL(cpufreq_driver_target); 1697 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
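
Usage note: callers pick a relation to resolve the target against the
driver's frequency table. CPUFREQ_RELATION_L selects the lowest frequency
at or above the target, CPUFREQ_RELATION_H the highest at or below it. A
sketch with a hypothetical helper name:

	static void example_drop_to_min(struct cpufreq_policy *policy)
	{
		/* the core clamps target_freq to [policy->min, policy->max] anyway */
		cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
	}
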
1698 1698
1699 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu) 1699 int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1700 { 1700 {
1701 if (cpufreq_disabled()) 1701 if (cpufreq_disabled())
1702 return 0; 1702 return 0;
1703 1703
1704 if (!cpufreq_driver->getavg) 1704 if (!cpufreq_driver->getavg)
1705 return 0; 1705 return 0;
1706 1706
1707 return cpufreq_driver->getavg(policy, cpu); 1707 return cpufreq_driver->getavg(policy, cpu);
1708 } 1708 }
1709 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg); 1709 EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
1710 1710
1711 /* 1711 /*
1712 * Dispatch a CPUFREQ_GOV_* event (e.g. CPUFREQ_GOV_LIMITS) to the governor. 1712 * Dispatch a CPUFREQ_GOV_* event (e.g. CPUFREQ_GOV_LIMITS) to the governor.
1713 */ 1713 */
1714 1714
1715 static int __cpufreq_governor(struct cpufreq_policy *policy, 1715 static int __cpufreq_governor(struct cpufreq_policy *policy,
1716 unsigned int event) 1716 unsigned int event)
1717 { 1717 {
1718 int ret; 1718 int ret;
1719 1719
1720 /* Must only be defined when the default governor is known to have latency 1720 /* Must only be defined when the default governor is known to have latency
1721 restrictions, e.g. conservative or ondemand. 1721 restrictions, e.g. conservative or ondemand.
1722 That this is the case is already ensured in Kconfig 1722 That this is the case is already ensured in Kconfig
1723 */ 1723 */
1724 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE 1724 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1725 struct cpufreq_governor *gov = &cpufreq_gov_performance; 1725 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1726 #else 1726 #else
1727 struct cpufreq_governor *gov = NULL; 1727 struct cpufreq_governor *gov = NULL;
1728 #endif 1728 #endif
1729 1729
1730 if (policy->governor->max_transition_latency && 1730 if (policy->governor->max_transition_latency &&
1731 policy->cpuinfo.transition_latency > 1731 policy->cpuinfo.transition_latency >
1732 policy->governor->max_transition_latency) { 1732 policy->governor->max_transition_latency) {
1733 if (!gov) 1733 if (!gov)
1734 return -EINVAL; 1734 return -EINVAL;
1735 else { 1735 else {
1736 printk(KERN_WARNING "%s governor failed, too long" 1736 printk(KERN_WARNING "%s governor failed, too long"
1737 " transition latency of HW, fallback" 1737 " transition latency of HW, fallback"
1738 " to %s governor\n", 1738 " to %s governor\n",
1739 policy->governor->name, 1739 policy->governor->name,
1740 gov->name); 1740 gov->name);
1741 policy->governor = gov; 1741 policy->governor = gov;
1742 } 1742 }
1743 } 1743 }
1744 1744
1745 if (!try_module_get(policy->governor->owner)) 1745 if (!try_module_get(policy->governor->owner))
1746 return -EINVAL; 1746 return -EINVAL;
1747 1747
1748 pr_debug("__cpufreq_governor for CPU %u, event %u\n", 1748 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1749 policy->cpu, event); 1749 policy->cpu, event);
1750 1750
1751 mutex_lock(&cpufreq_governor_lock); 1751 mutex_lock(&cpufreq_governor_lock);
1752 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) || 1752 if ((!policy->governor_enabled && (event == CPUFREQ_GOV_STOP)) ||
1753 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) { 1753 (policy->governor_enabled && (event == CPUFREQ_GOV_START))) {
1754 mutex_unlock(&cpufreq_governor_lock); 1754 mutex_unlock(&cpufreq_governor_lock);
1755 return -EBUSY; 1755 return -EBUSY;
1756 } 1756 }
1757 1757
1758 if (event == CPUFREQ_GOV_STOP) 1758 if (event == CPUFREQ_GOV_STOP)
1759 policy->governor_enabled = false; 1759 policy->governor_enabled = false;
1760 else if (event == CPUFREQ_GOV_START) 1760 else if (event == CPUFREQ_GOV_START)
1761 policy->governor_enabled = true; 1761 policy->governor_enabled = true;
1762 1762
1763 mutex_unlock(&cpufreq_governor_lock); 1763 mutex_unlock(&cpufreq_governor_lock);
1764 1764
1765 ret = policy->governor->governor(policy, event); 1765 ret = policy->governor->governor(policy, event);
1766 1766
1767 if (!ret) { 1767 if (!ret) {
1768 if (event == CPUFREQ_GOV_POLICY_INIT) 1768 if (event == CPUFREQ_GOV_POLICY_INIT)
1769 policy->governor->initialized++; 1769 policy->governor->initialized++;
1770 else if (event == CPUFREQ_GOV_POLICY_EXIT) 1770 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1771 policy->governor->initialized--; 1771 policy->governor->initialized--;
1772 } else { 1772 } else {
1773 /* Restore original values */ 1773 /* Restore original values */
1774 mutex_lock(&cpufreq_governor_lock); 1774 mutex_lock(&cpufreq_governor_lock);
1775 if (event == CPUFREQ_GOV_STOP) 1775 if (event == CPUFREQ_GOV_STOP)
1776 policy->governor_enabled = true; 1776 policy->governor_enabled = true;
1777 else if (event == CPUFREQ_GOV_START) 1777 else if (event == CPUFREQ_GOV_START)
1778 policy->governor_enabled = false; 1778 policy->governor_enabled = false;
1779 mutex_unlock(&cpufreq_governor_lock); 1779 mutex_unlock(&cpufreq_governor_lock);
1780 } 1780 }
1781 1781
1782 /* we keep one module reference alive for 1782 /* we keep one module reference alive for
1783 each CPU governed by this governor */ 1783 each CPU governed by this governor */
1784 if ((event != CPUFREQ_GOV_START) || ret) 1784 if ((event != CPUFREQ_GOV_START) || ret)
1785 module_put(policy->governor->owner); 1785 module_put(policy->governor->owner);
1786 if ((event == CPUFREQ_GOV_STOP) && !ret) 1786 if ((event == CPUFREQ_GOV_STOP) && !ret)
1787 module_put(policy->governor->owner); 1787 module_put(policy->governor->owner);
1788 1788
1789 return ret; 1789 return ret;
1790 } 1790 }
1791 1791
1792 int cpufreq_register_governor(struct cpufreq_governor *governor) 1792 int cpufreq_register_governor(struct cpufreq_governor *governor)
1793 { 1793 {
1794 int err; 1794 int err;
1795 1795
1796 if (!governor) 1796 if (!governor)
1797 return -EINVAL; 1797 return -EINVAL;
1798 1798
1799 if (cpufreq_disabled()) 1799 if (cpufreq_disabled())
1800 return -ENODEV; 1800 return -ENODEV;
1801 1801
1802 mutex_lock(&cpufreq_governor_mutex); 1802 mutex_lock(&cpufreq_governor_mutex);
1803 1803
1804 governor->initialized = 0; 1804 governor->initialized = 0;
1805 err = -EBUSY; 1805 err = -EBUSY;
1806 if (__find_governor(governor->name) == NULL) { 1806 if (__find_governor(governor->name) == NULL) {
1807 err = 0; 1807 err = 0;
1808 list_add(&governor->governor_list, &cpufreq_governor_list); 1808 list_add(&governor->governor_list, &cpufreq_governor_list);
1809 } 1809 }
1810 1810
1811 mutex_unlock(&cpufreq_governor_mutex); 1811 mutex_unlock(&cpufreq_governor_mutex);
1812 return err; 1812 return err;
1813 } 1813 }
1814 EXPORT_SYMBOL_GPL(cpufreq_register_governor); 1814 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
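
As a hedged illustration of the registration API above, a trivial governor
in the style of this kernel's powersave governor (all names are
illustrative; a real governor would also handle CPUFREQ_GOV_STOP and the
POLICY_INIT/EXIT events):

	static int example_governor_fn(struct cpufreq_policy *policy,
				       unsigned int event)
	{
		switch (event) {
		case CPUFREQ_GOV_START:
		case CPUFREQ_GOV_LIMITS:
			/* pin the policy to its minimum frequency */
			__cpufreq_driver_target(policy, policy->min,
						CPUFREQ_RELATION_L);
			break;
		default:
			break;
		}
		return 0;
	}

	static struct cpufreq_governor example_gov = {
		.name		= "example",
		.governor	= example_governor_fn,
		.owner		= THIS_MODULE,
	};

	/* module init: cpufreq_register_governor(&example_gov);
	 * module exit: cpufreq_unregister_governor(&example_gov);
	 */
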
1815 1815
1816 void cpufreq_unregister_governor(struct cpufreq_governor *governor) 1816 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1817 { 1817 {
1818 #ifdef CONFIG_HOTPLUG_CPU 1818 #ifdef CONFIG_HOTPLUG_CPU
1819 int cpu; 1819 int cpu;
1820 #endif 1820 #endif
1821 1821
1822 if (!governor) 1822 if (!governor)
1823 return; 1823 return;
1824 1824
1825 if (cpufreq_disabled()) 1825 if (cpufreq_disabled())
1826 return; 1826 return;
1827 1827
1828 #ifdef CONFIG_HOTPLUG_CPU 1828 #ifdef CONFIG_HOTPLUG_CPU
1829 for_each_present_cpu(cpu) { 1829 for_each_present_cpu(cpu) {
1830 if (cpu_online(cpu)) 1830 if (cpu_online(cpu))
1831 continue; 1831 continue;
1832 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name)) 1832 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1833 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0"); 1833 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1834 } 1834 }
1835 #endif 1835 #endif
1836 1836
1837 mutex_lock(&cpufreq_governor_mutex); 1837 mutex_lock(&cpufreq_governor_mutex);
1838 list_del(&governor->governor_list); 1838 list_del(&governor->governor_list);
1839 mutex_unlock(&cpufreq_governor_mutex); 1839 mutex_unlock(&cpufreq_governor_mutex);
1840 return; 1840 return;
1841 } 1841 }
1842 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); 1842 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1843 1843
1844 1844
1845 /********************************************************************* 1845 /*********************************************************************
1846 * POLICY INTERFACE * 1846 * POLICY INTERFACE *
1847 *********************************************************************/ 1847 *********************************************************************/
1848 1848
1849 /** 1849 /**
1850 * cpufreq_get_policy - get the current cpufreq_policy 1850 * cpufreq_get_policy - get the current cpufreq_policy
1851 * @policy: struct cpufreq_policy into which the current cpufreq_policy 1851 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1852 * is written 1852 * is written
1853 * 1853 *
1854 * Reads the current cpufreq policy. 1854 * Reads the current cpufreq policy.
1855 */ 1855 */
1856 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) 1856 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1857 { 1857 {
1858 struct cpufreq_policy *cpu_policy; 1858 struct cpufreq_policy *cpu_policy;
1859 if (!policy) 1859 if (!policy)
1860 return -EINVAL; 1860 return -EINVAL;
1861 1861
1862 cpu_policy = cpufreq_cpu_get(cpu); 1862 cpu_policy = cpufreq_cpu_get(cpu);
1863 if (!cpu_policy) 1863 if (!cpu_policy)
1864 return -EINVAL; 1864 return -EINVAL;
1865 1865
1866 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy)); 1866 memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
1867 1867
1868 cpufreq_cpu_put(cpu_policy); 1868 cpufreq_cpu_put(cpu_policy);
1869 return 0; 1869 return 0;
1870 } 1870 }
1871 EXPORT_SYMBOL(cpufreq_get_policy); 1871 EXPORT_SYMBOL(cpufreq_get_policy);
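
Usage note: cpufreq_get_policy() copies the policy out, so the caller holds
no reference afterwards. A minimal sketch (hypothetical function name):

	static void example_show_limits(unsigned int cpu)
	{
		struct cpufreq_policy pol;

		if (!cpufreq_get_policy(&pol, cpu))	/* returns 0 on success */
			pr_info("cpu%u: %u - %u kHz\n", cpu, pol.min, pol.max);
	}
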
1872 1872
1873 /* 1873 /*
1874 * data : current policy. 1874 * data : current policy.
1875 * policy : policy to be set. 1875 * policy : policy to be set.
1876 */ 1876 */
1877 static int __cpufreq_set_policy(struct cpufreq_policy *data, 1877 static int __cpufreq_set_policy(struct cpufreq_policy *data,
1878 struct cpufreq_policy *policy) 1878 struct cpufreq_policy *policy)
1879 { 1879 {
1880 int ret = 0, failed = 1; 1880 int ret = 0, failed = 1;
1881 1881
1882 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu, 1882 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
1883 policy->min, policy->max); 1883 policy->min, policy->max);
1884 1884
1885 memcpy(&policy->cpuinfo, &data->cpuinfo, 1885 memcpy(&policy->cpuinfo, &data->cpuinfo,
1886 sizeof(struct cpufreq_cpuinfo)); 1886 sizeof(struct cpufreq_cpuinfo));
1887 1887
1888 if (policy->min > data->max || policy->max < data->min) { 1888 if (policy->min > data->max || policy->max < data->min) {
1889 ret = -EINVAL; 1889 ret = -EINVAL;
1890 goto error_out; 1890 goto error_out;
1891 } 1891 }
1892 1892
1893 /* verify the cpu speed can be set within this limit */ 1893 /* verify the cpu speed can be set within this limit */
1894 ret = cpufreq_driver->verify(policy); 1894 ret = cpufreq_driver->verify(policy);
1895 if (ret) 1895 if (ret)
1896 goto error_out; 1896 goto error_out;
1897 1897
1898 /* adjust if necessary - all reasons */ 1898 /* adjust if necessary - all reasons */
1899 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1899 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1900 CPUFREQ_ADJUST, policy); 1900 CPUFREQ_ADJUST, policy);
1901 1901
1902 /* adjust if necessary - hardware incompatibility*/ 1902 /* adjust if necessary - hardware incompatibility*/
1903 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1903 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1904 CPUFREQ_INCOMPATIBLE, policy); 1904 CPUFREQ_INCOMPATIBLE, policy);
1905 1905
1906 /* 1906 /*
1907 * verify the cpu speed can be set within this limit, which might be 1907 * verify the cpu speed can be set within this limit, which might be
1908 * different to the first one 1908 * different to the first one
1909 */ 1909 */
1910 ret = cpufreq_driver->verify(policy); 1910 ret = cpufreq_driver->verify(policy);
1911 if (ret) 1911 if (ret)
1912 goto error_out; 1912 goto error_out;
1913 1913
1914 /* notification of the new policy */ 1914 /* notification of the new policy */
1915 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 1915 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1916 CPUFREQ_NOTIFY, policy); 1916 CPUFREQ_NOTIFY, policy);
1917 1917
1918 data->min = policy->min; 1918 data->min = policy->min;
1919 data->max = policy->max; 1919 data->max = policy->max;
1920 1920
1921 pr_debug("new min and max freqs are %u - %u kHz\n", 1921 pr_debug("new min and max freqs are %u - %u kHz\n",
1922 data->min, data->max); 1922 data->min, data->max);
1923 1923
1924 if (cpufreq_driver->setpolicy) { 1924 if (cpufreq_driver->setpolicy) {
1925 data->policy = policy->policy; 1925 data->policy = policy->policy;
1926 pr_debug("setting range\n"); 1926 pr_debug("setting range\n");
1927 ret = cpufreq_driver->setpolicy(policy); 1927 ret = cpufreq_driver->setpolicy(policy);
1928 } else { 1928 } else {
1929 if (policy->governor != data->governor) { 1929 if (policy->governor != data->governor) {
1930 /* save old, working values */ 1930 /* save old, working values */
1931 struct cpufreq_governor *old_gov = data->governor; 1931 struct cpufreq_governor *old_gov = data->governor;
1932 1932
1933 pr_debug("governor switch\n"); 1933 pr_debug("governor switch\n");
1934 1934
1935 /* end old governor */ 1935 /* end old governor */
1936 if (data->governor) { 1936 if (data->governor) {
1937 __cpufreq_governor(data, CPUFREQ_GOV_STOP); 1937 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
1938 unlock_policy_rwsem_write(policy->cpu); 1938 unlock_policy_rwsem_write(policy->cpu);
1939 __cpufreq_governor(data, 1939 __cpufreq_governor(data,
1940 CPUFREQ_GOV_POLICY_EXIT); 1940 CPUFREQ_GOV_POLICY_EXIT);
1941 lock_policy_rwsem_write(policy->cpu); 1941 lock_policy_rwsem_write(policy->cpu);
1942 } 1942 }
1943 1943
1944 /* start new governor */ 1944 /* start new governor */
1945 data->governor = policy->governor; 1945 data->governor = policy->governor;
1946 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) { 1946 if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
1947 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) { 1947 if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
1948 failed = 0; 1948 failed = 0;
1949 } else { 1949 } else {
1950 unlock_policy_rwsem_write(policy->cpu); 1950 unlock_policy_rwsem_write(policy->cpu);
1951 __cpufreq_governor(data, 1951 __cpufreq_governor(data,
1952 CPUFREQ_GOV_POLICY_EXIT); 1952 CPUFREQ_GOV_POLICY_EXIT);
1953 lock_policy_rwsem_write(policy->cpu); 1953 lock_policy_rwsem_write(policy->cpu);
1954 } 1954 }
1955 } 1955 }
1956 1956
1957 if (failed) { 1957 if (failed) {
1958 /* new governor failed, so re-start old one */ 1958 /* new governor failed, so re-start old one */
1959 pr_debug("starting governor %s failed\n", 1959 pr_debug("starting governor %s failed\n",
1960 data->governor->name); 1960 data->governor->name);
1961 if (old_gov) { 1961 if (old_gov) {
1962 data->governor = old_gov; 1962 data->governor = old_gov;
1963 __cpufreq_governor(data, 1963 __cpufreq_governor(data,
1964 CPUFREQ_GOV_POLICY_INIT); 1964 CPUFREQ_GOV_POLICY_INIT);
1965 __cpufreq_governor(data, 1965 __cpufreq_governor(data,
1966 CPUFREQ_GOV_START); 1966 CPUFREQ_GOV_START);
1967 } 1967 }
1968 ret = -EINVAL; 1968 ret = -EINVAL;
1969 goto error_out; 1969 goto error_out;
1970 } 1970 }
1971 /* might be a policy change, too, so fall through */ 1971 /* might be a policy change, too, so fall through */
1972 } 1972 }
1973 pr_debug("governor: change or update limits\n"); 1973 pr_debug("governor: change or update limits\n");
1974 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); 1974 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1975 } 1975 }
1976 1976
1977 error_out: 1977 error_out:
1978 return ret; 1978 return ret;
1979 } 1979 }
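A condensed sketch of the governor-switch sequence implemented above (ordering only; locking and error paths compressed, not the exact code):

    /*
     * 1. CPUFREQ_GOV_STOP         stop the old governor
     * 2. CPUFREQ_GOV_POLICY_EXIT  tear down its per-policy state
     *                             (the policy rwsem is dropped around this
     *                             call to avoid deadlocking against it)
     * 3. data->governor = policy->governor
     * 4. CPUFREQ_GOV_POLICY_INIT  set up the new governor's state
     * 5. CPUFREQ_GOV_START        start it (on failure, EXIT it again)
     * 6. on any failure: re-INIT and re-START the old governor, -EINVAL
     * 7. CPUFREQ_GOV_LIMITS       push the new min/max to whichever
     *                             governor ends up running
     */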
1980 1980
1981 /** 1981 /**
1982 * cpufreq_update_policy - re-evaluate an existing cpufreq policy 1982 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1983 * @cpu: CPU which shall be re-evaluated 1983 * @cpu: CPU which shall be re-evaluated
1984 * 1984 *
1985 * Useful for policy notifiers which have different needs 1985 * Useful for policy notifiers which have different needs
1986 * at different times. 1986 * at different times.
1987 */ 1987 */
1988 int cpufreq_update_policy(unsigned int cpu) 1988 int cpufreq_update_policy(unsigned int cpu)
1989 { 1989 {
1990 struct cpufreq_policy *data = cpufreq_cpu_get(cpu); 1990 struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
1991 struct cpufreq_policy policy; 1991 struct cpufreq_policy policy;
1992 int ret; 1992 int ret;
1993 1993
1994 if (!data) { 1994 if (!data) {
1995 ret = -ENODEV; 1995 ret = -ENODEV;
1996 goto no_policy; 1996 goto no_policy;
1997 } 1997 }
1998 1998
1999 if (unlikely(lock_policy_rwsem_write(cpu))) { 1999 if (unlikely(lock_policy_rwsem_write(cpu))) {
2000 ret = -EINVAL; 2000 ret = -EINVAL;
2001 goto fail; 2001 goto fail;
2002 } 2002 }
2003 2003
2004 pr_debug("updating policy for CPU %u\n", cpu); 2004 pr_debug("updating policy for CPU %u\n", cpu);
2005 memcpy(&policy, data, sizeof(struct cpufreq_policy)); 2005 memcpy(&policy, data, sizeof(struct cpufreq_policy));
2006 policy.min = data->user_policy.min; 2006 policy.min = data->user_policy.min;
2007 policy.max = data->user_policy.max; 2007 policy.max = data->user_policy.max;
2008 policy.policy = data->user_policy.policy; 2008 policy.policy = data->user_policy.policy;
2009 policy.governor = data->user_policy.governor; 2009 policy.governor = data->user_policy.governor;
2010 2010
2011 /* 2011 /*
2012 * BIOS might change freq behind our back 2012 * BIOS might change freq behind our back
2013 * -> ask driver for current freq and notify governors about a change 2013 * -> ask driver for current freq and notify governors about a change
2014 */ 2014 */
2015 if (cpufreq_driver->get) { 2015 if (cpufreq_driver->get) {
2016 policy.cur = cpufreq_driver->get(cpu); 2016 policy.cur = cpufreq_driver->get(cpu);
2017 if (!data->cur) { 2017 if (!data->cur) {
2018 pr_debug("Driver did not initialize current freq"); 2018 pr_debug("Driver did not initialize current freq");
2019 data->cur = policy.cur; 2019 data->cur = policy.cur;
2020 } else { 2020 } else {
2021 if (data->cur != policy.cur && cpufreq_driver->target) 2021 if (data->cur != policy.cur && cpufreq_driver->target)
2022 cpufreq_out_of_sync(cpu, data->cur, 2022 cpufreq_out_of_sync(cpu, data->cur,
2023 policy.cur); 2023 policy.cur);
2024 } 2024 }
2025 } 2025 }
2026 2026
2027 ret = __cpufreq_set_policy(data, &policy); 2027 ret = __cpufreq_set_policy(data, &policy);
2028 2028
2029 unlock_policy_rwsem_write(cpu); 2029 unlock_policy_rwsem_write(cpu);
2030 2030
2031 fail: 2031 fail:
2032 cpufreq_cpu_put(data); 2032 cpufreq_cpu_put(data);
2033 no_policy: 2033 no_policy:
2034 return ret; 2034 return ret;
2035 } 2035 }
2036 EXPORT_SYMBOL(cpufreq_update_policy); 2036 EXPORT_SYMBOL(cpufreq_update_policy);
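cpufreq_update_policy() is the hook platform code calls when firmware alters the frequency limits behind cpufreq's back (the ACPI processor driver is the classic caller). A minimal sketch of such a caller; the event-handler name is hypothetical:

    #include <linux/cpu.h>
    #include <linux/cpufreq.h>

    /* Invoked from a (hypothetical) firmware limits-changed notification. */
    static void foo_firmware_limits_changed(void)
    {
        unsigned int cpu;

        get_online_cpus();
        for_each_online_cpu(cpu)
            cpufreq_update_policy(cpu); /* re-apply user_policy vs. new limits */
        put_online_cpus();
    }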
2037 2037
2038 static int cpufreq_cpu_callback(struct notifier_block *nfb, 2038 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2039 unsigned long action, void *hcpu) 2039 unsigned long action, void *hcpu)
2040 { 2040 {
2041 unsigned int cpu = (unsigned long)hcpu; 2041 unsigned int cpu = (unsigned long)hcpu;
2042 struct device *dev; 2042 struct device *dev;
2043 bool frozen = false;
2043 2044
2044 dev = get_cpu_device(cpu); 2045 dev = get_cpu_device(cpu);
2045 if (dev) { 2046 if (dev) {
2046 switch (action) { 2047
2048 if (action & CPU_TASKS_FROZEN)
2049 frozen = true;
2050
2051 switch (action & ~CPU_TASKS_FROZEN) {
2047 case CPU_ONLINE: 2052 case CPU_ONLINE:
2048 case CPU_ONLINE_FROZEN: 2053 __cpufreq_add_dev(dev, NULL, frozen);
2049 cpufreq_add_dev(dev, NULL);
2050 cpufreq_update_policy(cpu); 2054 cpufreq_update_policy(cpu);
2051 break; 2055 break;
2056
2052 case CPU_DOWN_PREPARE: 2057 case CPU_DOWN_PREPARE:
2053 case CPU_DOWN_PREPARE_FROZEN: 2058 __cpufreq_remove_dev(dev, NULL, frozen);
2054 __cpufreq_remove_dev(dev, NULL, false);
2055 break; 2059 break;
2060
2056 case CPU_DOWN_FAILED: 2061 case CPU_DOWN_FAILED:
2057 case CPU_DOWN_FAILED_FROZEN: 2062 __cpufreq_add_dev(dev, NULL, frozen);
2058 cpufreq_add_dev(dev, NULL);
2059 break; 2063 break;
2060 } 2064 }
2061 } 2065 }
2062 return NOTIFY_OK; 2066 return NOTIFY_OK;
2063 } 2067 }
2064 2068
2065 static struct notifier_block __refdata cpufreq_cpu_notifier = { 2069 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2066 .notifier_call = cpufreq_cpu_callback, 2070 .notifier_call = cpufreq_cpu_callback,
2067 }; 2071 };
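This callback is the heart of the commit: instead of listing CPU_ONLINE_FROZEN, CPU_DOWN_PREPARE_FROZEN and friends as separate cases, it masks CPU_TASKS_FROZEN out of the action, records in `frozen` whether the transition comes from the suspend/resume path, and forwards that flag so __cpufreq_add_dev()/__cpufreq_remove_dev() can take the light-weight path. The masking pattern in isolation (a minimal sketch; names are illustrative):

    #include <linux/cpu.h>
    #include <linux/kernel.h>
    #include <linux/notifier.h>

    static int foo_cpu_callback(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
    {
        unsigned int cpu = (unsigned long)hcpu;
        bool frozen = action & CPU_TASKS_FROZEN;  /* suspend/resume path? */

        switch (action & ~CPU_TASKS_FROZEN) {     /* one case covers both variants */
        case CPU_ONLINE:                          /* ...and CPU_ONLINE_FROZEN */
            pr_debug("cpu %u up, frozen=%d\n", cpu, frozen);
            break;
        case CPU_DOWN_PREPARE:                    /* ...and CPU_DOWN_PREPARE_FROZEN */
            pr_debug("cpu %u going down, frozen=%d\n", cpu, frozen);
            break;
        }
        return NOTIFY_OK;
    }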
2068 2072
2069 /********************************************************************* 2073 /*********************************************************************
2070 * REGISTER / UNREGISTER CPUFREQ DRIVER * 2074 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2071 *********************************************************************/ 2075 *********************************************************************/
2072 2076
2073 /** 2077 /**
2074 * cpufreq_register_driver - register a CPU Frequency driver 2078 * cpufreq_register_driver - register a CPU Frequency driver
2075 * @driver_data: A struct cpufreq_driver containing the values 2079 * @driver_data: A struct cpufreq_driver containing the values
2076 * submitted by the CPU Frequency driver. 2080 * submitted by the CPU Frequency driver.
2077 * 2081 *
2078 * Registers a CPU Frequency driver to this core code. This code 2082 * Registers a CPU Frequency driver to this core code. This code
2079 * returns zero on success, -EBUSY when another driver got here first 2083 * returns zero on success, -EBUSY when another driver got here first
2080 * (and isn't unregistered in the meantime). 2084 * (and isn't unregistered in the meantime).
2081 * 2085 *
2082 */ 2086 */
2083 int cpufreq_register_driver(struct cpufreq_driver *driver_data) 2087 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2084 { 2088 {
2085 unsigned long flags; 2089 unsigned long flags;
2086 int ret; 2090 int ret;
2087 2091
2088 if (cpufreq_disabled()) 2092 if (cpufreq_disabled())
2089 return -ENODEV; 2093 return -ENODEV;
2090 2094
2091 if (!driver_data || !driver_data->verify || !driver_data->init || 2095 if (!driver_data || !driver_data->verify || !driver_data->init ||
2092 ((!driver_data->setpolicy) && (!driver_data->target))) 2096 ((!driver_data->setpolicy) && (!driver_data->target)))
2093 return -EINVAL; 2097 return -EINVAL;
2094 2098
2095 pr_debug("trying to register driver %s\n", driver_data->name); 2099 pr_debug("trying to register driver %s\n", driver_data->name);
2096 2100
2097 if (driver_data->setpolicy) 2101 if (driver_data->setpolicy)
2098 driver_data->flags |= CPUFREQ_CONST_LOOPS; 2102 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2099 2103
2100 write_lock_irqsave(&cpufreq_driver_lock, flags); 2104 write_lock_irqsave(&cpufreq_driver_lock, flags);
2101 if (cpufreq_driver) { 2105 if (cpufreq_driver) {
2102 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2106 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2103 return -EBUSY; 2107 return -EBUSY;
2104 } 2108 }
2105 cpufreq_driver = driver_data; 2109 cpufreq_driver = driver_data;
2106 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2110 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2107 2111
2108 ret = subsys_interface_register(&cpufreq_interface); 2112 ret = subsys_interface_register(&cpufreq_interface);
2109 if (ret) 2113 if (ret)
2110 goto err_null_driver; 2114 goto err_null_driver;
2111 2115
2112 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { 2116 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2113 int i; 2117 int i;
2114 ret = -ENODEV; 2118 ret = -ENODEV;
2115 2119
2116 /* check for at least one working CPU */ 2120 /* check for at least one working CPU */
2117 for (i = 0; i < nr_cpu_ids; i++) 2121 for (i = 0; i < nr_cpu_ids; i++)
2118 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) { 2122 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2119 ret = 0; 2123 ret = 0;
2120 break; 2124 break;
2121 } 2125 }
2122 2126
2123 /* if all ->init() calls failed, unregister */ 2127 /* if all ->init() calls failed, unregister */
2124 if (ret) { 2128 if (ret) {
2125 pr_debug("no CPU initialized for driver %s\n", 2129 pr_debug("no CPU initialized for driver %s\n",
2126 driver_data->name); 2130 driver_data->name);
2127 goto err_if_unreg; 2131 goto err_if_unreg;
2128 } 2132 }
2129 } 2133 }
2130 2134
2131 register_hotcpu_notifier(&cpufreq_cpu_notifier); 2135 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2132 pr_debug("driver %s up and running\n", driver_data->name); 2136 pr_debug("driver %s up and running\n", driver_data->name);
2133 2137
2134 return 0; 2138 return 0;
2135 err_if_unreg: 2139 err_if_unreg:
2136 subsys_interface_unregister(&cpufreq_interface); 2140 subsys_interface_unregister(&cpufreq_interface);
2137 err_null_driver: 2141 err_null_driver:
2138 write_lock_irqsave(&cpufreq_driver_lock, flags); 2142 write_lock_irqsave(&cpufreq_driver_lock, flags);
2139 cpufreq_driver = NULL; 2143 cpufreq_driver = NULL;
2140 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2144 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2141 return ret; 2145 return ret;
2142 } 2146 }
2143 EXPORT_SYMBOL_GPL(cpufreq_register_driver); 2147 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2144 2148
2145 /** 2149 /**
2146 * cpufreq_unregister_driver - unregister the current CPUFreq driver 2150 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2147 * 2151 *
2148 * Unregister the current CPUFreq driver. Only call this if you have 2152 * Unregister the current CPUFreq driver. Only call this if you have
2149 * the right to do so, i.e. if you have succeeded in initialising before! 2153 * the right to do so, i.e. if you have succeeded in initialising before!
2150 * Returns zero if successful, and -EINVAL if the cpufreq_driver is 2154 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2151 * currently not initialised. 2155 * currently not initialised.
2152 */ 2156 */
2153 int cpufreq_unregister_driver(struct cpufreq_driver *driver) 2157 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2154 { 2158 {
2155 unsigned long flags; 2159 unsigned long flags;
2156 2160
2157 if (!cpufreq_driver || (driver != cpufreq_driver)) 2161 if (!cpufreq_driver || (driver != cpufreq_driver))
2158 return -EINVAL; 2162 return -EINVAL;
2159 2163
2160 pr_debug("unregistering driver %s\n", driver->name); 2164 pr_debug("unregistering driver %s\n", driver->name);
2161 2165
2162 subsys_interface_unregister(&cpufreq_interface); 2166 subsys_interface_unregister(&cpufreq_interface);
2163 unregister_hotcpu_notifier(&cpufreq_cpu_notifier); 2167 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
2164 2168
2165 write_lock_irqsave(&cpufreq_driver_lock, flags); 2169 write_lock_irqsave(&cpufreq_driver_lock, flags);
2166 cpufreq_driver = NULL; 2170 cpufreq_driver = NULL;
2167 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 2171 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2168 2172
2169 return 0; 2173 return 0;
2170 } 2174 }
2171 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); 2175 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
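For context, a minimal driver skeleton that passes the checks in cpufreq_register_driver() above (->verify and ->init must be set, plus one of ->setpolicy/->target). Everything here is hypothetical except the frequency-table helpers, which are the stock ones from this era's freq_table.c:

    #include <linux/cpufreq.h>
    #include <linux/init.h>
    #include <linux/module.h>

    static struct cpufreq_frequency_table foo_freq_table[] = {
        { .index = 0, .frequency = 800000 },         /* kHz */
        { .index = 1, .frequency = 1600000 },
        { .frequency = CPUFREQ_TABLE_END },
    };

    static int foo_cpufreq_init(struct cpufreq_policy *policy)
    {
        policy->cpuinfo.transition_latency = 100000; /* ns, made up */
        return cpufreq_frequency_table_cpuinfo(policy, foo_freq_table);
    }

    static int foo_cpufreq_verify(struct cpufreq_policy *policy)
    {
        return cpufreq_frequency_table_verify(policy, foo_freq_table);
    }

    static int foo_cpufreq_target(struct cpufreq_policy *policy,
                                  unsigned int target_freq,
                                  unsigned int relation)
    {
        unsigned int index;

        if (cpufreq_frequency_table_target(policy, foo_freq_table,
                                           target_freq, relation, &index))
            return -EINVAL;

        /* program the hardware to foo_freq_table[index].frequency here */
        return 0;
    }

    static struct cpufreq_driver foo_cpufreq_driver = {
        .name   = "foo-cpufreq",
        .init   = foo_cpufreq_init,
        .verify = foo_cpufreq_verify,
        .target = foo_cpufreq_target,
    };

    static int __init foo_cpufreq_module_init(void)
    {
        return cpufreq_register_driver(&foo_cpufreq_driver);
    }

    static void __exit foo_cpufreq_module_exit(void)
    {
        cpufreq_unregister_driver(&foo_cpufreq_driver);
    }

    module_init(foo_cpufreq_module_init);
    module_exit(foo_cpufreq_module_exit);
    MODULE_LICENSE("GPL");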
2172 2176
2173 static int __init cpufreq_core_init(void) 2177 static int __init cpufreq_core_init(void)
2174 { 2178 {
2175 int cpu; 2179 int cpu;
2176 2180
2177 if (cpufreq_disabled()) 2181 if (cpufreq_disabled())
2178 return -ENODEV; 2182 return -ENODEV;
2179 2183
2180 for_each_possible_cpu(cpu) { 2184 for_each_possible_cpu(cpu) {
2181 per_cpu(cpufreq_policy_cpu, cpu) = -1; 2185 per_cpu(cpufreq_policy_cpu, cpu) = -1;
2182 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu)); 2186 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2183 } 2187 }
2184 2188
2185 cpufreq_global_kobject = kobject_create(); 2189 cpufreq_global_kobject = kobject_create();
2186 BUG_ON(!cpufreq_global_kobject); 2190 BUG_ON(!cpufreq_global_kobject);
2187 register_syscore_ops(&cpufreq_syscore_ops); 2191 register_syscore_ops(&cpufreq_syscore_ops);
2188 2192
2189 return 0; 2193 return 0;
drivers/cpufreq/cpufreq_stats.c
1 /* 1 /*
2 * drivers/cpufreq/cpufreq_stats.c 2 * drivers/cpufreq/cpufreq_stats.c
3 * 3 *
4 * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. 4 * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
5 * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>. 5 * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12 #include <linux/kernel.h> 12 #include <linux/kernel.h>
13 #include <linux/slab.h> 13 #include <linux/slab.h>
14 #include <linux/cpu.h> 14 #include <linux/cpu.h>
15 #include <linux/sysfs.h> 15 #include <linux/sysfs.h>
16 #include <linux/cpufreq.h> 16 #include <linux/cpufreq.h>
17 #include <linux/module.h> 17 #include <linux/module.h>
18 #include <linux/jiffies.h> 18 #include <linux/jiffies.h>
19 #include <linux/percpu.h> 19 #include <linux/percpu.h>
20 #include <linux/kobject.h> 20 #include <linux/kobject.h>
21 #include <linux/spinlock.h> 21 #include <linux/spinlock.h>
22 #include <linux/notifier.h> 22 #include <linux/notifier.h>
23 #include <asm/cputime.h> 23 #include <asm/cputime.h>
24 24
25 static spinlock_t cpufreq_stats_lock; 25 static spinlock_t cpufreq_stats_lock;
26 26
27 struct cpufreq_stats { 27 struct cpufreq_stats {
28 unsigned int cpu; 28 unsigned int cpu;
29 unsigned int total_trans; 29 unsigned int total_trans;
30 unsigned long long last_time; 30 unsigned long long last_time;
31 unsigned int max_state; 31 unsigned int max_state;
32 unsigned int state_num; 32 unsigned int state_num;
33 unsigned int last_index; 33 unsigned int last_index;
34 u64 *time_in_state; 34 u64 *time_in_state;
35 unsigned int *freq_table; 35 unsigned int *freq_table;
36 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS 36 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
37 unsigned int *trans_table; 37 unsigned int *trans_table;
38 #endif 38 #endif
39 }; 39 };
40 40
41 static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table); 41 static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
42 42
43 struct cpufreq_stats_attribute { 43 struct cpufreq_stats_attribute {
44 struct attribute attr; 44 struct attribute attr;
45 ssize_t(*show) (struct cpufreq_stats *, char *); 45 ssize_t(*show) (struct cpufreq_stats *, char *);
46 }; 46 };
47 47
48 static int cpufreq_stats_update(unsigned int cpu) 48 static int cpufreq_stats_update(unsigned int cpu)
49 { 49 {
50 struct cpufreq_stats *stat; 50 struct cpufreq_stats *stat;
51 unsigned long long cur_time; 51 unsigned long long cur_time;
52 52
53 cur_time = get_jiffies_64(); 53 cur_time = get_jiffies_64();
54 spin_lock(&cpufreq_stats_lock); 54 spin_lock(&cpufreq_stats_lock);
55 stat = per_cpu(cpufreq_stats_table, cpu); 55 stat = per_cpu(cpufreq_stats_table, cpu);
56 if (stat->time_in_state) 56 if (stat->time_in_state)
57 stat->time_in_state[stat->last_index] += 57 stat->time_in_state[stat->last_index] +=
58 cur_time - stat->last_time; 58 cur_time - stat->last_time;
59 stat->last_time = cur_time; 59 stat->last_time = cur_time;
60 spin_unlock(&cpufreq_stats_lock); 60 spin_unlock(&cpufreq_stats_lock);
61 return 0; 61 return 0;
62 } 62 }
63 63
64 static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf) 64 static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
65 { 65 {
66 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); 66 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
67 if (!stat) 67 if (!stat)
68 return 0; 68 return 0;
69 return sprintf(buf, "%d\n", 69 return sprintf(buf, "%d\n",
70 per_cpu(cpufreq_stats_table, stat->cpu)->total_trans); 70 per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
71 } 71 }
72 72
73 static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf) 73 static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
74 { 74 {
75 ssize_t len = 0; 75 ssize_t len = 0;
76 int i; 76 int i;
77 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); 77 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
78 if (!stat) 78 if (!stat)
79 return 0; 79 return 0;
80 cpufreq_stats_update(stat->cpu); 80 cpufreq_stats_update(stat->cpu);
81 for (i = 0; i < stat->state_num; i++) { 81 for (i = 0; i < stat->state_num; i++) {
82 len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i], 82 len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
83 (unsigned long long) 83 (unsigned long long)
84 cputime64_to_clock_t(stat->time_in_state[i])); 84 cputime64_to_clock_t(stat->time_in_state[i]));
85 } 85 }
86 return len; 86 return len;
87 } 87 }
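show_time_in_state() prints one "<frequency-in-kHz> <ticks>" pair per state, with the residency converted by cputime64_to_clock_t() into USER_HZ units (10 ms on common configurations). Illustrative output; the values are invented:

    $ cat /sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state
    1600000 5211
    1200000 1796
    800000 98414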
88 88
89 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS 89 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
90 static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) 90 static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
91 { 91 {
92 ssize_t len = 0; 92 ssize_t len = 0;
93 int i, j; 93 int i, j;
94 94
95 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu); 95 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
96 if (!stat) 96 if (!stat)
97 return 0; 97 return 0;
98 cpufreq_stats_update(stat->cpu); 98 cpufreq_stats_update(stat->cpu);
99 len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n"); 99 len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
100 len += snprintf(buf + len, PAGE_SIZE - len, " : "); 100 len += snprintf(buf + len, PAGE_SIZE - len, " : ");
101 for (i = 0; i < stat->state_num; i++) { 101 for (i = 0; i < stat->state_num; i++) {
102 if (len >= PAGE_SIZE) 102 if (len >= PAGE_SIZE)
103 break; 103 break;
104 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ", 104 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
105 stat->freq_table[i]); 105 stat->freq_table[i]);
106 } 106 }
107 if (len >= PAGE_SIZE) 107 if (len >= PAGE_SIZE)
108 return PAGE_SIZE; 108 return PAGE_SIZE;
109 109
110 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 110 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
111 111
112 for (i = 0; i < stat->state_num; i++) { 112 for (i = 0; i < stat->state_num; i++) {
113 if (len >= PAGE_SIZE) 113 if (len >= PAGE_SIZE)
114 break; 114 break;
115 115
116 len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ", 116 len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
117 stat->freq_table[i]); 117 stat->freq_table[i]);
118 118
119 for (j = 0; j < stat->state_num; j++) { 119 for (j = 0; j < stat->state_num; j++) {
120 if (len >= PAGE_SIZE) 120 if (len >= PAGE_SIZE)
121 break; 121 break;
122 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ", 122 len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
123 stat->trans_table[i*stat->max_state+j]); 123 stat->trans_table[i*stat->max_state+j]);
124 } 124 }
125 if (len >= PAGE_SIZE) 125 if (len >= PAGE_SIZE)
126 break; 126 break;
127 len += snprintf(buf + len, PAGE_SIZE - len, "\n"); 127 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
128 } 128 }
129 if (len >= PAGE_SIZE) 129 if (len >= PAGE_SIZE)
130 return PAGE_SIZE; 130 return PAGE_SIZE;
131 return len; 131 return len;
132 } 132 }
133 cpufreq_freq_attr_ro(trans_table); 133 cpufreq_freq_attr_ro(trans_table);
134 #endif 134 #endif
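show_trans_table() renders a state_num x state_num matrix of transition counts, rows indexed by the previous frequency and columns by the new one, truncated at PAGE_SIZE. Illustrative output for a three-state table (counts invented):

    $ cat /sys/devices/system/cpu/cpu0/cpufreq/stats/trans_table
       From  :    To
             :   1600000   1200000    800000
      1600000:         0         6        11
      1200000:         5         0         9
       800000:        12         8         0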
135 135
136 cpufreq_freq_attr_ro(total_trans); 136 cpufreq_freq_attr_ro(total_trans);
137 cpufreq_freq_attr_ro(time_in_state); 137 cpufreq_freq_attr_ro(time_in_state);
138 138
139 static struct attribute *default_attrs[] = { 139 static struct attribute *default_attrs[] = {
140 &total_trans.attr, 140 &total_trans.attr,
141 &time_in_state.attr, 141 &time_in_state.attr,
142 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS 142 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
143 &trans_table.attr, 143 &trans_table.attr,
144 #endif 144 #endif
145 NULL 145 NULL
146 }; 146 };
147 static struct attribute_group stats_attr_group = { 147 static struct attribute_group stats_attr_group = {
148 .attrs = default_attrs, 148 .attrs = default_attrs,
149 .name = "stats" 149 .name = "stats"
150 }; 150 };
151 151
152 static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) 152 static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
153 { 153 {
154 int index; 154 int index;
155 for (index = 0; index < stat->max_state; index++) 155 for (index = 0; index < stat->max_state; index++)
156 if (stat->freq_table[index] == freq) 156 if (stat->freq_table[index] == freq)
157 return index; 157 return index;
158 return -1; 158 return -1;
159 } 159 }
160 160
161 /* should be called late in the CPU removal sequence so that the stats 161 /* should be called late in the CPU removal sequence so that the stats
162 * memory is still available in case someone tries to use it. 162 * memory is still available in case someone tries to use it.
163 */ 163 */
164 static void cpufreq_stats_free_table(unsigned int cpu) 164 static void cpufreq_stats_free_table(unsigned int cpu)
165 { 165 {
166 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); 166 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
167 167
168 if (stat) { 168 if (stat) {
169 pr_debug("%s: Free stat table\n", __func__); 169 pr_debug("%s: Free stat table\n", __func__);
170 kfree(stat->time_in_state); 170 kfree(stat->time_in_state);
171 kfree(stat); 171 kfree(stat);
172 per_cpu(cpufreq_stats_table, cpu) = NULL; 172 per_cpu(cpufreq_stats_table, cpu) = NULL;
173 } 173 }
174 } 174 }
175 175
176 /* must be called early in the CPU removal sequence (before 176 /* must be called early in the CPU removal sequence (before
177 * cpufreq_remove_dev) so that policy is still valid. 177 * cpufreq_remove_dev) so that policy is still valid.
178 */ 178 */
179 static void cpufreq_stats_free_sysfs(unsigned int cpu) 179 static void cpufreq_stats_free_sysfs(unsigned int cpu)
180 { 180 {
181 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); 181 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
182 182
183 if (!policy) 183 if (!policy)
184 return; 184 return;
185 185
186 if (!cpufreq_frequency_get_table(cpu)) 186 if (!cpufreq_frequency_get_table(cpu))
187 goto put_ref; 187 goto put_ref;
188 188
189 if (!policy_is_shared(policy)) { 189 if (!policy_is_shared(policy)) {
190 pr_debug("%s: Free sysfs stat\n", __func__); 190 pr_debug("%s: Free sysfs stat\n", __func__);
191 sysfs_remove_group(&policy->kobj, &stats_attr_group); 191 sysfs_remove_group(&policy->kobj, &stats_attr_group);
192 } 192 }
193 193
194 put_ref: 194 put_ref:
195 cpufreq_cpu_put(policy); 195 cpufreq_cpu_put(policy);
196 } 196 }
197 197
198 static int cpufreq_stats_create_table(struct cpufreq_policy *policy, 198 static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
199 struct cpufreq_frequency_table *table) 199 struct cpufreq_frequency_table *table)
200 { 200 {
201 unsigned int i, j, count = 0, ret = 0; 201 unsigned int i, j, count = 0, ret = 0;
202 struct cpufreq_stats *stat; 202 struct cpufreq_stats *stat;
203 struct cpufreq_policy *data; 203 struct cpufreq_policy *data;
204 unsigned int alloc_size; 204 unsigned int alloc_size;
205 unsigned int cpu = policy->cpu; 205 unsigned int cpu = policy->cpu;
206 if (per_cpu(cpufreq_stats_table, cpu)) 206 if (per_cpu(cpufreq_stats_table, cpu))
207 return -EBUSY; 207 return -EBUSY;
208 stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL); 208 stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
209 if ((stat) == NULL) 209 if ((stat) == NULL)
210 return -ENOMEM; 210 return -ENOMEM;
211 211
212 data = cpufreq_cpu_get(cpu); 212 data = cpufreq_cpu_get(cpu);
213 if (data == NULL) { 213 if (data == NULL) {
214 ret = -EINVAL; 214 ret = -EINVAL;
215 goto error_get_fail; 215 goto error_get_fail;
216 } 216 }
217 217
218 ret = sysfs_create_group(&data->kobj, &stats_attr_group); 218 ret = sysfs_create_group(&data->kobj, &stats_attr_group);
219 if (ret) 219 if (ret)
220 goto error_out; 220 goto error_out;
221 221
222 stat->cpu = cpu; 222 stat->cpu = cpu;
223 per_cpu(cpufreq_stats_table, cpu) = stat; 223 per_cpu(cpufreq_stats_table, cpu) = stat;
224 224
225 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { 225 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
226 unsigned int freq = table[i].frequency; 226 unsigned int freq = table[i].frequency;
227 if (freq == CPUFREQ_ENTRY_INVALID) 227 if (freq == CPUFREQ_ENTRY_INVALID)
228 continue; 228 continue;
229 count++; 229 count++;
230 } 230 }
231 231
232 alloc_size = count * sizeof(int) + count * sizeof(u64); 232 alloc_size = count * sizeof(int) + count * sizeof(u64);
233 233
234 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS 234 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
235 alloc_size += count * count * sizeof(int); 235 alloc_size += count * count * sizeof(int);
236 #endif 236 #endif
237 stat->max_state = count; 237 stat->max_state = count;
238 stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL); 238 stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
239 if (!stat->time_in_state) { 239 if (!stat->time_in_state) {
240 ret = -ENOMEM; 240 ret = -ENOMEM;
241 goto error_out; 241 goto error_out;
242 } 242 }
243 stat->freq_table = (unsigned int *)(stat->time_in_state + count); 243 stat->freq_table = (unsigned int *)(stat->time_in_state + count);
244 244
245 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS 245 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
246 stat->trans_table = stat->freq_table + count; 246 stat->trans_table = stat->freq_table + count;
247 #endif 247 #endif
248 j = 0; 248 j = 0;
249 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { 249 for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
250 unsigned int freq = table[i].frequency; 250 unsigned int freq = table[i].frequency;
251 if (freq == CPUFREQ_ENTRY_INVALID) 251 if (freq == CPUFREQ_ENTRY_INVALID)
252 continue; 252 continue;
253 if (freq_table_get_index(stat, freq) == -1) 253 if (freq_table_get_index(stat, freq) == -1)
254 stat->freq_table[j++] = freq; 254 stat->freq_table[j++] = freq;
255 } 255 }
256 stat->state_num = j; 256 stat->state_num = j;
257 spin_lock(&cpufreq_stats_lock); 257 spin_lock(&cpufreq_stats_lock);
258 stat->last_time = get_jiffies_64(); 258 stat->last_time = get_jiffies_64();
259 stat->last_index = freq_table_get_index(stat, policy->cur); 259 stat->last_index = freq_table_get_index(stat, policy->cur);
260 spin_unlock(&cpufreq_stats_lock); 260 spin_unlock(&cpufreq_stats_lock);
261 cpufreq_cpu_put(data); 261 cpufreq_cpu_put(data);
262 return 0; 262 return 0;
263 error_out: 263 error_out:
264 cpufreq_cpu_put(data); 264 cpufreq_cpu_put(data);
265 error_get_fail: 265 error_get_fail:
266 kfree(stat); 266 kfree(stat);
267 per_cpu(cpufreq_stats_table, cpu) = NULL; 267 per_cpu(cpufreq_stats_table, cpu) = NULL;
268 return ret; 268 return ret;
269 } 269 }
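cpufreq_stats_create_table() makes one allocation and carves it up with pointer arithmetic, so the single kfree(stat->time_in_state) in cpufreq_stats_free_table() releases all three arrays. The layout implied by the code above:

    /*
     * stat->time_in_state       stat->freq_table       stat->trans_table
     *        |                         |              (STAT_DETAILS only)
     *        v                         v                       v
     * +---------------------+----------------------+-----------------------------+
     * | count * sizeof(u64) | count * sizeof(int)  | count * count * sizeof(int) |
     * +---------------------+----------------------+-----------------------------+
     */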
270 270
271 static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy) 271 static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
272 { 272 {
273 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, 273 struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
274 policy->last_cpu); 274 policy->last_cpu);
275 275
276 pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n", 276 pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
277 policy->cpu, policy->last_cpu); 277 policy->cpu, policy->last_cpu);
278 per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table, 278 per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
279 policy->last_cpu); 279 policy->last_cpu);
280 per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL; 280 per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
281 stat->cpu = policy->cpu; 281 stat->cpu = policy->cpu;
282 } 282 }
283 283
284 static int cpufreq_stat_notifier_policy(struct notifier_block *nb, 284 static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
285 unsigned long val, void *data) 285 unsigned long val, void *data)
286 { 286 {
287 int ret; 287 int ret;
288 struct cpufreq_policy *policy = data; 288 struct cpufreq_policy *policy = data;
289 struct cpufreq_frequency_table *table; 289 struct cpufreq_frequency_table *table;
290 unsigned int cpu = policy->cpu; 290 unsigned int cpu = policy->cpu;
291 291
292 if (val == CPUFREQ_UPDATE_POLICY_CPU) { 292 if (val == CPUFREQ_UPDATE_POLICY_CPU) {
293 cpufreq_stats_update_policy_cpu(policy); 293 cpufreq_stats_update_policy_cpu(policy);
294 return 0; 294 return 0;
295 } 295 }
296 296
297 if (val != CPUFREQ_NOTIFY) 297 if (val != CPUFREQ_NOTIFY)
298 return 0; 298 return 0;
299 table = cpufreq_frequency_get_table(cpu); 299 table = cpufreq_frequency_get_table(cpu);
300 if (!table) 300 if (!table)
301 return 0; 301 return 0;
302 ret = cpufreq_stats_create_table(policy, table); 302 ret = cpufreq_stats_create_table(policy, table);
303 if (ret) 303 if (ret)
304 return ret; 304 return ret;
305 return 0; 305 return 0;
306 } 306 }
307 307
308 static int cpufreq_stat_notifier_trans(struct notifier_block *nb, 308 static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
309 unsigned long val, void *data) 309 unsigned long val, void *data)
310 { 310 {
311 struct cpufreq_freqs *freq = data; 311 struct cpufreq_freqs *freq = data;
312 struct cpufreq_stats *stat; 312 struct cpufreq_stats *stat;
313 int old_index, new_index; 313 int old_index, new_index;
314 314
315 if (val != CPUFREQ_POSTCHANGE) 315 if (val != CPUFREQ_POSTCHANGE)
316 return 0; 316 return 0;
317 317
318 stat = per_cpu(cpufreq_stats_table, freq->cpu); 318 stat = per_cpu(cpufreq_stats_table, freq->cpu);
319 if (!stat) 319 if (!stat)
320 return 0; 320 return 0;
321 321
322 old_index = stat->last_index; 322 old_index = stat->last_index;
323 new_index = freq_table_get_index(stat, freq->new); 323 new_index = freq_table_get_index(stat, freq->new);
324 324
325 /* We can't do stat->time_in_state[-1]= .. */ 325 /* We can't do stat->time_in_state[-1]= .. */
326 if (old_index == -1 || new_index == -1) 326 if (old_index == -1 || new_index == -1)
327 return 0; 327 return 0;
328 328
329 cpufreq_stats_update(freq->cpu); 329 cpufreq_stats_update(freq->cpu);
330 330
331 if (old_index == new_index) 331 if (old_index == new_index)
332 return 0; 332 return 0;
333 333
334 spin_lock(&cpufreq_stats_lock); 334 spin_lock(&cpufreq_stats_lock);
335 stat->last_index = new_index; 335 stat->last_index = new_index;
336 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS 336 #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
337 stat->trans_table[old_index * stat->max_state + new_index]++; 337 stat->trans_table[old_index * stat->max_state + new_index]++;
338 #endif 338 #endif
339 stat->total_trans++; 339 stat->total_trans++;
340 spin_unlock(&cpufreq_stats_lock); 340 spin_unlock(&cpufreq_stats_lock);
341 return 0; 341 return 0;
342 } 342 }
343 343
344 static int cpufreq_stat_cpu_callback(struct notifier_block *nfb, 344 static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
345 unsigned long action, 345 unsigned long action,
346 void *hcpu) 346 void *hcpu)
347 { 347 {
348 unsigned int cpu = (unsigned long)hcpu; 348 unsigned int cpu = (unsigned long)hcpu;
349 349
350 switch (action) { 350 switch (action) {
351 case CPU_DOWN_PREPARE: 351 case CPU_DOWN_PREPARE:
352 case CPU_DOWN_PREPARE_FROZEN:
353 cpufreq_stats_free_sysfs(cpu); 352 cpufreq_stats_free_sysfs(cpu);
354 break; 353 break;
355 case CPU_DEAD: 354 case CPU_DEAD:
356 case CPU_DEAD_FROZEN:
357 cpufreq_stats_free_table(cpu); 355 cpufreq_stats_free_table(cpu);
358 break; 356 break;
359 } 357 }
360 return NOTIFY_OK; 358 return NOTIFY_OK;
361 } 359 }
362 360
363 /* priority=1 so this will get called before cpufreq_remove_dev */ 361 /* priority=1 so this will get called before cpufreq_remove_dev */
364 static struct notifier_block cpufreq_stat_cpu_notifier __refdata = { 362 static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
365 .notifier_call = cpufreq_stat_cpu_callback, 363 .notifier_call = cpufreq_stat_cpu_callback,
366 .priority = 1, 364 .priority = 1,
367 }; 365 };
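Note what dropping the _FROZEN cases above achieves: during suspend/resume this notifier receives CPU_DOWN_PREPARE_FROZEN and CPU_DEAD_FROZEN, which no longer match any case, so the stats table and its sysfs group are left intact. Together with the light-weight path in cpufreq.c, this is what preserves the stats files (and their permissions) across suspend. In short:

    /*
     * CPU_DOWN_PREPARE_FROZEN == (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN);
     * with no _FROZEN case and no masking here, switch (action) matches
     * nothing on the suspend path and frees nothing.
     */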
368 366
369 static struct notifier_block notifier_policy_block = { 367 static struct notifier_block notifier_policy_block = {
370 .notifier_call = cpufreq_stat_notifier_policy 368 .notifier_call = cpufreq_stat_notifier_policy
371 }; 369 };
372 370
373 static struct notifier_block notifier_trans_block = { 371 static struct notifier_block notifier_trans_block = {
374 .notifier_call = cpufreq_stat_notifier_trans 372 .notifier_call = cpufreq_stat_notifier_trans
375 }; 373 };
376 374
377 static int __init cpufreq_stats_init(void) 375 static int __init cpufreq_stats_init(void)
378 { 376 {
379 int ret; 377 int ret;
380 unsigned int cpu; 378 unsigned int cpu;
381 379
382 spin_lock_init(&cpufreq_stats_lock); 380 spin_lock_init(&cpufreq_stats_lock);
383 ret = cpufreq_register_notifier(&notifier_policy_block, 381 ret = cpufreq_register_notifier(&notifier_policy_block,
384 CPUFREQ_POLICY_NOTIFIER); 382 CPUFREQ_POLICY_NOTIFIER);
385 if (ret) 383 if (ret)
386 return ret; 384 return ret;
387 385
388 register_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 386 register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
389 387
390 ret = cpufreq_register_notifier(&notifier_trans_block, 388 ret = cpufreq_register_notifier(&notifier_trans_block,
391 CPUFREQ_TRANSITION_NOTIFIER); 389 CPUFREQ_TRANSITION_NOTIFIER);
392 if (ret) { 390 if (ret) {
393 cpufreq_unregister_notifier(&notifier_policy_block, 391 cpufreq_unregister_notifier(&notifier_policy_block,
394 CPUFREQ_POLICY_NOTIFIER); 392 CPUFREQ_POLICY_NOTIFIER);
395 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 393 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
396 for_each_online_cpu(cpu) 394 for_each_online_cpu(cpu)
397 cpufreq_stats_free_table(cpu); 395 cpufreq_stats_free_table(cpu);
398 return ret; 396 return ret;
399 } 397 }
400 398
401 return 0; 399 return 0;
402 } 400 }
403 static void __exit cpufreq_stats_exit(void) 401 static void __exit cpufreq_stats_exit(void)
404 { 402 {
405 unsigned int cpu; 403 unsigned int cpu;
406 404
407 cpufreq_unregister_notifier(&notifier_policy_block, 405 cpufreq_unregister_notifier(&notifier_policy_block,
408 CPUFREQ_POLICY_NOTIFIER); 406 CPUFREQ_POLICY_NOTIFIER);
409 cpufreq_unregister_notifier(&notifier_trans_block, 407 cpufreq_unregister_notifier(&notifier_trans_block,
410 CPUFREQ_TRANSITION_NOTIFIER); 408 CPUFREQ_TRANSITION_NOTIFIER);
411 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 409 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
412 for_each_online_cpu(cpu) { 410 for_each_online_cpu(cpu) {
413 cpufreq_stats_free_table(cpu); 411 cpufreq_stats_free_table(cpu);
414 cpufreq_stats_free_sysfs(cpu); 412 cpufreq_stats_free_sysfs(cpu);
415 } 413 }
416 } 414 }
417 415
418 MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>"); 416 MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
419 MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats " 417 MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
420 "through sysfs filesystem"); 418 "through sysfs filesystem");
421 MODULE_LICENSE("GPL"); 419 MODULE_LICENSE("GPL");
422 420
423 module_init(cpufreq_stats_init); 421 module_init(cpufreq_stats_init);
424 module_exit(cpufreq_stats_exit); 422 module_exit(cpufreq_stats_exit);
425 423