Commit defa4c738aa90e29e91eff43b0c1b3198367ce9c
Committed by Rafael J. Wysocki
1 parent: a262e94cdc
Exists in smarc-imx_3.14.28_1.0.0_ga and in 1 other branch
cpufreq: powerpc: Add cpufreq driver for Freescale e500mc SoCs
Add cpufreq driver for Freescale e500mc, e5500 and e6500 SoCs which are capable of changing the CPU frequency dynamically.

Signed-off-by: Tang Yuantian <Yuantian.Tang@freescale.com>
Signed-off-by: Li Yang <leoli@freescale.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Showing 3 changed files with 391 additions and 0 deletions
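The driver registers with the generic cpufreq core, so once it is built (see the Kconfig entry below) and loaded, its frequency table appears under the standard cpufreq sysfs nodes. A minimal userspace sketch, assuming the usual /sys/devices/system/cpu/cpuN/cpufreq layout rather than anything specific to this commit:

/*
 * Minimal sketch: print the frequencies the driver exposes for CPU 0 via
 * the generic cpufreq sysfs interface.  The path is the standard cpufreq
 * layout and is assumed here, not defined by this commit.
 */
#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/"
			"scaling_available_frequencies", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);	/* values are in kHz */
	fclose(f);
	return 0;
}

Selecting one of those values (for example through the userspace governor's scaling_setspeed) is what ends up invoking corenet_cpufreq_target() in the new file.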
drivers/cpufreq/Kconfig.powerpc
... | ... | @@ -23,4 +23,14 @@ |
23 | 23 | help |
24 | 24 | This adds support for frequency switching on Maple 970FX |
25 | 25 | Evaluation Board and compatible boards (IBM JS2x blades). |
26 | + | |
27 | +config PPC_CORENET_CPUFREQ | |
28 | + tristate "CPU frequency scaling driver for Freescale E500MC SoCs" | |
29 | + depends on PPC_E500MC && OF && COMMON_CLK | |
30 | + select CPU_FREQ_TABLE | |
31 | + select CLK_PPC_CORENET | |
32 | + help | |
33 | + This adds the CPUFreq driver support for Freescale e500mc, | |
34 | + e5500 and e6500 series SoCs which are capable of changing | |
35 | + the CPU's frequency dynamically. |
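Since the new entry is a tristate, the driver can be built in or as a module; an illustrative defconfig line (not part of this diff) would be CONFIG_PPC_CORENET_CPUFREQ=m.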
drivers/cpufreq/Makefile
... | ... | @@ -79,6 +79,7 @@ |
79 | 79 | ppc-cbe-cpufreq-y += ppc_cbe_cpufreq_pervasive.o ppc_cbe_cpufreq.o |
80 | 80 | obj-$(CONFIG_CPU_FREQ_CBE_PMI) += ppc_cbe_cpufreq_pmi.o |
81 | 81 | obj-$(CONFIG_CPU_FREQ_MAPLE) += maple-cpufreq.o |
82 | +obj-$(CONFIG_PPC_CORENET_CPUFREQ) += ppc-corenet-cpufreq.o | |
82 | 83 | |
83 | 84 | ################################################################################## |
84 | 85 | # Other platform drivers |
drivers/cpufreq/ppc-corenet-cpufreq.c
1 | +/* | |
2 | + * Copyright 2013 Freescale Semiconductor, Inc. | |
3 | + * | |
4 | + * CPU Frequency Scaling driver for Freescale PowerPC corenet SoCs. | |
5 | + * | |
6 | + * This program is free software; you can redistribute it and/or modify | |
7 | + * it under the terms of the GNU General Public License version 2 as | |
8 | + * published by the Free Software Foundation. | |
9 | + */ | |
10 | + | |
11 | +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
12 | + | |
13 | +#include <linux/clk.h> | |
14 | +#include <linux/cpufreq.h> | |
15 | +#include <linux/errno.h> | |
16 | +#include <sysdev/fsl_soc.h> | |
17 | +#include <linux/init.h> | |
18 | +#include <linux/kernel.h> | |
19 | +#include <linux/module.h> | |
20 | +#include <linux/mutex.h> | |
21 | +#include <linux/of.h> | |
22 | +#include <linux/slab.h> | |
23 | +#include <linux/smp.h> | |
24 | + | |
25 | +/** | |
26 | + * struct cpu_data - per CPU data struct | |
27 | + * @clk: the clk of the CPU | |
28 | + * @parent: the parent node of the CPU clock | |
29 | + * @table: frequency table | |
30 | + */ | |
31 | +struct cpu_data { | |
32 | + struct clk *clk; | |
33 | + struct device_node *parent; | |
34 | + struct cpufreq_frequency_table *table; | |
35 | +}; | |
36 | + | |
37 | +/** | |
38 | + * struct soc_data - SoC specific data | |
39 | + * @freq_mask: mask the disallowed frequencies | |
40 | + * @flag: unique flags | |
41 | + */ | |
42 | +struct soc_data { | |
43 | + u32 freq_mask[4]; | |
44 | + u32 flag; | |
45 | +}; | |
46 | + | |
47 | +#define FREQ_MASK 1 | |
48 | +/* see hardware specification for the allowed frequencies */ | |
49 | +static const struct soc_data sdata[] = { | |
50 | + { /* used by p2041 and p3041 */ | |
51 | + .freq_mask = {0x8, 0x8, 0x2, 0x2}, | |
52 | + .flag = FREQ_MASK, | |
53 | + }, | |
54 | + { /* used by p5020 */ | |
55 | + .freq_mask = {0x8, 0x2}, | |
56 | + .flag = FREQ_MASK, | |
57 | + }, | |
58 | + { /* used by p4080, p5040 */ | |
59 | + .freq_mask = {0}, | |
60 | + .flag = 0, | |
61 | + }, | |
62 | +}; | |
63 | + | |
64 | +/* | |
65 | + * the minimum allowed core frequency, in Hz | |
66 | + * for chassis v1.0, >= platform frequency | |
67 | + * for chassis v2.0, >= platform frequency / 2 | |
68 | + */ | |
69 | +static u32 min_cpufreq; | |
70 | +static const u32 *fmask; | |
71 | + | |
72 | +/* serialize frequency changes */ | |
73 | +static DEFINE_MUTEX(cpufreq_lock); | |
74 | +static DEFINE_PER_CPU(struct cpu_data *, cpu_data); | |
75 | + | |
76 | +/* cpumask in a cluster */ | |
77 | +static DEFINE_PER_CPU(cpumask_var_t, cpu_mask); | |
78 | + | |
79 | +#ifndef CONFIG_SMP | |
80 | +static inline const struct cpumask *cpu_core_mask(int cpu) | |
81 | +{ | |
82 | + return cpumask_of(0); | |
83 | +} | |
84 | +#endif | |
85 | + | |
86 | +static unsigned int corenet_cpufreq_get_speed(unsigned int cpu) | |
87 | +{ | |
88 | + struct cpu_data *data = per_cpu(cpu_data, cpu); | |
89 | + | |
90 | + return clk_get_rate(data->clk) / 1000; | |
91 | +} | |
92 | + | |
93 | +/* remove duplicate frequencies from the frequency table */ | |
94 | +static void freq_table_redup(struct cpufreq_frequency_table *freq_table, | |
95 | + int count) | |
96 | +{ | |
97 | + int i, j; | |
98 | + | |
99 | + for (i = 1; i < count; i++) { | |
100 | + for (j = 0; j < i; j++) { | |
101 | + if (freq_table[j].frequency == CPUFREQ_ENTRY_INVALID || | |
102 | + freq_table[j].frequency != | |
103 | + freq_table[i].frequency) | |
104 | + continue; | |
105 | + | |
106 | + freq_table[i].frequency = CPUFREQ_ENTRY_INVALID; | |
107 | + break; | |
108 | + } | |
109 | + } | |
110 | +} | |
111 | + | |
112 | +/* sort the frequencies in the frequency table in descending order */ | |
113 | +static void freq_table_sort(struct cpufreq_frequency_table *freq_table, | |
114 | + int count) | |
115 | +{ | |
116 | + int i, j, ind; | |
117 | + unsigned int freq, max_freq; | |
118 | + struct cpufreq_frequency_table table; | |
119 | + for (i = 0; i < count - 1; i++) { | |
120 | + max_freq = freq_table[i].frequency; | |
121 | + ind = i; | |
122 | + for (j = i + 1; j < count; j++) { | |
123 | + freq = freq_table[j].frequency; | |
124 | + if (freq == CPUFREQ_ENTRY_INVALID || | |
125 | + freq <= max_freq) | |
126 | + continue; | |
127 | + ind = j; | |
128 | + max_freq = freq; | |
129 | + } | |
130 | + | |
131 | + if (ind != i) { | |
132 | + /* exchange the frequencies */ | |
133 | + table.driver_data = freq_table[i].driver_data; | |
134 | + table.frequency = freq_table[i].frequency; | |
135 | + freq_table[i].driver_data = freq_table[ind].driver_data; | |
136 | + freq_table[i].frequency = freq_table[ind].frequency; | |
137 | + freq_table[ind].driver_data = table.driver_data; | |
138 | + freq_table[ind].frequency = table.frequency; | |
139 | + } | |
140 | + } | |
141 | +} | |
142 | + | |
143 | +static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy) | |
144 | +{ | |
145 | + struct device_node *np; | |
146 | + int i, count, ret; | |
147 | + u32 freq, mask; | |
148 | + struct clk *clk; | |
149 | + struct cpufreq_frequency_table *table; | |
150 | + struct cpu_data *data; | |
151 | + unsigned int cpu = policy->cpu; | |
152 | + | |
153 | + np = of_get_cpu_node(cpu, NULL); | |
154 | + if (!np) | |
155 | + return -ENODEV; | |
156 | + | |
157 | + data = kzalloc(sizeof(*data), GFP_KERNEL); | |
158 | + if (!data) { | |
159 | + pr_err("%s: no memory\n", __func__); | |
160 | + goto err_np; | |
161 | + } | |
162 | + | |
163 | + data->clk = of_clk_get(np, 0); | |
164 | + if (IS_ERR(data->clk)) { | |
165 | + pr_err("%s: no clock information\n", __func__); | |
166 | + goto err_nomem2; | |
167 | + } | |
168 | + | |
169 | + data->parent = of_parse_phandle(np, "clocks", 0); | |
170 | + if (!data->parent) { | |
171 | + pr_err("%s: could not get clock information\n", __func__); | |
172 | + goto err_nomem2; | |
173 | + } | |
174 | + | |
175 | + count = of_property_count_strings(data->parent, "clock-names"); | |
176 | + table = kcalloc(count + 1, sizeof(*table), GFP_KERNEL); | |
177 | + if (!table) { | |
178 | + pr_err("%s: no memory\n", __func__); | |
179 | + goto err_node; | |
180 | + } | |
181 | + | |
182 | + if (fmask) | |
183 | + mask = fmask[get_hard_smp_processor_id(cpu)]; | |
184 | + else | |
185 | + mask = 0x0; | |
186 | + | |
187 | + for (i = 0; i < count; i++) { | |
188 | + clk = of_clk_get(data->parent, i); | |
189 | + freq = clk_get_rate(clk); | |
190 | + /* | |
191 | + * the clock is valid if its frequency is not masked | |
192 | + * and larger than the minimum allowed frequency. | |
193 | + */ | |
194 | + if (freq < min_cpufreq || (mask & (1 << i))) | |
195 | + table[i].frequency = CPUFREQ_ENTRY_INVALID; | |
196 | + else | |
197 | + table[i].frequency = freq / 1000; | |
198 | + table[i].driver_data = i; | |
199 | + } | |
200 | + freq_table_redup(table, count); | |
201 | + freq_table_sort(table, count); | |
202 | + table[i].frequency = CPUFREQ_TABLE_END; | |
203 | + | |
204 | + /* set the min and max frequency properly */ | |
205 | + ret = cpufreq_frequency_table_cpuinfo(policy, table); | |
206 | + if (ret) { | |
207 | + pr_err("invalid frequency table: %d\n", ret); | |
208 | + goto err_nomem1; | |
209 | + } | |
210 | + | |
211 | + data->table = table; | |
212 | + per_cpu(cpu_data, cpu) = data; | |
213 | + | |
214 | + /* update ->cpus if we have a cluster, no harm if not */ | |
215 | + cpumask_copy(policy->cpus, per_cpu(cpu_mask, cpu)); | |
216 | + for_each_cpu(i, per_cpu(cpu_mask, cpu)) | |
217 | + per_cpu(cpu_data, i) = data; | |
218 | + | |
219 | + policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | |
220 | + policy->cur = corenet_cpufreq_get_speed(policy->cpu); | |
221 | + | |
222 | + cpufreq_frequency_table_get_attr(table, cpu); | |
223 | + of_node_put(np); | |
224 | + | |
225 | + return 0; | |
226 | + | |
227 | +err_nomem1: | |
228 | + kfree(table); | |
229 | +err_node: | |
230 | + of_node_put(data->parent); | |
231 | +err_nomem2: | |
232 | + per_cpu(cpu_data, cpu) = NULL; | |
233 | + kfree(data); | |
234 | +err_np: | |
235 | + of_node_put(np); | |
236 | + | |
237 | + return -ENODEV; | |
238 | +} | |
239 | + | |
240 | +static int __exit corenet_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |
241 | +{ | |
242 | + struct cpu_data *data = per_cpu(cpu_data, policy->cpu); | |
243 | + unsigned int cpu; | |
244 | + | |
245 | + cpufreq_frequency_table_put_attr(policy->cpu); | |
246 | + of_node_put(data->parent); | |
247 | + kfree(data->table); | |
248 | + kfree(data); | |
249 | + | |
250 | + for_each_cpu(cpu, per_cpu(cpu_mask, policy->cpu)) | |
251 | + per_cpu(cpu_data, cpu) = NULL; | |
252 | + | |
253 | + return 0; | |
254 | +} | |
255 | + | |
256 | +static int corenet_cpufreq_verify(struct cpufreq_policy *policy) | |
257 | +{ | |
258 | + struct cpufreq_frequency_table *table = | |
259 | + per_cpu(cpu_data, policy->cpu)->table; | |
260 | + | |
261 | + return cpufreq_frequency_table_verify(policy, table); | |
262 | +} | |
263 | + | |
264 | +static int corenet_cpufreq_target(struct cpufreq_policy *policy, | |
265 | + unsigned int target_freq, unsigned int relation) | |
266 | +{ | |
267 | + struct cpufreq_freqs freqs; | |
268 | + unsigned int new; | |
269 | + struct clk *parent; | |
270 | + int ret; | |
271 | + struct cpu_data *data = per_cpu(cpu_data, policy->cpu); | |
272 | + | |
273 | + cpufreq_frequency_table_target(policy, data->table, | |
274 | + target_freq, relation, &new); | |
275 | + | |
276 | + if (policy->cur == data->table[new].frequency) | |
277 | + return 0; | |
278 | + | |
279 | + freqs.old = policy->cur; | |
280 | + freqs.new = data->table[new].frequency; | |
281 | + | |
282 | + mutex_lock(&cpufreq_lock); | |
283 | + cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE); | |
284 | + | |
285 | + parent = of_clk_get(data->parent, data->table[new].driver_data); | |
286 | + ret = clk_set_parent(data->clk, parent); | |
287 | + if (ret) | |
288 | + freqs.new = freqs.old; | |
289 | + | |
290 | + cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE); | |
291 | + mutex_unlock(&cpufreq_lock); | |
292 | + | |
293 | + return ret; | |
294 | +} | |
295 | + | |
296 | +static struct freq_attr *corenet_cpufreq_attr[] = { | |
297 | + &cpufreq_freq_attr_scaling_available_freqs, | |
298 | + NULL, | |
299 | +}; | |
300 | + | |
301 | +static struct cpufreq_driver ppc_corenet_cpufreq_driver = { | |
302 | + .name = "ppc_cpufreq", | |
303 | + .owner = THIS_MODULE, | |
304 | + .flags = CPUFREQ_CONST_LOOPS, | |
305 | + .init = corenet_cpufreq_cpu_init, | |
306 | + .exit = __exit_p(corenet_cpufreq_cpu_exit), | |
307 | + .verify = corenet_cpufreq_verify, | |
308 | + .target = corenet_cpufreq_target, | |
309 | + .get = corenet_cpufreq_get_speed, | |
310 | + .attr = corenet_cpufreq_attr, | |
311 | +}; | |
312 | + | |
313 | +static const struct of_device_id node_matches[] __initdata = { | |
314 | + { .compatible = "fsl,p2041-clockgen", .data = &sdata[0], }, | |
315 | + { .compatible = "fsl,p3041-clockgen", .data = &sdata[0], }, | |
316 | + { .compatible = "fsl,p5020-clockgen", .data = &sdata[1], }, | |
317 | + { .compatible = "fsl,p4080-clockgen", .data = &sdata[2], }, | |
318 | + { .compatible = "fsl,p5040-clockgen", .data = &sdata[2], }, | |
319 | + { .compatible = "fsl,qoriq-clockgen-2.0", }, | |
320 | + {} | |
321 | +}; | |
322 | + | |
323 | +static int __init ppc_corenet_cpufreq_init(void) | |
324 | +{ | |
325 | + int ret; | |
326 | + struct device_node *np; | |
327 | + const struct of_device_id *match; | |
328 | + const struct soc_data *data; | |
329 | + unsigned int cpu; | |
330 | + | |
331 | + np = of_find_matching_node(NULL, node_matches); | |
332 | + if (!np) | |
333 | + return -ENODEV; | |
334 | + | |
335 | + for_each_possible_cpu(cpu) { | |
336 | + if (!alloc_cpumask_var(&per_cpu(cpu_mask, cpu), GFP_KERNEL)) | |
337 | + goto err_mask; | |
338 | + cpumask_copy(per_cpu(cpu_mask, cpu), cpu_core_mask(cpu)); | |
339 | + } | |
340 | + | |
341 | + match = of_match_node(node_matches, np); | |
342 | + data = match->data; | |
343 | + if (data) { | |
344 | + if (data->flag) | |
345 | + fmask = data->freq_mask; | |
346 | + min_cpufreq = fsl_get_sys_freq(); | |
347 | + } else { | |
348 | + min_cpufreq = fsl_get_sys_freq() / 2; | |
349 | + } | |
350 | + | |
351 | + of_node_put(np); | |
352 | + | |
353 | + ret = cpufreq_register_driver(&ppc_corenet_cpufreq_driver); | |
354 | + if (!ret) | |
355 | + pr_info("Freescale PowerPC corenet CPU frequency scaling driver\n"); | |
356 | + | |
357 | + return ret; | |
358 | + | |
359 | +err_mask: | |
360 | + for_each_possible_cpu(cpu) | |
361 | + free_cpumask_var(per_cpu(cpu_mask, cpu)); | |
362 | + | |
363 | + return -ENOMEM; | |
364 | +} | |
365 | +module_init(ppc_corenet_cpufreq_init); | |
366 | + | |
367 | +static void __exit ppc_corenet_cpufreq_exit(void) | |
368 | +{ | |
369 | + unsigned int cpu; | |
370 | + | |
371 | + for_each_possible_cpu(cpu) | |
372 | + free_cpumask_var(per_cpu(cpu_mask, cpu)); | |
373 | + | |
374 | + cpufreq_unregister_driver(&ppc_corenet_cpufreq_driver); | |
375 | +} | |
376 | +module_exit(ppc_corenet_cpufreq_exit); | |
377 | + | |
378 | +MODULE_LICENSE("GPL"); | |
379 | +MODULE_AUTHOR("Tang Yuantian <Yuantian.Tang@freescale.com>"); | |
380 | +MODULE_DESCRIPTION("cpufreq driver for Freescale e500mc series SoCs"); |