Blame view
drivers/cpufreq/cpufreq_conservative.c
16.3 KB
b9170836d [CPUFREQ] Conserv... |
1 2 3 4 5 6 |
/* * drivers/cpufreq/cpufreq_conservative.c * * Copyright (C) 2001 Russell King * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>. * Jun Nakajima <jun.nakajima@intel.com> |
11a80a9c7 [CPUFREQ] conserv... |
7 |
* (C) 2009 Alexander Clouter <alex@digriz.org.uk> |
b9170836d [CPUFREQ] Conserv... |
8 9 10 11 12 13 14 15 |
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> |
b9170836d [CPUFREQ] Conserv... |
16 |
#include <linux/init.h> |
b9170836d [CPUFREQ] Conserv... |
17 |
#include <linux/cpufreq.h> |
138a0128c [PATCH] cpufreq b... |
18 |
#include <linux/cpu.h> |
b9170836d [CPUFREQ] Conserv... |
19 20 |
#include <linux/jiffies.h> #include <linux/kernel_stat.h> |
3fc54d37a [CPUFREQ] Convert... |
21 |
#include <linux/mutex.h> |
8e677ce83 [CPUFREQ] conserv... |
22 23 24 25 |
#include <linux/hrtimer.h> #include <linux/tick.h> #include <linux/ktime.h> #include <linux/sched.h> |
b9170836d [CPUFREQ] Conserv... |
26 27 28 29 30 31 |
/*
 * dbs is used in this file as a shortform for demandbased switching
 * It helps to keep variable names smaller, simpler
 */
#define DEF_FREQUENCY_UP_THRESHOLD		(80)
#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)

/*
 * The polling frequency of this governor depends on the capability of
 * the processor. Default polling frequency is 1000 times the transition
 * latency of the processor. The governor will work on any processor with
 * transition latency <= 10mS, using appropriate sampling
 * rate.
 * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
 * this governor will not work.
 * All times here are in uS.
 */
#define MIN_SAMPLING_RATE_RATIO			(2)

/* Floor for the sampling_rate tunable; computed at first governor start. */
static unsigned int min_sampling_rate;

/* Default sampling interval = transition latency * LATENCY_MULTIPLIER. */
#define LATENCY_MULTIPLIER			(1000)
/* Kernel-imposed floor: sampling interval >= latency * this factor. */
#define MIN_LATENCY_MULTIPLIER			(100)
#define DEF_SAMPLING_DOWN_FACTOR		(1)
#define MAX_SAMPLING_DOWN_FACTOR		(10)
/* Used as max_transition_latency in the governor struct below (10 ms). */
#define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)

static void do_dbs_timer(struct work_struct *work);
b9170836d [CPUFREQ] Conserv... |
54 55 |
/* Per-CPU sampling state for the conservative governor. */
struct cpu_dbs_info_s {
	cputime64_t prev_cpu_idle;	/* idle time at the previous sample */
	cputime64_t prev_cpu_wall;	/* wall time at the previous sample */
	cputime64_t prev_cpu_nice;	/* nice time at the previous sample */
	struct cpufreq_policy *cur_policy;
	struct delayed_work work;	/* sampling work, see do_dbs_timer() */
	unsigned int down_skip;		/* only cleared here; legacy state */
	unsigned int requested_freq;	/* internally tracked target freq */
	int cpu;
	unsigned int enable:1;
	/*
	 * percpu mutex that serializes governor limit change with
	 * do_dbs_timer invocation. We do not want do_dbs_timer to run
	 * when user is changing the governor or limits.
	 */
	struct mutex timer_mutex;
};
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);

static unsigned int dbs_enable;	/* number of CPUs using this policy */

/*
 * dbs_mutex protects dbs_enable in governor start/stop.
 */
static DEFINE_MUTEX(dbs_mutex);

/* Tunables exposed through the sysfs interface further down. */
static struct dbs_tuners {
	unsigned int sampling_rate;	/* uS between samples; set at start */
	unsigned int sampling_down_factor;
	unsigned int up_threshold;	/* load % above which freq is raised */
	unsigned int down_threshold;	/* load % below which freq is lowered */
	unsigned int ignore_nice;	/* 1: do not count nice time as busy */
	unsigned int freq_step;		/* % of policy->max per adjustment */
} dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.ignore_nice = 0,
	.freq_step = 5,
};
3292beb34 sched/accounting:... |
94 |
/*
 * Jiffy-granularity fallback idle accounting built from kcpustat.
 * Everything that is not one of the busy categories below is idle.
 * Returns idle time in uS; stores the wall time (uS) through @wall.
 */
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

/*
 * Idle time (uS) for @cpu, counting iowait as idle.  Falls back to the
 * jiffy-based variant when get_cpu_idle_time_us() reports -1ULL
 * (nohz idle bookkeeping unavailable for this CPU).
 */
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
a8d7c3bc2 [CPUFREQ] Make cp... |
127 128 129 130 131 132 |
/* keep track of frequency transitions */
static int
dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
		     void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
							freq->cpu);
	struct cpufreq_policy *policy;

	/* governor not active on this CPU: nothing to track */
	if (!this_dbs_info->enable)
		return 0;

	policy = this_dbs_info->cur_policy;

	/*
	 * we only care if our internally tracked freq moves outside
	 * the 'valid' ranges of frequency available to us otherwise
	 * we do not change it
	 */
	if (this_dbs_info->requested_freq > policy->max
			|| this_dbs_info->requested_freq < policy->min)
		this_dbs_info->requested_freq = freq->new;

	return 0;
}

static struct notifier_block dbs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier
};
b9170836d [CPUFREQ] Conserv... |
155 |
/************************** sysfs interface ************************/ |
49b015ce3 [CPUFREQ] Use glo... |
156 157 |
static ssize_t show_sampling_rate_min(struct kobject *kobj, struct attribute *attr, char *buf) |
b9170836d [CPUFREQ] Conserv... |
158 |
{ |
cef9615a8 [CPUFREQ] ondeman... |
159 160 |
return sprintf(buf, "%u ", min_sampling_rate); |
b9170836d [CPUFREQ] Conserv... |
161 |
} |
6dad2a296 cpufreq: Unify sy... |
162 |
define_one_global_ro(sampling_rate_min); |
b9170836d [CPUFREQ] Conserv... |
163 164 165 166 |
/* cpufreq_conservative Governor Tunables */ #define show_one(file_name, object) \ static ssize_t show_##file_name \ |
49b015ce3 [CPUFREQ] Use glo... |
167 |
(struct kobject *kobj, struct attribute *attr, char *buf) \ |
b9170836d [CPUFREQ] Conserv... |
168 169 170 171 172 173 174 175 |
{ \ return sprintf(buf, "%u ", dbs_tuners_ins.object); \ } show_one(sampling_rate, sampling_rate); show_one(sampling_down_factor, sampling_down_factor); show_one(up_threshold, up_threshold); show_one(down_threshold, down_threshold); |
001893cda [PATCH] cpufreq_c... |
176 |
show_one(ignore_nice_load, ignore_nice); |
b9170836d [CPUFREQ] Conserv... |
177 |
show_one(freq_step, freq_step); |
49b015ce3 [CPUFREQ] Use glo... |
178 179 180 |
static ssize_t store_sampling_down_factor(struct kobject *a, struct attribute *b, const char *buf, size_t count) |
b9170836d [CPUFREQ] Conserv... |
181 182 183 |
{ unsigned int input; int ret; |
9acef4875 [CPUFREQ] checkpa... |
184 |
ret = sscanf(buf, "%u", &input); |
8e677ce83 [CPUFREQ] conserv... |
185 |
|
2c906b317 [PATCH] cpufreq_c... |
186 |
if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1) |
b9170836d [CPUFREQ] Conserv... |
187 |
return -EINVAL; |
b9170836d [CPUFREQ] Conserv... |
188 |
dbs_tuners_ins.sampling_down_factor = input; |
b9170836d [CPUFREQ] Conserv... |
189 190 |
return count; } |
49b015ce3 [CPUFREQ] Use glo... |
191 192 |
static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b, const char *buf, size_t count) |
b9170836d [CPUFREQ] Conserv... |
193 194 195 |
{ unsigned int input; int ret; |
9acef4875 [CPUFREQ] checkpa... |
196 |
ret = sscanf(buf, "%u", &input); |
b9170836d [CPUFREQ] Conserv... |
197 |
|
8e677ce83 [CPUFREQ] conserv... |
198 |
if (ret != 1) |
b9170836d [CPUFREQ] Conserv... |
199 |
return -EINVAL; |
8e677ce83 [CPUFREQ] conserv... |
200 |
|
cef9615a8 [CPUFREQ] ondeman... |
201 |
dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate); |
b9170836d [CPUFREQ] Conserv... |
202 203 |
return count; } |
49b015ce3 [CPUFREQ] Use glo... |
204 205 |
static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, const char *buf, size_t count) |
b9170836d [CPUFREQ] Conserv... |
206 207 208 |
{ unsigned int input; int ret; |
9acef4875 [CPUFREQ] checkpa... |
209 |
ret = sscanf(buf, "%u", &input); |
b9170836d [CPUFREQ] Conserv... |
210 |
|
9acef4875 [CPUFREQ] checkpa... |
211 |
if (ret != 1 || input > 100 || |
326c86dea [CPUFREQ] Remove ... |
212 |
input <= dbs_tuners_ins.down_threshold) |
b9170836d [CPUFREQ] Conserv... |
213 |
return -EINVAL; |
b9170836d [CPUFREQ] Conserv... |
214 215 |
dbs_tuners_ins.up_threshold = input; |
b9170836d [CPUFREQ] Conserv... |
216 217 |
return count; } |
49b015ce3 [CPUFREQ] Use glo... |
218 219 |
static ssize_t store_down_threshold(struct kobject *a, struct attribute *b, const char *buf, size_t count) |
b9170836d [CPUFREQ] Conserv... |
220 221 222 |
{ unsigned int input; int ret; |
9acef4875 [CPUFREQ] checkpa... |
223 |
ret = sscanf(buf, "%u", &input); |
b9170836d [CPUFREQ] Conserv... |
224 |
|
8e677ce83 [CPUFREQ] conserv... |
225 226 |
/* cannot be lower than 11 otherwise freq will not fall */ if (ret != 1 || input < 11 || input > 100 || |
326c86dea [CPUFREQ] Remove ... |
227 |
input >= dbs_tuners_ins.up_threshold) |
b9170836d [CPUFREQ] Conserv... |
228 |
return -EINVAL; |
b9170836d [CPUFREQ] Conserv... |
229 230 |
dbs_tuners_ins.down_threshold = input; |
b9170836d [CPUFREQ] Conserv... |
231 232 |
return count; } |
49b015ce3 [CPUFREQ] Use glo... |
233 234 |
static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b, const char *buf, size_t count) |
b9170836d [CPUFREQ] Conserv... |
235 236 237 238 239 |
{ unsigned int input; int ret; unsigned int j; |
18a7247d1 [CPUFREQ] Fix up ... |
240 241 242 |
ret = sscanf(buf, "%u", &input); if (ret != 1) |
b9170836d [CPUFREQ] Conserv... |
243 |
return -EINVAL; |
18a7247d1 [CPUFREQ] Fix up ... |
244 |
if (input > 1) |
b9170836d [CPUFREQ] Conserv... |
245 |
input = 1; |
18a7247d1 [CPUFREQ] Fix up ... |
246 |
|
326c86dea [CPUFREQ] Remove ... |
247 |
if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */ |
b9170836d [CPUFREQ] Conserv... |
248 |
return count; |
326c86dea [CPUFREQ] Remove ... |
249 |
|
b9170836d [CPUFREQ] Conserv... |
250 |
dbs_tuners_ins.ignore_nice = input; |
8e677ce83 [CPUFREQ] conserv... |
251 |
/* we need to re-evaluate prev_cpu_idle */ |
dac1c1a56 [CPUFREQ] ondeman... |
252 |
for_each_online_cpu(j) { |
8e677ce83 [CPUFREQ] conserv... |
253 |
struct cpu_dbs_info_s *dbs_info; |
245b2e70e percpu: clean up ... |
254 |
dbs_info = &per_cpu(cs_cpu_dbs_info, j); |
8e677ce83 [CPUFREQ] conserv... |
255 256 257 |
dbs_info->prev_cpu_idle = get_cpu_idle_time(j, &dbs_info->prev_cpu_wall); if (dbs_tuners_ins.ignore_nice) |
3292beb34 sched/accounting:... |
258 |
dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
b9170836d [CPUFREQ] Conserv... |
259 |
} |
b9170836d [CPUFREQ] Conserv... |
260 261 |
return count; } |
49b015ce3 [CPUFREQ] Use glo... |
262 263 |
static ssize_t store_freq_step(struct kobject *a, struct attribute *b, const char *buf, size_t count) |
b9170836d [CPUFREQ] Conserv... |
264 265 266 |
{ unsigned int input; int ret; |
18a7247d1 [CPUFREQ] Fix up ... |
267 |
ret = sscanf(buf, "%u", &input); |
b9170836d [CPUFREQ] Conserv... |
268 |
|
18a7247d1 [CPUFREQ] Fix up ... |
269 |
if (ret != 1) |
b9170836d [CPUFREQ] Conserv... |
270 |
return -EINVAL; |
18a7247d1 [CPUFREQ] Fix up ... |
271 |
if (input > 100) |
b9170836d [CPUFREQ] Conserv... |
272 |
input = 100; |
18a7247d1 [CPUFREQ] Fix up ... |
273 |
|
b9170836d [CPUFREQ] Conserv... |
274 275 |
/* no need to test here if freq_step is zero as the user might actually * want this, they would be crazy though :) */ |
b9170836d [CPUFREQ] Conserv... |
276 |
dbs_tuners_ins.freq_step = input; |
b9170836d [CPUFREQ] Conserv... |
277 278 |
return count; } |
6dad2a296 cpufreq: Unify sy... |
279 280 281 282 283 284 |
define_one_global_rw(sampling_rate);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(up_threshold);
define_one_global_rw(down_threshold);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(freq_step);

/* Registered on cpufreq_global_kobject under the "conservative" group. */
static struct attribute *dbs_attributes[] = {
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&sampling_down_factor.attr,
	&up_threshold.attr,
	&down_threshold.attr,
	&ignore_nice_load.attr,
	&freq_step.attr,
	NULL
};

static struct attribute_group dbs_attr_group = {
	.attrs = dbs_attributes,
	.name = "conservative",
};

/************************** sysfs end ************************/
8e677ce83 [CPUFREQ] conserv... |
303 |
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info) |
b9170836d [CPUFREQ] Conserv... |
304 |
{ |
8e677ce83 [CPUFREQ] conserv... |
305 |
unsigned int load = 0; |
fd187aaf9 [CPUFREQ] use max... |
306 |
unsigned int max_load = 0; |
f068c04ba [CPUFREQ] Fix -Ws... |
307 |
unsigned int freq_target; |
b9170836d [CPUFREQ] Conserv... |
308 |
|
8e677ce83 [CPUFREQ] conserv... |
309 310 |
struct cpufreq_policy *policy; unsigned int j; |
b9170836d [CPUFREQ] Conserv... |
311 |
|
08a28e2e9 [PATCH] cpufreq_c... |
312 |
policy = this_dbs_info->cur_policy; |
18a7247d1 [CPUFREQ] Fix up ... |
313 |
/* |
8e677ce83 [CPUFREQ] conserv... |
314 315 316 317 |
* Every sampling_rate, we check, if current idle time is less * than 20% (default), then we try to increase frequency * Every sampling_rate*sampling_down_factor, we check, if current * idle time is more than 80%, then we try to decrease frequency |
b9170836d [CPUFREQ] Conserv... |
318 |
* |
18a7247d1 [CPUFREQ] Fix up ... |
319 320 |
* Any frequency increase takes it to the maximum frequency. * Frequency reduction happens at minimum steps of |
8e677ce83 [CPUFREQ] conserv... |
321 |
* 5% (default) of maximum frequency |
b9170836d [CPUFREQ] Conserv... |
322 |
*/ |
8e677ce83 [CPUFREQ] conserv... |
323 324 325 326 327 |
/* Get Absolute Load */ for_each_cpu(j, policy->cpus) { struct cpu_dbs_info_s *j_dbs_info; cputime64_t cur_wall_time, cur_idle_time; unsigned int idle_time, wall_time; |
b9170836d [CPUFREQ] Conserv... |
328 |
|
245b2e70e percpu: clean up ... |
329 |
j_dbs_info = &per_cpu(cs_cpu_dbs_info, j); |
8e677ce83 [CPUFREQ] conserv... |
330 331 |
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time); |
648616343 [S390] cputime: a... |
332 333 |
wall_time = (unsigned int) (cur_wall_time - j_dbs_info->prev_cpu_wall); |
8e677ce83 [CPUFREQ] conserv... |
334 |
j_dbs_info->prev_cpu_wall = cur_wall_time; |
08a28e2e9 [PATCH] cpufreq_c... |
335 |
|
648616343 [S390] cputime: a... |
336 337 |
idle_time = (unsigned int) (cur_idle_time - j_dbs_info->prev_cpu_idle); |
8e677ce83 [CPUFREQ] conserv... |
338 |
j_dbs_info->prev_cpu_idle = cur_idle_time; |
b9170836d [CPUFREQ] Conserv... |
339 |
|
8e677ce83 [CPUFREQ] conserv... |
340 |
if (dbs_tuners_ins.ignore_nice) { |
3292beb34 sched/accounting:... |
341 |
u64 cur_nice; |
8e677ce83 [CPUFREQ] conserv... |
342 |
unsigned long cur_nice_jiffies; |
3292beb34 sched/accounting:... |
343 344 |
cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] - j_dbs_info->prev_cpu_nice; |
8e677ce83 [CPUFREQ] conserv... |
345 346 347 348 349 350 |
/* * Assumption: nice time between sampling periods will * be less than 2^32 jiffies for 32 bit sys */ cur_nice_jiffies = (unsigned long) cputime64_to_jiffies64(cur_nice); |
3292beb34 sched/accounting:... |
351 |
j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
8e677ce83 [CPUFREQ] conserv... |
352 353 354 355 356 357 358 |
idle_time += jiffies_to_usecs(cur_nice_jiffies); } if (unlikely(!wall_time || wall_time < idle_time)) continue; load = 100 * (wall_time - idle_time) / wall_time; |
fd187aaf9 [CPUFREQ] use max... |
359 360 361 |
if (load > max_load) max_load = load; |
8e677ce83 [CPUFREQ] conserv... |
362 363 364 365 366 367 368 369 |
} /* * break out if we 'cannot' reduce the speed as the user might * want freq_step to be zero */ if (dbs_tuners_ins.freq_step == 0) return; |
b9170836d [CPUFREQ] Conserv... |
370 |
|
8e677ce83 [CPUFREQ] conserv... |
371 |
/* Check for frequency increase */ |
fd187aaf9 [CPUFREQ] use max... |
372 |
if (max_load > dbs_tuners_ins.up_threshold) { |
a159b8277 [PATCH] cpufreq_c... |
373 |
this_dbs_info->down_skip = 0; |
790d76fa9 [CPUFREQ] ondeman... |
374 |
|
b9170836d [CPUFREQ] Conserv... |
375 |
/* if we are already at full speed then break out early */ |
a159b8277 [PATCH] cpufreq_c... |
376 |
if (this_dbs_info->requested_freq == policy->max) |
b9170836d [CPUFREQ] Conserv... |
377 |
return; |
18a7247d1 [CPUFREQ] Fix up ... |
378 |
|
f068c04ba [CPUFREQ] Fix -Ws... |
379 |
freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; |
b9170836d [CPUFREQ] Conserv... |
380 381 |
/* max freq cannot be less than 100. But who knows.... */ |
f068c04ba [CPUFREQ] Fix -Ws... |
382 383 |
if (unlikely(freq_target == 0)) freq_target = 5; |
18a7247d1 [CPUFREQ] Fix up ... |
384 |
|
f068c04ba [CPUFREQ] Fix -Ws... |
385 |
this_dbs_info->requested_freq += freq_target; |
a159b8277 [PATCH] cpufreq_c... |
386 387 |
if (this_dbs_info->requested_freq > policy->max) this_dbs_info->requested_freq = policy->max; |
b9170836d [CPUFREQ] Conserv... |
388 |
|
a159b8277 [PATCH] cpufreq_c... |
389 |
__cpufreq_driver_target(policy, this_dbs_info->requested_freq, |
b9170836d [CPUFREQ] Conserv... |
390 |
CPUFREQ_RELATION_H); |
b9170836d [CPUFREQ] Conserv... |
391 392 |
return; } |
8e677ce83 [CPUFREQ] conserv... |
393 394 395 396 397 |
/* * The optimal frequency is the frequency that is the lowest that * can support the current CPU usage without triggering the up * policy. To be safe, we focus 10 points under the threshold. */ |
fd187aaf9 [CPUFREQ] use max... |
398 |
if (max_load < (dbs_tuners_ins.down_threshold - 10)) { |
f068c04ba [CPUFREQ] Fix -Ws... |
399 |
freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100; |
b9170836d [CPUFREQ] Conserv... |
400 |
|
f068c04ba [CPUFREQ] Fix -Ws... |
401 |
this_dbs_info->requested_freq -= freq_target; |
a159b8277 [PATCH] cpufreq_c... |
402 403 |
if (this_dbs_info->requested_freq < policy->min) this_dbs_info->requested_freq = policy->min; |
b9170836d [CPUFREQ] Conserv... |
404 |
|
8e677ce83 [CPUFREQ] conserv... |
405 406 407 408 409 |
/* * if we cannot reduce the frequency anymore, break out early */ if (policy->cur == policy->min) return; |
a159b8277 [PATCH] cpufreq_c... |
410 |
__cpufreq_driver_target(policy, this_dbs_info->requested_freq, |
2c906b317 [PATCH] cpufreq_c... |
411 |
CPUFREQ_RELATION_H); |
b9170836d [CPUFREQ] Conserv... |
412 413 414 |
return; } } |
c4028958b WorkStruct: make ... |
415 |
/*
 * Periodic sampling work: evaluate load for this CPU's policy and
 * re-arm the delayed work on the same CPU.  Runs under timer_mutex so
 * it cannot race with governor limit changes (see cpu_dbs_info_s).
 */
static void do_dbs_timer(struct work_struct *work)
{
	struct cpu_dbs_info_s *dbs_info =
		container_of(work, struct cpu_dbs_info_s, work.work);
	unsigned int cpu = dbs_info->cpu;

	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

	delay -= jiffies % delay;

	mutex_lock(&dbs_info->timer_mutex);

	dbs_check_cpu(dbs_info);

	schedule_delayed_work_on(cpu, &dbs_info->work, delay);
	mutex_unlock(&dbs_info->timer_mutex);
}
b9170836d [CPUFREQ] Conserv... |
431 |
|
8e677ce83 [CPUFREQ] conserv... |
432 |
/* Mark the CPU enabled and arm the first (deferrable) sampling work. */
static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
{
	/* We want all CPUs to do sampling nearly on same jiffy */
	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
	delay -= jiffies % delay;

	dbs_info->enable = 1;
	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
}
8e677ce83 [CPUFREQ] conserv... |
442 |
/* Disable sampling for this CPU and wait for in-flight work to finish. */
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
{
	dbs_info->enable = 0;
	cancel_delayed_work_sync(&dbs_info->work);
}

/*
 * Governor entry point, dispatched on START/STOP/LIMITS events.
 * Returns 0 on success or a negative errno (START path only).
 */
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info_s *this_dbs_info;
	unsigned int j;
	int rc;

	this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);

	switch (event) {
	case CPUFREQ_GOV_START:
		if ((!cpu_online(cpu)) || (!policy->cur))
			return -EINVAL;

		mutex_lock(&dbs_mutex);

		/* baseline the idle bookkeeping for every CPU of the policy */
		for_each_cpu(j, policy->cpus) {
			struct cpu_dbs_info_s *j_dbs_info;
			j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
			j_dbs_info->cur_policy = policy;

			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
						&j_dbs_info->prev_cpu_wall);
			if (dbs_tuners_ins.ignore_nice)
				j_dbs_info->prev_cpu_nice =
					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
		this_dbs_info->down_skip = 0;
		this_dbs_info->requested_freq = policy->cur;

		mutex_init(&this_dbs_info->timer_mutex);
		dbs_enable++;
		/*
		 * Start the timerschedule work, when this governor
		 * is used for first time
		 */
		if (dbs_enable == 1) {
			unsigned int latency;
			/* policy latency is in nS. Convert it to uS first */
			latency = policy->cpuinfo.transition_latency / 1000;
			if (latency == 0)
				latency = 1;

			rc = sysfs_create_group(cpufreq_global_kobject,
						&dbs_attr_group);
			if (rc) {
				mutex_unlock(&dbs_mutex);
				return rc;
			}

			/*
			 * conservative does not implement micro like ondemand
			 * governor, thus we are bound to jiffes/HZ
			 */
			min_sampling_rate =
				MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
			/* Bring kernel and HW constraints together */
			min_sampling_rate = max(min_sampling_rate,
					MIN_LATENCY_MULTIPLIER * latency);
			dbs_tuners_ins.sampling_rate =
				max(min_sampling_rate,
				    latency * LATENCY_MULTIPLIER);

			cpufreq_register_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}
		mutex_unlock(&dbs_mutex);

		dbs_timer_init(this_dbs_info);

		break;

	case CPUFREQ_GOV_STOP:
		dbs_timer_exit(this_dbs_info);

		mutex_lock(&dbs_mutex);
		dbs_enable--;
		mutex_destroy(&this_dbs_info->timer_mutex);

		/*
		 * Stop the transition notifier when the last user of
		 * this governor goes away.
		 */
		if (dbs_enable == 0)
			cpufreq_unregister_notifier(
					&dbs_cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);

		mutex_unlock(&dbs_mutex);
		if (!dbs_enable)
			sysfs_remove_group(cpufreq_global_kobject,
					   &dbs_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		/* clamp the current frequency into the new [min, max] */
		mutex_lock(&this_dbs_info->timer_mutex);
		if (policy->max < this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > this_dbs_info->cur_policy->cur)
			__cpufreq_driver_target(
					this_dbs_info->cur_policy,
					policy->min, CPUFREQ_RELATION_L);
		mutex_unlock(&this_dbs_info->timer_mutex);

		break;
	}
	return 0;
}
c4d14bc0b [CPUFREQ] Don't e... |
558 559 560 |
/* Exported (non-static) only when built as the default governor. */
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_conservative);
}

MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

/* Register early (fs_initcall) when this is the default governor. */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);