Commit 7e6fdd4bad033fa2d73716377b184fa975b0d985

Authored by Rajagopal Venkat
Committed by Rafael J. Wysocki
1 parent 77b67063bb

PM / devfreq: Core updates to support devices which can idle

Prepare the devfreq core framework to support devices which
can idle. When device idleness is detected, perhaps through
runtime-pm, some mechanism is needed to suspend devfreq load
monitoring and resume it when the device comes back online.
The present code continues monitoring unless the device is
removed from devfreq core.

This patch introduces following design changes,

 - use per device work instead of global work to monitor device
   load. This enables suspend/resume of device devfreq and
   reduces monitoring code complexity.
 - decouple delayed work based load monitoring logic from core
   by introducing helper functions to be used by governors. This
   provides flexibility for governors either to use delayed work
   based monitoring functions or to implement their own mechanism.
 - devfreq core interacts with governors via events to perform
   specific actions. These events include start/stop devfreq.
   This sets ground for adding suspend/resume events.

The devfreq APIs are not modified and are kept intact.

Signed-off-by: Rajagopal Venkat <rajagopal.venkat@linaro.org>
Acked-by: MyungJoo Ham <myungjoo.ham@samsung.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

Showing 8 changed files with 278 additions and 296 deletions Side-by-side Diff

Documentation/ABI/testing/sysfs-class-devfreq
... ... @@ -21,14 +21,6 @@
21 21 The /sys/class/devfreq/.../cur_freq shows the current
22 22 frequency of the corresponding devfreq object.
23 23  
24   -What: /sys/class/devfreq/.../central_polling
25   -Date: September 2011
26   -Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
27   -Description:
28   - The /sys/class/devfreq/.../central_polling shows whether
29   - the devfreq ojbect is using devfreq-provided central
30   - polling mechanism or not.
31   -
32 24 What: /sys/class/devfreq/.../polling_interval
33 25 Date: September 2011
34 26 Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
drivers/devfreq/devfreq.c
... ... @@ -30,18 +30,12 @@
30 30 struct class *devfreq_class;
31 31  
32 32 /*
33   - * devfreq_work periodically monitors every registered device.
34   - * The minimum polling interval is one jiffy. The polling interval is
35   - * determined by the minimum polling period among all polling devfreq
36   - * devices. The resolution of polling interval is one jiffy.
  33 + * devfreq core provides delayed work based load monitoring helper
  34 + * functions. Governors can use these or can implement their own
  35 + * monitoring mechanism.
37 36 */
38   -static bool polling;
39 37 static struct workqueue_struct *devfreq_wq;
40   -static struct delayed_work devfreq_work;
41 38  
42   -/* wait removing if this is to be removed */
43   -static struct devfreq *wait_remove_device;
44   -
45 39 /* The list of all device-devfreq */
46 40 static LIST_HEAD(devfreq_list);
47 41 static DEFINE_MUTEX(devfreq_list_lock);
... ... @@ -72,6 +66,8 @@
72 66 return ERR_PTR(-ENODEV);
73 67 }
74 68  
  69 +/* Load monitoring helper functions for governors use */
  70 +
75 71 /**
76 72 * update_devfreq() - Reevaluate the device and configure frequency.
77 73 * @devfreq: the devfreq instance.
... ... @@ -121,6 +117,152 @@
121 117 }
122 118  
123 119 /**
  120 + * devfreq_monitor() - Periodically poll devfreq objects.
  121 + * @work: the work struct used to run devfreq_monitor periodically.
  122 + *
  123 + */
  124 +static void devfreq_monitor(struct work_struct *work)
  125 +{
  126 + int err;
  127 + struct devfreq *devfreq = container_of(work,
  128 + struct devfreq, work.work);
  129 +
  130 + mutex_lock(&devfreq->lock);
  131 + err = update_devfreq(devfreq);
  132 + if (err)
  133 + dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
  134 +
  135 + queue_delayed_work(devfreq_wq, &devfreq->work,
  136 + msecs_to_jiffies(devfreq->profile->polling_ms));
  137 + mutex_unlock(&devfreq->lock);
  138 +}
  139 +
  140 +/**
  141 + * devfreq_monitor_start() - Start load monitoring of devfreq instance
  142 + * @devfreq: the devfreq instance.
  143 + *
  144 + * Helper function for starting devfreq device load monitoring. By
  145 + * default delayed work based monitoring is supported. Function
  146 + * to be called from governor in response to DEVFREQ_GOV_START
  147 + * event when device is added to devfreq framework.
  148 + */
  149 +void devfreq_monitor_start(struct devfreq *devfreq)
  150 +{
  151 + INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
  152 + if (devfreq->profile->polling_ms)
  153 + queue_delayed_work(devfreq_wq, &devfreq->work,
  154 + msecs_to_jiffies(devfreq->profile->polling_ms));
  155 +}
  156 +
  157 +/**
  158 + * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
  159 + * @devfreq: the devfreq instance.
  160 + *
  161 + * Helper function to stop devfreq device load monitoring. Function
  162 + * to be called from governor in response to DEVFREQ_GOV_STOP
  163 + * event when device is removed from devfreq framework.
  164 + */
  165 +void devfreq_monitor_stop(struct devfreq *devfreq)
  166 +{
  167 + cancel_delayed_work_sync(&devfreq->work);
  168 +}
  169 +
  170 +/**
  171 + * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
  172 + * @devfreq: the devfreq instance.
  173 + *
  174 + * Helper function to suspend devfreq device load monitoring. Function
  175 + * to be called from governor in response to DEVFREQ_GOV_SUSPEND
  176 + * event or when polling interval is set to zero.
  177 + *
  178 + * Note: Though this function is same as devfreq_monitor_stop(),
  179 + * intentionally kept separate to provide hooks for collecting
  180 + * transition statistics.
  181 + */
  182 +void devfreq_monitor_suspend(struct devfreq *devfreq)
  183 +{
  184 + mutex_lock(&devfreq->lock);
  185 + if (devfreq->stop_polling) {
  186 + mutex_unlock(&devfreq->lock);
  187 + return;
  188 + }
  189 +
  190 + devfreq->stop_polling = true;
  191 + mutex_unlock(&devfreq->lock);
  192 + cancel_delayed_work_sync(&devfreq->work);
  193 +}
  194 +
  195 +/**
  196 + * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
  197 + * @devfreq: the devfreq instance.
  198 + *
  199 + * Helper function to resume devfreq device load monitoring. Function
  200 + * to be called from governor in response to DEVFREQ_GOV_RESUME
  201 + * event or when polling interval is set to non-zero.
  202 + */
  203 +void devfreq_monitor_resume(struct devfreq *devfreq)
  204 +{
  205 + mutex_lock(&devfreq->lock);
  206 + if (!devfreq->stop_polling)
  207 + goto out;
  208 +
  209 + if (!delayed_work_pending(&devfreq->work) &&
  210 + devfreq->profile->polling_ms)
  211 + queue_delayed_work(devfreq_wq, &devfreq->work,
  212 + msecs_to_jiffies(devfreq->profile->polling_ms));
  213 + devfreq->stop_polling = false;
  214 +
  215 +out:
  216 + mutex_unlock(&devfreq->lock);
  217 +}
  218 +
  219 +/**
  220 + * devfreq_interval_update() - Update device devfreq monitoring interval
  221 + * @devfreq: the devfreq instance.
  222 + * @delay: new polling interval to be set.
  223 + *
  224 + * Helper function to set new load monitoring polling interval. Function
  225 + * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
  226 + */
  227 +void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
  228 +{
  229 + unsigned int cur_delay = devfreq->profile->polling_ms;
  230 + unsigned int new_delay = *delay;
  231 +
  232 + mutex_lock(&devfreq->lock);
  233 + devfreq->profile->polling_ms = new_delay;
  234 +
  235 + if (devfreq->stop_polling)
  236 + goto out;
  237 +
  238 + /* if new delay is zero, stop polling */
  239 + if (!new_delay) {
  240 + mutex_unlock(&devfreq->lock);
  241 + cancel_delayed_work_sync(&devfreq->work);
  242 + return;
  243 + }
  244 +
  245 + /* if current delay is zero, start polling with new delay */
  246 + if (!cur_delay) {
  247 + queue_delayed_work(devfreq_wq, &devfreq->work,
  248 + msecs_to_jiffies(devfreq->profile->polling_ms));
  249 + goto out;
  250 + }
  251 +
  252 + /* if current delay is greater than new delay, restart polling */
  253 + if (cur_delay > new_delay) {
  254 + mutex_unlock(&devfreq->lock);
  255 + cancel_delayed_work_sync(&devfreq->work);
  256 + mutex_lock(&devfreq->lock);
  257 + if (!devfreq->stop_polling)
  258 + queue_delayed_work(devfreq_wq, &devfreq->work,
  259 + msecs_to_jiffies(devfreq->profile->polling_ms));
  260 + }
  261 +out:
  262 + mutex_unlock(&devfreq->lock);
  263 +}
  264 +
  265 +/**
124 266 * devfreq_notifier_call() - Notify that the device frequency requirements
125 267 * has been changed out of devfreq framework.
126 268 * @nb the notifier_block (supposed to be devfreq->nb)
127 269  
128 270  
129 271  
130 272  
131 273  
132 274  
133 275  
134 276  
... ... @@ -143,59 +285,32 @@
143 285 }
144 286  
145 287 /**
146   - * _remove_devfreq() - Remove devfreq from the device.
  288 + * _remove_devfreq() - Remove devfreq from the list and release its resources.
147 289 * @devfreq: the devfreq struct
148 290 * @skip: skip calling device_unregister().
149   - *
150   - * Note that the caller should lock devfreq->lock before calling
151   - * this. _remove_devfreq() will unlock it and free devfreq
152   - * internally. devfreq_list_lock should be locked by the caller
153   - * as well (not relased at return)
154   - *
155   - * Lock usage:
156   - * devfreq->lock: locked before call.
157   - * unlocked at return (and freed)
158   - * devfreq_list_lock: locked before call.
159   - * kept locked at return.
160   - * if devfreq is centrally polled.
161   - *
162   - * Freed memory:
163   - * devfreq
164 291 */
165 292 static void _remove_devfreq(struct devfreq *devfreq, bool skip)
166 293 {
167   - if (!mutex_is_locked(&devfreq->lock)) {
168   - WARN(true, "devfreq->lock must be locked by the caller.\n");
  294 + mutex_lock(&devfreq_list_lock);
  295 + if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
  296 + mutex_unlock(&devfreq_list_lock);
  297 + dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
169 298 return;
170 299 }
171   - if (!devfreq->governor->no_central_polling &&
172   - !mutex_is_locked(&devfreq_list_lock)) {
173   - WARN(true, "devfreq_list_lock must be locked by the caller.\n");
174   - return;
175   - }
  300 + list_del(&devfreq->node);
  301 + mutex_unlock(&devfreq_list_lock);
176 302  
177   - if (devfreq->being_removed)
178   - return;
  303 + devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL);
179 304  
180   - devfreq->being_removed = true;
181   -
182 305 if (devfreq->profile->exit)
183 306 devfreq->profile->exit(devfreq->dev.parent);
184 307  
185   - if (devfreq->governor->exit)
186   - devfreq->governor->exit(devfreq);
187   -
188 308 if (!skip && get_device(&devfreq->dev)) {
189 309 device_unregister(&devfreq->dev);
190 310 put_device(&devfreq->dev);
191 311 }
192 312  
193   - if (!devfreq->governor->no_central_polling)
194   - list_del(&devfreq->node);
195   -
196   - mutex_unlock(&devfreq->lock);
197 313 mutex_destroy(&devfreq->lock);
198   -
199 314 kfree(devfreq);
200 315 }
201 316  
202 317  
203 318  
204 319  
... ... @@ -210,133 +325,11 @@
210 325 static void devfreq_dev_release(struct device *dev)
211 326 {
212 327 struct devfreq *devfreq = to_devfreq(dev);
213   - bool central_polling = !devfreq->governor->no_central_polling;
214 328  
215   - /*
216   - * If devfreq_dev_release() was called by device_unregister() of
217   - * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
218   - * being_removed is already set. This also partially checks the case
219   - * where devfreq_dev_release() is called from a thread other than
220   - * the one called _remove_devfreq(); however, this case is
221   - * dealt completely with another following being_removed check.
222   - *
223   - * Because being_removed is never being
224   - * unset, we do not need to worry about race conditions on
225   - * being_removed.
226   - */
227   - if (devfreq->being_removed)
228   - return;
229   -
230   - if (central_polling)
231   - mutex_lock(&devfreq_list_lock);
232   -
233   - mutex_lock(&devfreq->lock);
234   -
235   - /*
236   - * Check being_removed flag again for the case where
237   - * devfreq_dev_release() was called in a thread other than the one
238   - * possibly called _remove_devfreq().
239   - */
240   - if (devfreq->being_removed) {
241   - mutex_unlock(&devfreq->lock);
242   - goto out;
243   - }
244   -
245   - /* devfreq->lock is unlocked and removed in _removed_devfreq() */
246 329 _remove_devfreq(devfreq, true);
247   -
248   -out:
249   - if (central_polling)
250   - mutex_unlock(&devfreq_list_lock);
251 330 }
252 331  
253 332 /**
254   - * devfreq_monitor() - Periodically poll devfreq objects.
255   - * @work: the work struct used to run devfreq_monitor periodically.
256   - *
257   - */
258   -static void devfreq_monitor(struct work_struct *work)
259   -{
260   - static unsigned long last_polled_at;
261   - struct devfreq *devfreq, *tmp;
262   - int error;
263   - unsigned long jiffies_passed;
264   - unsigned long next_jiffies = ULONG_MAX, now = jiffies;
265   - struct device *dev;
266   -
267   - /* Initially last_polled_at = 0, polling every device at bootup */
268   - jiffies_passed = now - last_polled_at;
269   - last_polled_at = now;
270   - if (jiffies_passed == 0)
271   - jiffies_passed = 1;
272   -
273   - mutex_lock(&devfreq_list_lock);
274   - list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
275   - mutex_lock(&devfreq->lock);
276   - dev = devfreq->dev.parent;
277   -
278   - /* Do not remove tmp for a while */
279   - wait_remove_device = tmp;
280   -
281   - if (devfreq->governor->no_central_polling ||
282   - devfreq->next_polling == 0) {
283   - mutex_unlock(&devfreq->lock);
284   - continue;
285   - }
286   - mutex_unlock(&devfreq_list_lock);
287   -
288   - /*
289   - * Reduce more next_polling if devfreq_wq took an extra
290   - * delay. (i.e., CPU has been idled.)
291   - */
292   - if (devfreq->next_polling <= jiffies_passed) {
293   - error = update_devfreq(devfreq);
294   -
295   - /* Remove a devfreq with an error. */
296   - if (error && error != -EAGAIN) {
297   -
298   - dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
299   - error, devfreq->governor->name);
300   -
301   - /*
302   - * Unlock devfreq before locking the list
303   - * in order to avoid deadlock with
304   - * find_device_devfreq or others
305   - */
306   - mutex_unlock(&devfreq->lock);
307   - mutex_lock(&devfreq_list_lock);
308   - /* Check if devfreq is already removed */
309   - if (IS_ERR(find_device_devfreq(dev)))
310   - continue;
311   - mutex_lock(&devfreq->lock);
312   - /* This unlocks devfreq->lock and free it */
313   - _remove_devfreq(devfreq, false);
314   - continue;
315   - }
316   - devfreq->next_polling = devfreq->polling_jiffies;
317   - } else {
318   - devfreq->next_polling -= jiffies_passed;
319   - }
320   -
321   - if (devfreq->next_polling)
322   - next_jiffies = (next_jiffies > devfreq->next_polling) ?
323   - devfreq->next_polling : next_jiffies;
324   -
325   - mutex_unlock(&devfreq->lock);
326   - mutex_lock(&devfreq_list_lock);
327   - }
328   - wait_remove_device = NULL;
329   - mutex_unlock(&devfreq_list_lock);
330   -
331   - if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
332   - polling = true;
333   - queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
334   - } else {
335   - polling = false;
336   - }
337   -}
338   -
339   -/**
340 333 * devfreq_add_device() - Add devfreq feature to the device
341 334 * @dev: the device to add devfreq feature.
342 335 * @profile: device-specific profile to run devfreq.
... ... @@ -357,16 +350,13 @@
357 350 return ERR_PTR(-EINVAL);
358 351 }
359 352  
360   -
361   - if (!governor->no_central_polling) {
362   - mutex_lock(&devfreq_list_lock);
363   - devfreq = find_device_devfreq(dev);
364   - mutex_unlock(&devfreq_list_lock);
365   - if (!IS_ERR(devfreq)) {
366   - dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
367   - err = -EINVAL;
368   - goto err_out;
369   - }
  353 + mutex_lock(&devfreq_list_lock);
  354 + devfreq = find_device_devfreq(dev);
  355 + mutex_unlock(&devfreq_list_lock);
  356 + if (!IS_ERR(devfreq)) {
  357 + dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
  358 + err = -EINVAL;
  359 + goto err_out;
370 360 }
371 361  
372 362 devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
373 363  
374 364  
375 365  
376 366  
377 367  
378 368  
379 369  
380 370  
381 371  
382 372  
... ... @@ -386,48 +376,41 @@
386 376 devfreq->governor = governor;
387 377 devfreq->previous_freq = profile->initial_freq;
388 378 devfreq->data = data;
389   - devfreq->next_polling = devfreq->polling_jiffies
390   - = msecs_to_jiffies(devfreq->profile->polling_ms);
391 379 devfreq->nb.notifier_call = devfreq_notifier_call;
392 380  
393 381 dev_set_name(&devfreq->dev, dev_name(dev));
394 382 err = device_register(&devfreq->dev);
395 383 if (err) {
396 384 put_device(&devfreq->dev);
  385 + mutex_unlock(&devfreq->lock);
397 386 goto err_dev;
398 387 }
399 388  
400   - if (governor->init)
401   - err = governor->init(devfreq);
402   - if (err)
403   - goto err_init;
404   -
405 389 mutex_unlock(&devfreq->lock);
406 390  
407   - if (governor->no_central_polling)
408   - goto out;
409   -
410 391 mutex_lock(&devfreq_list_lock);
411   -
412 392 list_add(&devfreq->node, &devfreq_list);
  393 + mutex_unlock(&devfreq_list_lock);
413 394  
414   - if (devfreq_wq && devfreq->next_polling && !polling) {
415   - polling = true;
416   - queue_delayed_work(devfreq_wq, &devfreq_work,
417   - devfreq->next_polling);
  395 + err = devfreq->governor->event_handler(devfreq,
  396 + DEVFREQ_GOV_START, NULL);
  397 + if (err) {
  398 + dev_err(dev, "%s: Unable to start governor for the device\n",
  399 + __func__);
  400 + goto err_init;
418 401 }
419   - mutex_unlock(&devfreq_list_lock);
420   -out:
  402 +
421 403 return devfreq;
422 404  
423 405 err_init:
  406 + list_del(&devfreq->node);
424 407 device_unregister(&devfreq->dev);
425 408 err_dev:
426   - mutex_unlock(&devfreq->lock);
427 409 kfree(devfreq);
428 410 err_out:
429 411 return ERR_PTR(err);
430 412 }
  413 +EXPORT_SYMBOL(devfreq_add_device);
431 414  
432 415 /**
433 416 * devfreq_remove_device() - Remove devfreq feature from a device.
434 417  
435 418  
436 419  
... ... @@ -435,30 +418,14 @@
435 418 */
436 419 int devfreq_remove_device(struct devfreq *devfreq)
437 420 {
438   - bool central_polling;
439   -
440 421 if (!devfreq)
441 422 return -EINVAL;
442 423  
443   - central_polling = !devfreq->governor->no_central_polling;
  424 + _remove_devfreq(devfreq, false);
444 425  
445   - if (central_polling) {
446   - mutex_lock(&devfreq_list_lock);
447   - while (wait_remove_device == devfreq) {
448   - mutex_unlock(&devfreq_list_lock);
449   - schedule();
450   - mutex_lock(&devfreq_list_lock);
451   - }
452   - }
453   -
454   - mutex_lock(&devfreq->lock);
455   - _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
456   -
457   - if (central_polling)
458   - mutex_unlock(&devfreq_list_lock);
459   -
460 426 return 0;
461 427 }
  428 +EXPORT_SYMBOL(devfreq_remove_device);
462 429  
463 430 static ssize_t show_governor(struct device *dev,
464 431 struct device_attribute *attr, char *buf)
465 432  
466 433  
... ... @@ -490,35 +457,13 @@
490 457 if (ret != 1)
491 458 goto out;
492 459  
493   - mutex_lock(&df->lock);
494   - df->profile->polling_ms = value;
495   - df->next_polling = df->polling_jiffies
496   - = msecs_to_jiffies(value);
497   - mutex_unlock(&df->lock);
498   -
  460 + df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
499 461 ret = count;
500 462  
501   - if (df->governor->no_central_polling)
502   - goto out;
503   -
504   - mutex_lock(&devfreq_list_lock);
505   - if (df->next_polling > 0 && !polling) {
506   - polling = true;
507   - queue_delayed_work(devfreq_wq, &devfreq_work,
508   - df->next_polling);
509   - }
510   - mutex_unlock(&devfreq_list_lock);
511 463 out:
512 464 return ret;
513 465 }
514 466  
515   -static ssize_t show_central_polling(struct device *dev,
516   - struct device_attribute *attr, char *buf)
517   -{
518   - return sprintf(buf, "%d\n",
519   - !to_devfreq(dev)->governor->no_central_polling);
520   -}
521   -
522 467 static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
523 468 const char *buf, size_t count)
524 469 {
... ... @@ -590,7 +535,6 @@
590 535 static struct device_attribute devfreq_attrs[] = {
591 536 __ATTR(governor, S_IRUGO, show_governor, NULL),
592 537 __ATTR(cur_freq, S_IRUGO, show_freq, NULL),
593   - __ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
594 538 __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
595 539 store_polling_interval),
596 540 __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
... ... @@ -598,23 +542,6 @@
598 542 { },
599 543 };
600 544  
601   -/**
602   - * devfreq_start_polling() - Initialize data structure for devfreq framework and
603   - * start polling registered devfreq devices.
604   - */
605   -static int __init devfreq_start_polling(void)
606   -{
607   - mutex_lock(&devfreq_list_lock);
608   - polling = false;
609   - devfreq_wq = create_freezable_workqueue("devfreq_wq");
610   - INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor);
611   - mutex_unlock(&devfreq_list_lock);
612   -
613   - devfreq_monitor(&devfreq_work.work);
614   - return 0;
615   -}
616   -late_initcall(devfreq_start_polling);
617   -
618 545 static int __init devfreq_init(void)
619 546 {
620 547 devfreq_class = class_create(THIS_MODULE, "devfreq");
621 548  
... ... @@ -622,7 +549,15 @@
622 549 pr_err("%s: couldn't create class\n", __FILE__);
623 550 return PTR_ERR(devfreq_class);
624 551 }
  552 +
  553 + devfreq_wq = create_freezable_workqueue("devfreq_wq");
  554 + if (IS_ERR(devfreq_wq)) {
  555 + class_destroy(devfreq_class);
  556 + pr_err("%s: couldn't create workqueue\n", __FILE__);
  557 + return PTR_ERR(devfreq_wq);
  558 + }
625 559 devfreq_class->dev_attrs = devfreq_attrs;
  560 +
626 561 return 0;
627 562 }
628 563 subsys_initcall(devfreq_init);
... ... @@ -630,6 +565,7 @@
630 565 static void __exit devfreq_exit(void)
631 566 {
632 567 class_destroy(devfreq_class);
  568 + destroy_workqueue(devfreq_wq);
633 569 }
634 570 module_exit(devfreq_exit);
635 571  
drivers/devfreq/governor.h
... ... @@ -18,8 +18,19 @@
18 18  
19 19 #define to_devfreq(DEV) container_of((DEV), struct devfreq, dev)
20 20  
  21 +/* Devfreq events */
  22 +#define DEVFREQ_GOV_START 0x1
  23 +#define DEVFREQ_GOV_STOP 0x2
  24 +#define DEVFREQ_GOV_INTERVAL 0x3
  25 +
21 26 /* Caution: devfreq->lock must be locked before calling update_devfreq */
22 27 extern int update_devfreq(struct devfreq *devfreq);
23 28  
  29 +extern void devfreq_monitor_start(struct devfreq *devfreq);
  30 +extern void devfreq_monitor_stop(struct devfreq *devfreq);
  31 +extern void devfreq_monitor_suspend(struct devfreq *devfreq);
  32 +extern void devfreq_monitor_resume(struct devfreq *devfreq);
  33 +extern void devfreq_interval_update(struct devfreq *devfreq,
  34 + unsigned int *delay);
24 35 #endif /* _GOVERNOR_H */
drivers/devfreq/governor_performance.c
... ... @@ -26,15 +26,23 @@
26 26 return 0;
27 27 }
28 28  
29   -static int performance_init(struct devfreq *devfreq)
  29 +static int devfreq_performance_handler(struct devfreq *devfreq,
  30 + unsigned int event, void *data)
30 31 {
31   - return update_devfreq(devfreq);
  32 + int ret = 0;
  33 +
  34 + if (event == DEVFREQ_GOV_START) {
  35 + mutex_lock(&devfreq->lock);
  36 + ret = update_devfreq(devfreq);
  37 + mutex_unlock(&devfreq->lock);
  38 + }
  39 +
  40 + return ret;
32 41 }
33 42  
34 43 const struct devfreq_governor devfreq_performance = {
35 44 .name = "performance",
36   - .init = performance_init,
37 45 .get_target_freq = devfreq_performance_func,
38   - .no_central_polling = true,
  46 + .event_handler = devfreq_performance_handler,
39 47 };
drivers/devfreq/governor_powersave.c
... ... @@ -23,15 +23,23 @@
23 23 return 0;
24 24 }
25 25  
26   -static int powersave_init(struct devfreq *devfreq)
  26 +static int devfreq_powersave_handler(struct devfreq *devfreq,
  27 + unsigned int event, void *data)
27 28 {
28   - return update_devfreq(devfreq);
  29 + int ret = 0;
  30 +
  31 + if (event == DEVFREQ_GOV_START) {
  32 + mutex_lock(&devfreq->lock);
  33 + ret = update_devfreq(devfreq);
  34 + mutex_unlock(&devfreq->lock);
  35 + }
  36 +
  37 + return ret;
29 38 }
30 39  
31 40 const struct devfreq_governor devfreq_powersave = {
32 41 .name = "powersave",
33   - .init = powersave_init,
34 42 .get_target_freq = devfreq_powersave_func,
35   - .no_central_polling = true,
  43 + .event_handler = devfreq_powersave_handler,
36 44 };
drivers/devfreq/governor_simpleondemand.c
... ... @@ -12,6 +12,7 @@
12 12 #include <linux/errno.h>
13 13 #include <linux/devfreq.h>
14 14 #include <linux/math64.h>
  15 +#include "governor.h"
15 16  
16 17 /* Default constants for DevFreq-Simple-Ondemand (DFSO) */
17 18 #define DFSO_UPTHRESHOLD (90)
18 19  
... ... @@ -88,8 +89,31 @@
88 89 return 0;
89 90 }
90 91  
  92 +static int devfreq_simple_ondemand_handler(struct devfreq *devfreq,
  93 + unsigned int event, void *data)
  94 +{
  95 + switch (event) {
  96 + case DEVFREQ_GOV_START:
  97 + devfreq_monitor_start(devfreq);
  98 + break;
  99 +
  100 + case DEVFREQ_GOV_STOP:
  101 + devfreq_monitor_stop(devfreq);
  102 + break;
  103 +
  104 + case DEVFREQ_GOV_INTERVAL:
  105 + devfreq_interval_update(devfreq, (unsigned int *)data);
  106 + break;
  107 + default:
  108 + break;
  109 + }
  110 +
  111 + return 0;
  112 +}
  113 +
91 114 const struct devfreq_governor devfreq_simple_ondemand = {
92 115 .name = "simple_ondemand",
93 116 .get_target_freq = devfreq_simple_ondemand_func,
  117 + .event_handler = devfreq_simple_ondemand_handler,
94 118 };
drivers/devfreq/governor_userspace.c
... ... @@ -116,11 +116,28 @@
116 116 devfreq->data = NULL;
117 117 }
118 118  
  119 +static int devfreq_userspace_handler(struct devfreq *devfreq,
  120 + unsigned int event, void *data)
  121 +{
  122 + int ret = 0;
  123 +
  124 + switch (event) {
  125 + case DEVFREQ_GOV_START:
  126 + ret = userspace_init(devfreq);
  127 + break;
  128 + case DEVFREQ_GOV_STOP:
  129 + userspace_exit(devfreq);
  130 + break;
  131 + default:
  132 + break;
  133 + }
  134 +
  135 + return ret;
  136 +}
  137 +
119 138 const struct devfreq_governor devfreq_userspace = {
120 139 .name = "userspace",
121 140 .get_target_freq = devfreq_userspace_func,
122   - .init = userspace_init,
123   - .exit = userspace_exit,
124   - .no_central_polling = true,
  141 + .event_handler = devfreq_userspace_handler,
125 142 };
include/linux/devfreq.h
... ... @@ -91,25 +91,18 @@
91 91 * status of the device (load = busy_time / total_time).
92 92 * If no_central_polling is set, this callback is called
93 93 * only with update_devfreq() notified by OPP.
94   - * @init Called when the devfreq is being attached to a device
95   - * @exit Called when the devfreq is being removed from a
96   - * device. Governor should stop any internal routines
97   - * before return because related data may be
98   - * freed after exit().
99   - * @no_central_polling Do not use devfreq's central polling mechanism.
100   - * When this is set, devfreq will not call
101   - * get_target_freq with devfreq_monitor(). However,
102   - * devfreq will call get_target_freq with
103   - * devfreq_update() notified by OPP framework.
  94 + * @event_handler Callback for devfreq core framework to notify events
  95 + * to governors. Events include per device governor
  96 + * init and exit, opp changes out of devfreq, suspend
  97 + * and resume of per device devfreq during device idle.
104 98 *
105 99 * Note that the callbacks are called with devfreq->lock locked by devfreq.
106 100 */
107 101 struct devfreq_governor {
108 102 const char name[DEVFREQ_NAME_LEN];
109 103 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
110   - int (*init)(struct devfreq *this);
111   - void (*exit)(struct devfreq *this);
112   - const bool no_central_polling;
  104 + int (*event_handler)(struct devfreq *devfreq,
  105 + unsigned int event, void *data);
113 106 };
114 107  
115 108 /**
116 109  
117 110  
118 111  
... ... @@ -124,18 +117,13 @@
124 117 * @nb notifier block used to notify devfreq object that it should
125 118 * reevaluate operable frequencies. Devfreq users may use
126 119 * devfreq.nb to the corresponding register notifier call chain.
127   - * @polling_jiffies interval in jiffies.
  120 + * @work delayed work for load monitoring.
128 121 * @previous_freq previously configured frequency value.
129   - * @next_polling the number of remaining jiffies to poll with
130   - * "devfreq_monitor" executions to reevaluate
131   - * frequency/voltage of the device. Set by
132   - * profile's polling_ms interval.
133 122 * @data Private data of the governor. The devfreq framework does not
134 123 * touch this.
135   - * @being_removed a flag to mark that this object is being removed in
136   - * order to prevent trying to remove the object multiple times.
137 124 * @min_freq Limit minimum frequency requested by user (0: none)
138 125 * @max_freq Limit maximum frequency requested by user (0: none)
  126 + * @stop_polling devfreq polling status of a device.
139 127 *
140 128 * This structure stores the devfreq information for a give device.
141 129 *
142 130  
143 131  
144 132  
145 133  
... ... @@ -153,17 +141,15 @@
153 141 struct devfreq_dev_profile *profile;
154 142 const struct devfreq_governor *governor;
155 143 struct notifier_block nb;
  144 + struct delayed_work work;
156 145  
157   - unsigned long polling_jiffies;
158 146 unsigned long previous_freq;
159   - unsigned int next_polling;
160 147  
161 148 void *data; /* private data for governors */
162 149  
163   - bool being_removed;
164   -
165 150 unsigned long min_freq;
166 151 unsigned long max_freq;
  152 + bool stop_polling;
167 153 };
168 154  
169 155 #if defined(CONFIG_PM_DEVFREQ)