Commit c89b148fd3a8d6c2ea5e7c1c212716baee836af1

Authored by Linus Torvalds

Merge tag 'pm+acpi-3.9-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull ACPI and power management fixes from Rafael J Wysocki:

 - Two fixes for the new intel_pstate driver from Dirk Brandewie.

 - Fix for incorrect usage of the .find_bridge() callback from struct
   acpi_bus_type in the USB core and subsequent removal of that callback,
   from Rafael J Wysocki.

 - ACPI processor driver cleanups from Chen Gang and Syam Sidhardhan.

 - ACPI initialization and error messages fix from Joe Perches.

 - Operating Performance Points documentation improvement from Nishanth
   Menon.

 - Fixes for memory leaks, potential concurrency issues, and sysfs
   attribute leaks during device removal in the core device PM QoS code
   from Rafael J Wysocki.

 - Calxeda Highbank cpufreq driver simplification from Emilio López.

 - cpufreq comment cleanup from Namhyung Kim.

 - Fix for a section mismatch in Calxeda Highbank interprocessor
   communication code from Mark Langsdorf (this is not a PM fix strictly
   speaking, but the code in question went in through the PM tree).

* tag 'pm+acpi-3.9-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq / intel_pstate: Do not load on VM that does not report max P state.
  cpufreq / intel_pstate: Fix intel_pstate_init() error path
  ACPI / glue: Drop .find_bridge() callback from struct acpi_bus_type
  ACPI / glue: Add .match() callback to struct acpi_bus_type
  ACPI / processor: Beautify code, pr->id is u32 which is never < 0
  ACPI / processor: Remove redundant NULL check before kfree
  ACPI / Sleep: Avoid interleaved message on errors
  PM / QoS: Remove device PM QoS sysfs attributes at the right place
  PM / QoS: Fix concurrency issues and memory leaks in device PM QoS
  cpufreq: highbank: do not initialize array with a loop
  PM / OPP: improve introductory documentation
  cpufreq: Fix a typo in comment
  mailbox, pl320-ipc: remove __init from probe function

Showing 19 changed files Side-by-side Diff

Documentation/power/opp.txt
1   -*=============*
2   -* OPP Library *
3   -*=============*
  1 +Operating Performance Points (OPP) Library
  2 +==========================================
4 3  
5 4 (C) 2009-2010 Nishanth Menon <nm@ti.com>, Texas Instruments Incorporated
6 5  
7 6  
8 7  
... ... @@ -16,14 +15,30 @@
16 15  
17 16 1. Introduction
18 17 ===============
  18 +1.1 What is an Operating Performance Point (OPP)?
  19 +
19 20 Complex SoCs of today consists of a multiple sub-modules working in conjunction.
20 21 In an operational system executing varied use cases, not all modules in the SoC
21 22 need to function at their highest performing frequency all the time. To
22 23 facilitate this, sub-modules in a SoC are grouped into domains, allowing some
23   -domains to run at lower voltage and frequency while other domains are loaded
24   -more. The set of discrete tuples consisting of frequency and voltage pairs that
  24 +domains to run at lower voltage and frequency while other domains run at
  25 +voltage/frequency pairs that are higher.
  26 +
  27 +The set of discrete tuples consisting of frequency and voltage pairs that
25 28 the device will support per domain are called Operating Performance Points or
26 29 OPPs.
  30 +
  31 +As an example:
  32 +Let us consider an MPU device which supports the following:
  33 +{300MHz at minimum voltage of 1V}, {800MHz at minimum voltage of 1.2V},
  34 +{1GHz at minimum voltage of 1.3V}
  35 +
  36 +We can represent these as three OPPs as the following {Hz, uV} tuples:
  37 +{300000000, 1000000}
  38 +{800000000, 1200000}
  39 +{1000000000, 1300000}
  40 +
  41 +1.2 Operating Performance Points Library
27 42  
28 43 OPP library provides a set of helper functions to organize and query the OPP
29 44 information. The library is located in drivers/base/power/opp.c and the header
... ... @@ -36,12 +36,11 @@
36 36 {
37 37 if (acpi_disabled)
38 38 return -ENODEV;
39   - if (type && type->bus && type->find_device) {
  39 + if (type && type->match && type->find_device) {
40 40 down_write(&bus_type_sem);
41 41 list_add_tail(&type->list, &bus_type_list);
42 42 up_write(&bus_type_sem);
43   - printk(KERN_INFO PREFIX "bus type %s registered\n",
44   - type->bus->name);
  43 + printk(KERN_INFO PREFIX "bus type %s registered\n", type->name);
45 44 return 0;
46 45 }
47 46 return -ENODEV;
48 47  
49 48  
50 49  
... ... @@ -56,24 +55,21 @@
56 55 down_write(&bus_type_sem);
57 56 list_del_init(&type->list);
58 57 up_write(&bus_type_sem);
59   - printk(KERN_INFO PREFIX "ACPI bus type %s unregistered\n",
60   - type->bus->name);
  58 + printk(KERN_INFO PREFIX "bus type %s unregistered\n",
  59 + type->name);
61 60 return 0;
62 61 }
63 62 return -ENODEV;
64 63 }
65 64 EXPORT_SYMBOL_GPL(unregister_acpi_bus_type);
66 65  
67   -static struct acpi_bus_type *acpi_get_bus_type(struct bus_type *type)
  66 +static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
68 67 {
69 68 struct acpi_bus_type *tmp, *ret = NULL;
70 69  
71   - if (!type)
72   - return NULL;
73   -
74 70 down_read(&bus_type_sem);
75 71 list_for_each_entry(tmp, &bus_type_list, list) {
76   - if (tmp->bus == type) {
  72 + if (tmp->match(dev)) {
77 73 ret = tmp;
78 74 break;
79 75 }
... ... @@ -82,22 +78,6 @@
82 78 return ret;
83 79 }
84 80  
85   -static int acpi_find_bridge_device(struct device *dev, acpi_handle * handle)
86   -{
87   - struct acpi_bus_type *tmp;
88   - int ret = -ENODEV;
89   -
90   - down_read(&bus_type_sem);
91   - list_for_each_entry(tmp, &bus_type_list, list) {
92   - if (tmp->find_bridge && !tmp->find_bridge(dev, handle)) {
93   - ret = 0;
94   - break;
95   - }
96   - }
97   - up_read(&bus_type_sem);
98   - return ret;
99   -}
100   -
101 81 static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
102 82 void *addr_p, void **ret_p)
103 83 {
104 84  
... ... @@ -261,29 +241,12 @@
261 241  
262 242 static int acpi_platform_notify(struct device *dev)
263 243 {
264   - struct acpi_bus_type *type;
  244 + struct acpi_bus_type *type = acpi_get_bus_type(dev);
265 245 acpi_handle handle;
266 246 int ret;
267 247  
268 248 ret = acpi_bind_one(dev, NULL);
269   - if (ret && (!dev->bus || !dev->parent)) {
270   - /* bridge devices genernally haven't bus or parent */
271   - ret = acpi_find_bridge_device(dev, &handle);
272   - if (!ret) {
273   - ret = acpi_bind_one(dev, handle);
274   - if (ret)
275   - goto out;
276   - }
277   - }
278   -
279   - type = acpi_get_bus_type(dev->bus);
280   - if (ret) {
281   - if (!type || !type->find_device) {
282   - DBG("No ACPI bus support for %s\n", dev_name(dev));
283   - ret = -EINVAL;
284   - goto out;
285   - }
286   -
  249 + if (ret && type) {
287 250 ret = type->find_device(dev, &handle);
288 251 if (ret) {
289 252 DBG("Unable to get handle for %s\n", dev_name(dev));
... ... @@ -316,7 +279,7 @@
316 279 {
317 280 struct acpi_bus_type *type;
318 281  
319   - type = acpi_get_bus_type(dev->bus);
  282 + type = acpi_get_bus_type(dev);
320 283 if (type && type->cleanup)
321 284 type->cleanup(dev);
322 285  
drivers/acpi/processor_core.c
... ... @@ -158,8 +158,7 @@
158 158 }
159 159  
160 160 exit:
161   - if (buffer.pointer)
162   - kfree(buffer.pointer);
  161 + kfree(buffer.pointer);
163 162 return apic_id;
164 163 }
165 164  
drivers/acpi/processor_driver.c
... ... @@ -559,7 +559,7 @@
559 559 return 0;
560 560 #endif
561 561  
562   - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
  562 + BUG_ON(pr->id >= nr_cpu_ids);
563 563  
564 564 /*
565 565 * Buggy BIOS check
drivers/acpi/sleep.c
... ... @@ -599,7 +599,6 @@
599 599 status = acpi_get_sleep_type_data(i, &type_a, &type_b);
600 600 if (ACPI_SUCCESS(status)) {
601 601 sleep_states[i] = 1;
602   - pr_cont(" S%d", i);
603 602 }
604 603 }
605 604  
... ... @@ -742,7 +741,6 @@
742 741 hibernation_set_ops(old_suspend_ordering ?
743 742 &acpi_hibernation_ops_old : &acpi_hibernation_ops);
744 743 sleep_states[ACPI_STATE_S4] = 1;
745   - pr_cont(KERN_CONT " S4");
746 744 if (nosigcheck)
747 745 return;
748 746  
... ... @@ -788,6 +786,9 @@
788 786 {
789 787 acpi_status status;
790 788 u8 type_a, type_b;
  789 + char supported[ACPI_S_STATE_COUNT * 3 + 1];
  790 + char *pos = supported;
  791 + int i;
791 792  
792 793 if (acpi_disabled)
793 794 return 0;
... ... @@ -795,7 +796,6 @@
795 796 acpi_sleep_dmi_check();
796 797  
797 798 sleep_states[ACPI_STATE_S0] = 1;
798   - pr_info(PREFIX "(supports S0");
799 799  
800 800 acpi_sleep_suspend_setup();
801 801 acpi_sleep_hibernate_setup();
802 802  
... ... @@ -803,11 +803,17 @@
803 803 status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
804 804 if (ACPI_SUCCESS(status)) {
805 805 sleep_states[ACPI_STATE_S5] = 1;
806   - pr_cont(" S5");
807 806 pm_power_off_prepare = acpi_power_off_prepare;
808 807 pm_power_off = acpi_power_off;
809 808 }
810   - pr_cont(")\n");
  809 +
  810 + supported[0] = 0;
  811 + for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
  812 + if (sleep_states[i])
  813 + pos += sprintf(pos, " S%d", i);
  814 + }
  815 + pr_info(PREFIX "(supports%s)\n", supported);
  816 +
811 817 /*
812 818 * Register the tts_notifier to reboot notifier list so that the _TTS
813 819 * object can also be evaluated when the system enters S5.
drivers/ata/libata-acpi.c
... ... @@ -1144,13 +1144,8 @@
1144 1144 return -ENODEV;
1145 1145 }
1146 1146  
1147   -static int ata_acpi_find_dummy(struct device *dev, acpi_handle *handle)
1148   -{
1149   - return -ENODEV;
1150   -}
1151   -
1152 1147 static struct acpi_bus_type ata_acpi_bus = {
1153   - .find_bridge = ata_acpi_find_dummy,
  1148 + .name = "ATA",
1154 1149 .find_device = ata_acpi_find_device,
1155 1150 };
1156 1151  
drivers/base/power/main.c
... ... @@ -99,7 +99,6 @@
99 99 dev_warn(dev, "parent %s should not be sleeping\n",
100 100 dev_name(dev->parent));
101 101 list_add_tail(&dev->power.entry, &dpm_list);
102   - dev_pm_qos_constraints_init(dev);
103 102 mutex_unlock(&dpm_list_mtx);
104 103 }
105 104  
... ... @@ -113,7 +112,6 @@
113 112 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
114 113 complete_all(&dev->power.completion);
115 114 mutex_lock(&dpm_list_mtx);
116   - dev_pm_qos_constraints_destroy(dev);
117 115 list_del_init(&dev->power.entry);
118 116 mutex_unlock(&dpm_list_mtx);
119 117 device_wakeup_disable(dev);
drivers/base/power/power.h
... ... @@ -4,7 +4,7 @@
4 4 {
5 5 if (!dev->power.early_init) {
6 6 spin_lock_init(&dev->power.lock);
7   - dev->power.power_state = PMSG_INVALID;
  7 + dev->power.qos = NULL;
8 8 dev->power.early_init = true;
9 9 }
10 10 }
11 11  
... ... @@ -56,14 +56,10 @@
56 56  
57 57 static inline void device_pm_sleep_init(struct device *dev) {}
58 58  
59   -static inline void device_pm_add(struct device *dev)
60   -{
61   - dev_pm_qos_constraints_init(dev);
62   -}
  59 +static inline void device_pm_add(struct device *dev) {}
63 60  
64 61 static inline void device_pm_remove(struct device *dev)
65 62 {
66   - dev_pm_qos_constraints_destroy(dev);
67 63 pm_runtime_remove(dev);
68 64 }
69 65  
drivers/base/power/qos.c
... ... @@ -41,6 +41,7 @@
41 41 #include <linux/mutex.h>
42 42 #include <linux/export.h>
43 43 #include <linux/pm_runtime.h>
  44 +#include <linux/err.h>
44 45  
45 46 #include "power.h"
46 47  
... ... @@ -61,7 +62,7 @@
61 62 struct pm_qos_flags *pqf;
62 63 s32 val;
63 64  
64   - if (!qos)
  65 + if (IS_ERR_OR_NULL(qos))
65 66 return PM_QOS_FLAGS_UNDEFINED;
66 67  
67 68 pqf = &qos->flags;
... ... @@ -101,7 +102,8 @@
101 102 */
102 103 s32 __dev_pm_qos_read_value(struct device *dev)
103 104 {
104   - return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
  105 + return IS_ERR_OR_NULL(dev->power.qos) ?
  106 + 0 : pm_qos_read_value(&dev->power.qos->latency);
105 107 }
106 108  
107 109 /**
... ... @@ -198,20 +200,8 @@
198 200 return 0;
199 201 }
200 202  
201   -/**
202   - * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
203   - * @dev: target device
204   - *
205   - * Called from the device PM subsystem during device insertion under
206   - * device_pm_lock().
207   - */
208   -void dev_pm_qos_constraints_init(struct device *dev)
209   -{
210   - mutex_lock(&dev_pm_qos_mtx);
211   - dev->power.qos = NULL;
212   - dev->power.power_state = PMSG_ON;
213   - mutex_unlock(&dev_pm_qos_mtx);
214   -}
  203 +static void __dev_pm_qos_hide_latency_limit(struct device *dev);
  204 +static void __dev_pm_qos_hide_flags(struct device *dev);
215 205  
216 206 /**
217 207 * dev_pm_qos_constraints_destroy
218 208  
219 209  
... ... @@ -226,16 +216,15 @@
226 216 struct pm_qos_constraints *c;
227 217 struct pm_qos_flags *f;
228 218  
  219 + mutex_lock(&dev_pm_qos_mtx);
  220 +
229 221 /*
230 222 * If the device's PM QoS resume latency limit or PM QoS flags have been
231 223 * exposed to user space, they have to be hidden at this point.
232 224 */
233   - dev_pm_qos_hide_latency_limit(dev);
234   - dev_pm_qos_hide_flags(dev);
  225 + __dev_pm_qos_hide_latency_limit(dev);
  226 + __dev_pm_qos_hide_flags(dev);
235 227  
236   - mutex_lock(&dev_pm_qos_mtx);
237   -
238   - dev->power.power_state = PMSG_INVALID;
239 228 qos = dev->power.qos;
240 229 if (!qos)
241 230 goto out;
... ... @@ -257,7 +246,7 @@
257 246 }
258 247  
259 248 spin_lock_irq(&dev->power.lock);
260   - dev->power.qos = NULL;
  249 + dev->power.qos = ERR_PTR(-ENODEV);
261 250 spin_unlock_irq(&dev->power.lock);
262 251  
263 252 kfree(c->notifiers);
264 253  
265 254  
266 255  
... ... @@ -301,32 +290,19 @@
301 290 "%s() called for already added request\n", __func__))
302 291 return -EINVAL;
303 292  
304   - req->dev = dev;
305   -
306 293 mutex_lock(&dev_pm_qos_mtx);
307 294  
308   - if (!dev->power.qos) {
309   - if (dev->power.power_state.event == PM_EVENT_INVALID) {
310   - /* The device has been removed from the system. */
311   - req->dev = NULL;
312   - ret = -ENODEV;
313   - goto out;
314   - } else {
315   - /*
316   - * Allocate the constraints data on the first call to
317   - * add_request, i.e. only if the data is not already
318   - * allocated and if the device has not been removed.
319   - */
320   - ret = dev_pm_qos_constraints_allocate(dev);
321   - }
322   - }
  295 + if (IS_ERR(dev->power.qos))
  296 + ret = -ENODEV;
  297 + else if (!dev->power.qos)
  298 + ret = dev_pm_qos_constraints_allocate(dev);
323 299  
324 300 if (!ret) {
  301 + req->dev = dev;
325 302 req->type = type;
326 303 ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
327 304 }
328 305  
329   - out:
330 306 mutex_unlock(&dev_pm_qos_mtx);
331 307  
332 308 return ret;
... ... @@ -344,7 +320,14 @@
344 320 s32 curr_value;
345 321 int ret = 0;
346 322  
347   - if (!req->dev->power.qos)
  323 + if (!req) /*guard against callers passing in null */
  324 + return -EINVAL;
  325 +
  326 + if (WARN(!dev_pm_qos_request_active(req),
  327 + "%s() called for unknown object\n", __func__))
  328 + return -EINVAL;
  329 +
  330 + if (IS_ERR_OR_NULL(req->dev->power.qos))
348 331 return -ENODEV;
349 332  
350 333 switch(req->type) {
... ... @@ -386,6 +369,17 @@
386 369 {
387 370 int ret;
388 371  
  372 + mutex_lock(&dev_pm_qos_mtx);
  373 + ret = __dev_pm_qos_update_request(req, new_value);
  374 + mutex_unlock(&dev_pm_qos_mtx);
  375 + return ret;
  376 +}
  377 +EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
  378 +
  379 +static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
  380 +{
  381 + int ret;
  382 +
389 383 if (!req) /*guard against callers passing in null */
390 384 return -EINVAL;
391 385  
392 386  
393 387  
... ... @@ -393,13 +387,13 @@
393 387 "%s() called for unknown object\n", __func__))
394 388 return -EINVAL;
395 389  
396   - mutex_lock(&dev_pm_qos_mtx);
397   - ret = __dev_pm_qos_update_request(req, new_value);
398   - mutex_unlock(&dev_pm_qos_mtx);
  390 + if (IS_ERR_OR_NULL(req->dev->power.qos))
  391 + return -ENODEV;
399 392  
  393 + ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
  394 + memset(req, 0, sizeof(*req));
400 395 return ret;
401 396 }
402   -EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
403 397  
404 398 /**
405 399 * dev_pm_qos_remove_request - modifies an existing qos request
406 400  
407 401  
... ... @@ -418,26 +412,10 @@
418 412 */
419 413 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
420 414 {
421   - int ret = 0;
  415 + int ret;
422 416  
423   - if (!req) /*guard against callers passing in null */
424   - return -EINVAL;
425   -
426   - if (WARN(!dev_pm_qos_request_active(req),
427   - "%s() called for unknown object\n", __func__))
428   - return -EINVAL;
429   -
430 417 mutex_lock(&dev_pm_qos_mtx);
431   -
432   - if (req->dev->power.qos) {
433   - ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
434   - PM_QOS_DEFAULT_VALUE);
435   - memset(req, 0, sizeof(*req));
436   - } else {
437   - /* Return if the device has been removed */
438   - ret = -ENODEV;
439   - }
440   -
  418 + ret = __dev_pm_qos_remove_request(req);
441 419 mutex_unlock(&dev_pm_qos_mtx);
442 420 return ret;
443 421 }
... ... @@ -462,9 +440,10 @@
462 440  
463 441 mutex_lock(&dev_pm_qos_mtx);
464 442  
465   - if (!dev->power.qos)
466   - ret = dev->power.power_state.event != PM_EVENT_INVALID ?
467   - dev_pm_qos_constraints_allocate(dev) : -ENODEV;
  443 + if (IS_ERR(dev->power.qos))
  444 + ret = -ENODEV;
  445 + else if (!dev->power.qos)
  446 + ret = dev_pm_qos_constraints_allocate(dev);
468 447  
469 448 if (!ret)
470 449 ret = blocking_notifier_chain_register(
... ... @@ -493,7 +472,7 @@
493 472 mutex_lock(&dev_pm_qos_mtx);
494 473  
495 474 /* Silently return if the constraints object is not present. */
496   - if (dev->power.qos)
  475 + if (!IS_ERR_OR_NULL(dev->power.qos))
497 476 retval = blocking_notifier_chain_unregister(
498 477 dev->power.qos->latency.notifiers,
499 478 notifier);
500 479  
501 480  
502 481  
... ... @@ -563,16 +542,20 @@
563 542 static void __dev_pm_qos_drop_user_request(struct device *dev,
564 543 enum dev_pm_qos_req_type type)
565 544 {
  545 + struct dev_pm_qos_request *req = NULL;
  546 +
566 547 switch(type) {
567 548 case DEV_PM_QOS_LATENCY:
568   - dev_pm_qos_remove_request(dev->power.qos->latency_req);
  549 + req = dev->power.qos->latency_req;
569 550 dev->power.qos->latency_req = NULL;
570 551 break;
571 552 case DEV_PM_QOS_FLAGS:
572   - dev_pm_qos_remove_request(dev->power.qos->flags_req);
  553 + req = dev->power.qos->flags_req;
573 554 dev->power.qos->flags_req = NULL;
574 555 break;
575 556 }
  557 + __dev_pm_qos_remove_request(req);
  558 + kfree(req);
576 559 }
577 560  
578 561 /**
579 562  
580 563  
581 564  
582 565  
583 566  
584 567  
... ... @@ -588,36 +571,57 @@
588 571 if (!device_is_registered(dev) || value < 0)
589 572 return -EINVAL;
590 573  
591   - if (dev->power.qos && dev->power.qos->latency_req)
592   - return -EEXIST;
593   -
594 574 req = kzalloc(sizeof(*req), GFP_KERNEL);
595 575 if (!req)
596 576 return -ENOMEM;
597 577  
598 578 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
599   - if (ret < 0)
  579 + if (ret < 0) {
  580 + kfree(req);
600 581 return ret;
  582 + }
601 583  
  584 + mutex_lock(&dev_pm_qos_mtx);
  585 +
  586 + if (IS_ERR_OR_NULL(dev->power.qos))
  587 + ret = -ENODEV;
  588 + else if (dev->power.qos->latency_req)
  589 + ret = -EEXIST;
  590 +
  591 + if (ret < 0) {
  592 + __dev_pm_qos_remove_request(req);
  593 + kfree(req);
  594 + goto out;
  595 + }
  596 +
602 597 dev->power.qos->latency_req = req;
603 598 ret = pm_qos_sysfs_add_latency(dev);
604 599 if (ret)
605 600 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
606 601  
  602 + out:
  603 + mutex_unlock(&dev_pm_qos_mtx);
607 604 return ret;
608 605 }
609 606 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
610 607  
  608 +static void __dev_pm_qos_hide_latency_limit(struct device *dev)
  609 +{
  610 + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
  611 + pm_qos_sysfs_remove_latency(dev);
  612 + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
  613 + }
  614 +}
  615 +
611 616 /**
612 617 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
613 618 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
614 619 */
615 620 void dev_pm_qos_hide_latency_limit(struct device *dev)
616 621 {
617   - if (dev->power.qos && dev->power.qos->latency_req) {
618   - pm_qos_sysfs_remove_latency(dev);
619   - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
620   - }
  622 + mutex_lock(&dev_pm_qos_mtx);
  623 + __dev_pm_qos_hide_latency_limit(dev);
  624 + mutex_unlock(&dev_pm_qos_mtx);
621 625 }
622 626 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
623 627  
624 628  
625 629  
626 630  
627 631  
628 632  
629 633  
... ... @@ -634,41 +638,61 @@
634 638 if (!device_is_registered(dev))
635 639 return -EINVAL;
636 640  
637   - if (dev->power.qos && dev->power.qos->flags_req)
638   - return -EEXIST;
639   -
640 641 req = kzalloc(sizeof(*req), GFP_KERNEL);
641 642 if (!req)
642 643 return -ENOMEM;
643 644  
644   - pm_runtime_get_sync(dev);
645 645 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
646   - if (ret < 0)
647   - goto fail;
  646 + if (ret < 0) {
  647 + kfree(req);
  648 + return ret;
  649 + }
648 650  
  651 + pm_runtime_get_sync(dev);
  652 + mutex_lock(&dev_pm_qos_mtx);
  653 +
  654 + if (IS_ERR_OR_NULL(dev->power.qos))
  655 + ret = -ENODEV;
  656 + else if (dev->power.qos->flags_req)
  657 + ret = -EEXIST;
  658 +
  659 + if (ret < 0) {
  660 + __dev_pm_qos_remove_request(req);
  661 + kfree(req);
  662 + goto out;
  663 + }
  664 +
649 665 dev->power.qos->flags_req = req;
650 666 ret = pm_qos_sysfs_add_flags(dev);
651 667 if (ret)
652 668 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
653 669  
654   -fail:
  670 + out:
  671 + mutex_unlock(&dev_pm_qos_mtx);
655 672 pm_runtime_put(dev);
656 673 return ret;
657 674 }
658 675 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
659 676  
  677 +static void __dev_pm_qos_hide_flags(struct device *dev)
  678 +{
  679 + if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
  680 + pm_qos_sysfs_remove_flags(dev);
  681 + __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
  682 + }
  683 +}
  684 +
660 685 /**
661 686 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
662 687 * @dev: Device whose PM QoS flags are to be hidden from user space.
663 688 */
664 689 void dev_pm_qos_hide_flags(struct device *dev)
665 690 {
666   - if (dev->power.qos && dev->power.qos->flags_req) {
667   - pm_qos_sysfs_remove_flags(dev);
668   - pm_runtime_get_sync(dev);
669   - __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
670   - pm_runtime_put(dev);
671   - }
  691 + pm_runtime_get_sync(dev);
  692 + mutex_lock(&dev_pm_qos_mtx);
  693 + __dev_pm_qos_hide_flags(dev);
  694 + mutex_unlock(&dev_pm_qos_mtx);
  695 + pm_runtime_put(dev);
672 696 }
673 697 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
674 698  
675 699  
... ... @@ -683,12 +707,14 @@
683 707 s32 value;
684 708 int ret;
685 709  
686   - if (!dev->power.qos || !dev->power.qos->flags_req)
687   - return -EINVAL;
688   -
689 710 pm_runtime_get_sync(dev);
690 711 mutex_lock(&dev_pm_qos_mtx);
691 712  
  713 + if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
  714 + ret = -EINVAL;
  715 + goto out;
  716 + }
  717 +
692 718 value = dev_pm_qos_requested_flags(dev);
693 719 if (set)
694 720 value |= mask;
695 721  
696 722  
... ... @@ -697,10 +723,13 @@
697 723  
698 724 ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
699 725  
  726 + out:
700 727 mutex_unlock(&dev_pm_qos_mtx);
701 728 pm_runtime_put(dev);
702   -
703 729 return ret;
704 730 }
  731 +#else /* !CONFIG_PM_RUNTIME */
  732 +static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
  733 +static void __dev_pm_qos_hide_flags(struct device *dev) {}
705 734 #endif /* CONFIG_PM_RUNTIME */
drivers/base/power/sysfs.c
... ... @@ -708,6 +708,7 @@
708 708  
709 709 void dpm_sysfs_remove(struct device *dev)
710 710 {
  711 + dev_pm_qos_constraints_destroy(dev);
711 712 rpm_sysfs_remove(dev);
712 713 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
713 714 sysfs_remove_group(&dev->kobj, &pm_attr_group);
drivers/cpufreq/cpufreq_governor.h
... ... @@ -64,7 +64,7 @@
64 64 * dbs: used as a shortform for demand based switching It helps to keep variable
65 65 * names smaller, simpler
66 66 * cdbs: common dbs
67   - * on_*: On-demand governor
  67 + * od_*: On-demand governor
68 68 * cs_*: Conservative governor
69 69 */
70 70  
drivers/cpufreq/highbank-cpufreq.c
... ... @@ -28,13 +28,7 @@
28 28  
29 29 static int hb_voltage_change(unsigned int freq)
30 30 {
31   - int i;
32   - u32 msg[HB_CPUFREQ_IPC_LEN];
33   -
34   - msg[0] = HB_CPUFREQ_CHANGE_NOTE;
35   - msg[1] = freq / 1000000;
36   - for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++)
37   - msg[i] = 0;
  31 + u32 msg[HB_CPUFREQ_IPC_LEN] = {HB_CPUFREQ_CHANGE_NOTE, freq / 1000000};
38 32  
39 33 return pl320_ipc_transmit(msg);
40 34 }
drivers/cpufreq/intel_pstate.c
... ... @@ -662,6 +662,9 @@
662 662  
663 663 cpu = all_cpu_data[policy->cpu];
664 664  
  665 + if (!policy->cpuinfo.max_freq)
  666 + return -ENODEV;
  667 +
665 668 intel_pstate_get_min_max(cpu, &min, &max);
666 669  
667 670 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
668 671  
... ... @@ -747,37 +750,11 @@
747 750 .owner = THIS_MODULE,
748 751 };
749 752  
750   -static void intel_pstate_exit(void)
751   -{
752   - int cpu;
753   -
754   - sysfs_remove_group(intel_pstate_kobject,
755   - &intel_pstate_attr_group);
756   - debugfs_remove_recursive(debugfs_parent);
757   -
758   - cpufreq_unregister_driver(&intel_pstate_driver);
759   -
760   - if (!all_cpu_data)
761   - return;
762   -
763   - get_online_cpus();
764   - for_each_online_cpu(cpu) {
765   - if (all_cpu_data[cpu]) {
766   - del_timer_sync(&all_cpu_data[cpu]->timer);
767   - kfree(all_cpu_data[cpu]);
768   - }
769   - }
770   -
771   - put_online_cpus();
772   - vfree(all_cpu_data);
773   -}
774   -module_exit(intel_pstate_exit);
775   -
776 753 static int __initdata no_load;
777 754  
778 755 static int __init intel_pstate_init(void)
779 756 {
780   - int rc = 0;
  757 + int cpu, rc = 0;
781 758 const struct x86_cpu_id *id;
782 759  
783 760 if (no_load)
... ... @@ -802,7 +779,16 @@
802 779 intel_pstate_sysfs_expose_params();
803 780 return rc;
804 781 out:
805   - intel_pstate_exit();
  782 + get_online_cpus();
  783 + for_each_online_cpu(cpu) {
  784 + if (all_cpu_data[cpu]) {
  785 + del_timer_sync(&all_cpu_data[cpu]->timer);
  786 + kfree(all_cpu_data[cpu]);
  787 + }
  788 + }
  789 +
  790 + put_online_cpus();
  791 + vfree(all_cpu_data);
806 792 return -ENODEV;
807 793 }
808 794 device_initcall(intel_pstate_init);
drivers/mailbox/pl320-ipc.c
... ... @@ -138,8 +138,7 @@
138 138 }
139 139 EXPORT_SYMBOL_GPL(pl320_ipc_unregister_notifier);
140 140  
141   -static int __init pl320_probe(struct amba_device *adev,
142   - const struct amba_id *id)
  141 +static int pl320_probe(struct amba_device *adev, const struct amba_id *id)
143 142 {
144 143 int ret;
145 144  
drivers/pci/pci-acpi.c
... ... @@ -331,8 +331,14 @@
331 331 }
332 332 }
333 333  
  334 +static bool pci_acpi_bus_match(struct device *dev)
  335 +{
  336 + return dev->bus == &pci_bus_type;
  337 +}
  338 +
334 339 static struct acpi_bus_type acpi_pci_bus = {
335   - .bus = &pci_bus_type,
  340 + .name = "PCI",
  341 + .match = pci_acpi_bus_match,
336 342 .find_device = acpi_pci_find_device,
337 343 .setup = pci_acpi_setup,
338 344 .cleanup = pci_acpi_cleanup,
drivers/pnp/pnpacpi/core.c
... ... @@ -353,8 +353,14 @@
353 353 /* complete initialization of a PNPACPI device includes having
354 354 * pnpdev->dev.archdata.acpi_handle point to its ACPI sibling.
355 355 */
  356 +static bool acpi_pnp_bus_match(struct device *dev)
  357 +{
  358 + return dev->bus == &pnp_bus_type;
  359 +}
  360 +
356 361 static struct acpi_bus_type __initdata acpi_pnp_bus = {
357   - .bus = &pnp_bus_type,
  362 + .name = "PNP",
  363 + .match = acpi_pnp_bus_match,
358 364 .find_device = acpi_pnp_find_device,
359 365 };
360 366  
drivers/scsi/scsi_lib.c
... ... @@ -71,9 +71,14 @@
71 71 #ifdef CONFIG_ACPI
72 72 #include <acpi/acpi_bus.h>
73 73  
  74 +static bool acpi_scsi_bus_match(struct device *dev)
  75 +{
  76 + return dev->bus == &scsi_bus_type;
  77 +}
  78 +
74 79 int scsi_register_acpi_bus_type(struct acpi_bus_type *bus)
75 80 {
76   - bus->bus = &scsi_bus_type;
  81 + bus->match = acpi_scsi_bus_match;
77 82 return register_acpi_bus_type(bus);
78 83 }
79 84 EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type);
drivers/usb/core/usb-acpi.c
... ... @@ -210,9 +210,14 @@
210 210 return 0;
211 211 }
212 212  
  213 +static bool usb_acpi_bus_match(struct device *dev)
  214 +{
  215 + return is_usb_device(dev) || is_usb_port(dev);
  216 +}
  217 +
213 218 static struct acpi_bus_type usb_acpi_bus = {
214   - .bus = &usb_bus_type,
215   - .find_bridge = usb_acpi_find_device,
  219 + .name = "USB",
  220 + .match = usb_acpi_bus_match,
216 221 .find_device = usb_acpi_find_device,
217 222 };
218 223  
include/acpi/acpi_bus.h
... ... @@ -437,11 +437,9 @@
437 437 */
438 438 struct acpi_bus_type {
439 439 struct list_head list;
440   - struct bus_type *bus;
441   - /* For general devices under the bus */
  440 + const char *name;
  441 + bool (*match)(struct device *dev);
442 442 int (*find_device) (struct device *, acpi_handle *);
443   - /* For bridges, such as PCI root bridge, IDE controller */
444   - int (*find_bridge) (struct device *, acpi_handle *);
445 443 void (*setup)(struct device *);
446 444 void (*cleanup)(struct device *);
447 445 };