Commit df90dcd1007bc498927afea18ccfaae02e361707

Authored by Linus Torvalds

Merge tag 'pm+acpi-3.19-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management and ACPI material from Rafael J. Wysocki:
 "These are fixes (operating performance points library, cpufreq-dt
  driver, cpufreq core, ACPI backlight, cpupower tool), cleanups
  (cpuidle), new processor IDs for the RAPL (Running Average Power
  Limit) power capping driver, and a modification of the generic power
  domains framework allowing modular drivers to call one of its helper
  functions.

  Specifics:

   - Fix for a potential NULL pointer dereference in the cpufreq core
     due to an initialization race condition (Ethan Zhao).

   - Fixes for abuse of the OPP (Operating Performance Points) API
     related to RCU and other minor issues in the OPP library and the
     cpufreq-dt driver (Dmitry Torokhov).

   - cpuidle governors cleanup making them measure idle duration in a
     better way without using the CPUIDLE_FLAG_TIME_INVALID flag, which
     allows that flag to be dropped from the ACPI cpuidle driver and
     from the core too (Len Brown).

   - New ACPI backlight blacklist entries for Samsung machines without a
     working native backlight interface that need to use the ACPI
     backlight instead (Aaron Lu).

   - New CPU IDs of future Intel Xeon CPUs for the Intel RAPL power
     capping driver (Jacob Pan).

   - Generic power domains framework modification to export the
     of_genpd_get_from_provider() function to modular drivers, which
     will allow future driver modifications to be based on the mainline
     (Amit Daniel Kachhap).

   - Two fixes for the cpupower tool (Michal Privoznik, Prarit
     Bhargava)"

* tag 'pm+acpi-3.19-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  ACPI / video: Add some Samsung models to disable_native_backlight list
  tools / cpupower: Fix no idle state information return value
  tools / cpupower: Correctly detect if running as root
  cpufreq: fix a NULL pointer dereference in __cpufreq_governor()
  cpufreq-dt: defer probing if OPP table is not ready
  PM / OPP: take RCU lock in dev_pm_opp_get_opp_count
  PM / OPP: fix warning in of_free_opp_table()
  PM / OPP: add some lockdep annotations
  powercap / RAPL: add IDs for future Xeon CPUs
  PM / Domains: Export of_genpd_get_from_provider function
  cpuidle / ACPI: remove unused CPUIDLE_FLAG_TIME_INVALID
  cpuidle: ladder: Better idle duration measurement without using CPUIDLE_FLAG_TIME_INVALID
  cpuidle: menu: Better idle duration measurement without using CPUIDLE_FLAG_TIME_INVALID

Showing 13 changed files:

drivers/acpi/processor_idle.c
... ... @@ -985,8 +985,6 @@
985 985 state->flags = 0;
986 986 switch (cx->type) {
987 987 case ACPI_STATE_C1:
988   - if (cx->entry_method != ACPI_CSTATE_FFH)
989   - state->flags |= CPUIDLE_FLAG_TIME_INVALID;
990 988  
991 989 state->enter = acpi_idle_enter_c1;
992 990 state->enter_dead = acpi_idle_play_dead;
drivers/acpi/video.c
... ... @@ -505,6 +505,23 @@
505 505 DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
506 506 },
507 507 },
  508 +
  509 + {
  510 + .callback = video_disable_native_backlight,
  511 + .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E",
  512 + .matches = {
  513 + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
  514 + DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"),
  515 + },
  516 + },
  517 + {
  518 + .callback = video_disable_native_backlight,
  519 + .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V",
  520 + .matches = {
  521 + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
  522 + DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
  523 + },
  524 + },
508 525 {}
509 526 };
510 527  
drivers/base/power/domain.c
... ... @@ -2088,7 +2088,7 @@
2088 2088 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2089 2089 * on failure.
2090 2090 */
2091   -static struct generic_pm_domain *of_genpd_get_from_provider(
  2091 +struct generic_pm_domain *of_genpd_get_from_provider(
2092 2092 struct of_phandle_args *genpdspec)
2093 2093 {
2094 2094 struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
... ... @@ -2108,6 +2108,7 @@
2108 2108  
2109 2109 return genpd;
2110 2110 }
  2111 +EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
2111 2112  
2112 2113 /**
2113 2114 * genpd_dev_pm_detach - Detach a device from its PM domain.
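
For context on the export above: a modular driver can now resolve a power-domain phandle from its device tree node and pass the result to of_genpd_get_from_provider(). Below is a minimal sketch of such a caller, assuming the standard "power-domains"/"#power-domain-cells" binding; the helper name example_lookup_domain() is made up for illustration and is not part of this commit.

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/of.h>
    #include <linux/pm_domain.h>

    /* Hypothetical helper in a modular driver: map a device's DT node to its genpd. */
    static struct generic_pm_domain *example_lookup_domain(struct device *dev)
    {
        struct of_phandle_args pd_args;
        int ret;

        ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
                                         "#power-domain-cells", 0, &pd_args);
        if (ret < 0)
            return ERR_PTR(ret);

        /* Callable from a module now that the symbol is exported above. */
        return of_genpd_get_from_provider(&pd_args);
    }
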
drivers/base/power/opp.c
... ... @@ -108,6 +108,14 @@
108 108 /* Lock to allow exclusive modification to the device and opp lists */
109 109 static DEFINE_MUTEX(dev_opp_list_lock);
110 110  
  111 +#define opp_rcu_lockdep_assert() \
  112 +do { \
  113 + rcu_lockdep_assert(rcu_read_lock_held() || \
  114 + lockdep_is_held(&dev_opp_list_lock), \
  115 + "Missing rcu_read_lock() or " \
  116 + "dev_opp_list_lock protection"); \
  117 +} while (0)
  118 +
111 119 /**
112 120 * find_device_opp() - find device_opp struct using device pointer
113 121 * @dev: device pointer used to lookup device OPPs
... ... @@ -208,9 +216,7 @@
208 216 * This function returns the number of available opps if there are any,
209 217 * else returns 0 if none or the corresponding error value.
210 218 *
211   - * Locking: This function must be called under rcu_read_lock(). This function
212   - * internally references two RCU protected structures: device_opp and opp which
213   - * are safe as long as we are under a common RCU locked section.
  219 + * Locking: This function takes rcu_read_lock().
214 220 */
215 221 int dev_pm_opp_get_opp_count(struct device *dev)
216 222 {
... ... @@ -218,11 +224,14 @@
218 224 struct dev_pm_opp *temp_opp;
219 225 int count = 0;
220 226  
  227 + rcu_read_lock();
  228 +
221 229 dev_opp = find_device_opp(dev);
222 230 if (IS_ERR(dev_opp)) {
223   - int r = PTR_ERR(dev_opp);
224   - dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
225   - return r;
  231 + count = PTR_ERR(dev_opp);
  232 + dev_err(dev, "%s: device OPP not found (%d)\n",
  233 + __func__, count);
  234 + goto out_unlock;
226 235 }
227 236  
228 237 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
... ... @@ -230,6 +239,8 @@
230 239 count++;
231 240 }
232 241  
  242 +out_unlock:
  243 + rcu_read_unlock();
233 244 return count;
234 245 }
235 246 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
... ... @@ -267,6 +278,8 @@
267 278 struct device_opp *dev_opp;
268 279 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
269 280  
  281 + opp_rcu_lockdep_assert();
  282 +
270 283 dev_opp = find_device_opp(dev);
271 284 if (IS_ERR(dev_opp)) {
272 285 int r = PTR_ERR(dev_opp);
... ... @@ -313,6 +326,8 @@
313 326 struct device_opp *dev_opp;
314 327 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
315 328  
  329 + opp_rcu_lockdep_assert();
  330 +
316 331 if (!dev || !freq) {
317 332 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
318 333 return ERR_PTR(-EINVAL);
... ... @@ -361,6 +376,8 @@
361 376 struct device_opp *dev_opp;
362 377 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
363 378  
  379 + opp_rcu_lockdep_assert();
  380 +
364 381 if (!dev || !freq) {
365 382 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
366 383 return ERR_PTR(-EINVAL);
... ... @@ -783,9 +800,15 @@
783 800  
784 801 /* Check for existing list for 'dev' */
785 802 dev_opp = find_device_opp(dev);
786   - if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev),
787   - PTR_ERR(dev_opp)))
  803 + if (IS_ERR(dev_opp)) {
  804 + int error = PTR_ERR(dev_opp);
  805 + if (error != -ENODEV)
  806 + WARN(1, "%s: dev_opp: %d\n",
  807 + IS_ERR_OR_NULL(dev) ?
  808 + "Invalid device" : dev_name(dev),
  809 + error);
788 810 return;
  811 + }
789 812  
790 813 /* Hold our list modification lock here */
791 814 mutex_lock(&dev_opp_list_lock);
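
Net effect of the locking changes above: dev_pm_opp_get_opp_count() now takes rcu_read_lock() internally, while the dev_pm_opp_find_freq_*() helpers only assert via lockdep that the caller holds either rcu_read_lock() or dev_opp_list_lock. A minimal sketch of a caller under those rules; example_pick_opp() is a hypothetical consumer, not code from this series.

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/pm_opp.h>
    #include <linux/rcupdate.h>

    static int example_pick_opp(struct device *dev, unsigned long *freq)
    {
        struct dev_pm_opp *opp;
        int count;

        /* No explicit RCU locking is needed around the count helper any more. */
        count = dev_pm_opp_get_opp_count(dev);
        if (count <= 0)
            return count ? count : -ENODEV;

        /* The find helpers still expect the caller to hold rcu_read_lock(). */
        rcu_read_lock();
        opp = dev_pm_opp_find_freq_ceil(dev, freq);
        if (IS_ERR(opp)) {
            rcu_read_unlock();
            return PTR_ERR(opp);
        }
        *freq = dev_pm_opp_get_freq(opp);
        rcu_read_unlock();
        return 0;
    }
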
drivers/cpufreq/cpufreq-dt.c
... ... @@ -211,6 +211,17 @@
211 211 /* OPPs might be populated at runtime, don't check for error here */
212 212 of_init_opp_table(cpu_dev);
213 213  
  214 + /*
  215 + * But we need OPP table to function so if it is not there let's
  216 + * give platform code chance to provide it for us.
  217 + */
  218 + ret = dev_pm_opp_get_opp_count(cpu_dev);
  219 + if (ret <= 0) {
  220 + pr_debug("OPP table is not ready, deferring probe\n");
  221 + ret = -EPROBE_DEFER;
  222 + goto out_free_opp;
  223 + }
  224 +
214 225 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
215 226 if (!priv) {
216 227 ret = -ENOMEM;
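
The deferral relies on the driver core retrying the probe once the OPP table has been populated by other code. One way that can happen is platform code registering OPPs directly through dev_pm_opp_add(); the sketch below shows what such a hypothetical platform hook might look like (frequency and voltage values are made-up examples).

    #include <linux/cpu.h>
    #include <linux/errno.h>
    #include <linux/pm_opp.h>

    /* Hypothetical platform hook: register OPPs for CPU0 so cpufreq-dt can reprobe. */
    static int example_register_cpu_opps(void)
    {
        struct device *cpu_dev = get_cpu_device(0);

        if (!cpu_dev)
            return -ENODEV;

        /* dev_pm_opp_add(dev, freq_hz, microvolts) -- illustrative values only. */
        dev_pm_opp_add(cpu_dev, 500000000, 1000000);
        dev_pm_opp_add(cpu_dev, 1000000000, 1200000);

        return 0;
    }
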
drivers/cpufreq/cpufreq.c
... ... @@ -2028,6 +2028,12 @@
2028 2028 /* Don't start any governor operations if we are entering suspend */
2029 2029 if (cpufreq_suspended)
2030 2030 return 0;
  2031 + /*
  2032 + * Governor might not be initiated here if ACPI _PPC changed
  2033 + * notification happened, so check it.
  2034 + */
  2035 + if (!policy->governor)
  2036 + return -EINVAL;
2031 2037  
2032 2038 if (policy->governor->max_transition_latency &&
2033 2039 policy->cpuinfo.transition_latency >
drivers/cpuidle/governors/ladder.c
... ... @@ -79,12 +79,7 @@
79 79  
80 80 last_state = &ldev->states[last_idx];
81 81  
82   - if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) {
83   - last_residency = cpuidle_get_last_residency(dev) - \
84   - drv->states[last_idx].exit_latency;
85   - }
86   - else
87   - last_residency = last_state->threshold.promotion_time + 1;
  82 + last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
88 83  
89 84 /* consider promotion */
90 85 if (last_idx < drv->state_count - 1 &&
drivers/cpuidle/governors/menu.c
... ... @@ -396,8 +396,8 @@
396 396 * power state and occurrence of the wakeup event.
397 397 *
398 398 * If the entered idle state didn't support residency measurements,
399   - * we are basically lost in the dark how much time passed.
400   - * As a compromise, assume we slept for the whole expected time.
  399 + * we use them anyway if they are short, and if long,
  400 + * truncate to the whole expected time.
401 401 *
402 402 * Any measured amount of time will include the exit latency.
403 403 * Since we are interested in when the wakeup begun, not when it
... ... @@ -405,22 +405,17 @@
405 405 * the measured amount of time is less than the exit latency,
406 406 * assume the state was never reached and the exit latency is 0.
407 407 */
408   - if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
409   - /* Use timer value as is */
410   - measured_us = data->next_timer_us;
411 408  
412   - } else {
413   - /* Use measured value */
414   - measured_us = cpuidle_get_last_residency(dev);
  409 + /* measured value */
  410 + measured_us = cpuidle_get_last_residency(dev);
415 411  
416   - /* Deduct exit latency */
417   - if (measured_us > target->exit_latency)
418   - measured_us -= target->exit_latency;
  412 + /* Deduct exit latency */
  413 + if (measured_us > target->exit_latency)
  414 + measured_us -= target->exit_latency;
419 415  
420   - /* Make sure our coefficients do not exceed unity */
421   - if (measured_us > data->next_timer_us)
422   - measured_us = data->next_timer_us;
423   - }
  416 + /* Make sure our coefficients do not exceed unity */
  417 + if (measured_us > data->next_timer_us)
  418 + measured_us = data->next_timer_us;
424 419  
425 420 /* Update our correction ratio */
426 421 new_factor = data->correction_factor[data->bucket];
drivers/powercap/intel_rapl.c
... ... @@ -1041,6 +1041,7 @@
1041 1041 RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
1042 1042 RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
1043 1043 RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
  1044 + RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
1044 1045 RAPL_CPU(0x5A, rapl_defaults_atom),/* Annidale */
1045 1046 {}
1046 1047 };
include/linux/cpuidle.h
... ... @@ -53,7 +53,6 @@
53 53 };
54 54  
55 55 /* Idle State Flags */
56   -#define CPUIDLE_FLAG_TIME_INVALID (0x01) /* is residency time measurable? */
57 56 #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
58 57 #define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
59 58  
... ... @@ -89,8 +88,6 @@
89 88 /**
90 89 * cpuidle_get_last_residency - retrieves the last state's residency time
91 90 * @dev: the target CPU
92   - *
93   - * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set
94 91 */
95 92 static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
96 93 {
include/linux/pm_domain.h
... ... @@ -271,6 +271,8 @@
271 271 int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
272 272 void *data);
273 273 void of_genpd_del_provider(struct device_node *np);
  274 +struct generic_pm_domain *of_genpd_get_from_provider(
  275 + struct of_phandle_args *genpdspec);
274 276  
275 277 struct generic_pm_domain *__of_genpd_xlate_simple(
276 278 struct of_phandle_args *genpdspec,
... ... @@ -287,6 +289,12 @@
287 289 return 0;
288 290 }
289 291 static inline void of_genpd_del_provider(struct device_node *np) {}
  292 +
  293 +static inline struct generic_pm_domain *of_genpd_get_from_provider(
  294 + struct of_phandle_args *genpdspec)
  295 +{
  296 + return NULL;
  297 +}
290 298  
291 299 #define __of_genpd_xlate_simple NULL
292 300 #define __of_genpd_xlate_onecell NULL
tools/power/cpupower/utils/cpupower.c
... ... @@ -199,7 +199,7 @@
199 199 }
200 200  
201 201 get_cpu_info(0, &cpupower_cpu_info);
202   - run_as_root = !getuid();
  202 + run_as_root = !geteuid();
203 203 if (run_as_root) {
204 204 ret = uname(&uts);
205 205 if (!ret && !strcmp(uts.machine, "x86_64") &&
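
The fix above matters because getuid() returns the real (invoking) user while geteuid() returns the effective user the process actually runs as, which is what governs access to privileged interfaces when cpupower is installed set-uid root or run through a privilege wrapper. A small stand-alone illustration of the difference (not part of the tool):

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        /*
         * For a set-uid-root binary started by an ordinary user,
         * getuid() reports that user's id while geteuid() reports 0,
         * i.e. the privileges the process can actually use.
         */
        printf("real uid: %u, effective uid: %u\n",
               (unsigned)getuid(), (unsigned)geteuid());
        return geteuid() == 0 ? 0 : 1;
    }
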
tools/power/cpupower/utils/helpers/sysfs.c
... ... @@ -361,7 +361,7 @@
361 361  
362 362 snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle");
363 363 if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
364   - return -ENODEV;
  364 + return 0;
365 365  
366 366 snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
367 367 if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))