Commit acf63867ae06ef95eea7bf445ded2f05528a81b1

Authored by Len Brown

Merge branches 'release', 'cpuidle-2.6.25' and 'idle' into release

Showing 6 changed files

arch/x86/Kconfig

... ... @@ -98,12 +98,18 @@
98 98 config ARCH_HAS_ILOG2_U64
99 99 def_bool n
100 100  
  101 +config ARCH_HAS_CPU_IDLE_WAIT
  102 + def_bool y
  103 +
101 104 config GENERIC_CALIBRATE_DELAY
102 105 def_bool y
103 106  
104 107 config GENERIC_TIME_VSYSCALL
105 108 bool
106 109 default X86_64
  110 +
  111 +config ARCH_HAS_CPU_RELAX
  112 + def_bool y
107 113  
108 114 config HAVE_SETUP_PER_CPU_AREA
109 115 def_bool X86_64
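
The two added symbols are capability flags for the generic cpuidle code elsewhere in this merge: ARCH_HAS_CPU_IDLE_WAIT gates the cpu_idle_wait()-based cpuidle_kick_cpus() helper in include/linux/cpuidle.h, and ARCH_HAS_CPU_RELAX gates the poll_idle state in drivers/cpuidle/cpuidle.c. A minimal sketch of the two consumer patterns, both of which appear in full in the hunks below:

	#ifdef CONFIG_ARCH_HAS_CPU_IDLE_WAIT
		cpu_idle_wait();	/* flush a stale pm_idle pointer on every CPU */
	#endif

	#ifdef CONFIG_ARCH_HAS_CPU_RELAX
		while (!need_resched())
			cpu_relax();	/* busy-poll "C0" instead of halting */
	#endif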
drivers/acpi/processor_idle.c
... ... @@ -98,6 +98,9 @@
98 98  
99 99 static int acpi_processor_set_power_policy(struct acpi_processor *pr);
100 100  
  101 +#else /* CONFIG_CPU_IDLE */
  102 +static unsigned int latency_factor __read_mostly = 2;
  103 +module_param(latency_factor, uint, 0644);
101 104 #endif
102 105  
103 106 /*
... ... @@ -201,6 +204,10 @@
201 204 return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
202 205 }
203 206  
  207 +/*
  208 + * Callers should disable interrupts before the call and enable
  209 + * interrupts after return.
  210 + */
204 211 static void acpi_safe_halt(void)
205 212 {
206 213 current_thread_info()->status &= ~TS_POLLING;
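
The new comment establishes a calling convention: acpi_safe_halt() is now entered with interrupts disabled. The function itself still clears TS_POLLING and checks need_resched() before halting, and on x86 safe_halt() issues sti;hlt, re-enabling interrupts atomically with the halt so a wakeup cannot slip in between the check and the hlt. A sketch of the caller pattern (it appears verbatim in a later hunk in this file):

		local_irq_disable();
		acpi_safe_halt();	/* need_resched() check, then sti;hlt */
		local_irq_enable();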
... ... @@ -261,7 +268,7 @@
261 268 /* Common C-state entry for C2, C3, .. */
262 269 static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
263 270 {
264   - if (cstate->space_id == ACPI_CSTATE_FFH) {
  271 + if (cstate->entry_method == ACPI_CSTATE_FFH) {
265 272 /* Call into architectural FFH based C-state */
266 273 acpi_processor_ffh_cstate_enter(cstate);
267 274 } else {
... ... @@ -413,6 +420,8 @@
413 420 pm_idle_save();
414 421 else
415 422 acpi_safe_halt();
  423 +
  424 + local_irq_enable();
416 425 return;
417 426 }
418 427  
... ... @@ -521,6 +530,7 @@
521 530 * skew otherwise.
522 531 */
523 532 sleep_ticks = 0xFFFFFFFF;
  533 + local_irq_enable();
524 534 break;
525 535  
526 536 case ACPI_STATE_C2:
... ... @@ -922,20 +932,20 @@
922 932 cx.address = reg->address;
923 933 cx.index = current_count + 1;
924 934  
925   - cx.space_id = ACPI_CSTATE_SYSTEMIO;
  935 + cx.entry_method = ACPI_CSTATE_SYSTEMIO;
926 936 if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
927 937 if (acpi_processor_ffh_cstate_probe
928 938 (pr->id, &cx, reg) == 0) {
929   - cx.space_id = ACPI_CSTATE_FFH;
930   - } else if (cx.type != ACPI_STATE_C1) {
  939 + cx.entry_method = ACPI_CSTATE_FFH;
  940 + } else if (cx.type == ACPI_STATE_C1) {
931 941 /*
932 942 * C1 is a special case where FIXED_HARDWARE
933 943 * can be handled in non-MWAIT way as well.
934 944 * In that case, save this _CST entry info.
935   - * That is, we retain space_id of SYSTEM_IO for
936   - * halt based C1.
937 945 * Otherwise, ignore this info and continue.
938 946 */
  947 + cx.entry_method = ACPI_CSTATE_HALT;
  948 + } else {
939 949 continue;
940 950 }
941 951 }
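
Besides completing the space_id -> entry_method rename, this hunk makes halt-based C1 explicit: the old code silently retained the SYSTEMIO marker for it, while the new code tags it ACPI_CSTATE_HALT. Consolidated, the post-patch selection logic for a FIXED_HARDWARE _CST entry reads:

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe(pr->id, &cx, reg) == 0)
				cx.entry_method = ACPI_CSTATE_FFH;	/* MWAIT */
			else if (cx.type == ACPI_STATE_C1)
				cx.entry_method = ACPI_CSTATE_HALT;	/* plain HLT */
			else
				continue;	/* no usable entry method, skip it */
		}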
... ... @@ -1369,12 +1379,16 @@
1369 1379 /**
1370 1380 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
1371 1381 * @cx: cstate data
  1382 + *
  1383 + * Caller disables interrupts before the call and enables them after return.
1372 1384 */
1373 1385 static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
1374 1386 {
1375   - if (cx->space_id == ACPI_CSTATE_FFH) {
  1387 + if (cx->entry_method == ACPI_CSTATE_FFH) {
1376 1388 /* Call into architectural FFH based C-state */
1377 1389 acpi_processor_ffh_cstate_enter(cx);
  1390 + } else if (cx->entry_method == ACPI_CSTATE_HALT) {
  1391 + acpi_safe_halt();
1378 1392 } else {
1379 1393 int unused;
1380 1394 /* IO port based C-state */
... ... @@ -1396,21 +1410,27 @@
1396 1410 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
1397 1411 struct cpuidle_state *state)
1398 1412 {
  1413 + u32 t1, t2;
1399 1414 struct acpi_processor *pr;
1400 1415 struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
  1416 +
1401 1417 pr = processors[smp_processor_id()];
1402 1418  
1403 1419 if (unlikely(!pr))
1404 1420 return 0;
1405 1421  
  1422 + local_irq_disable();
1406 1423 if (pr->flags.bm_check)
1407 1424 acpi_idle_update_bm_rld(pr, cx);
1408 1425  
1409   - acpi_safe_halt();
  1426 + t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
  1427 + acpi_idle_do_entry(cx);
  1428 + t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
1410 1429  
  1430 + local_irq_enable();
1411 1431 cx->usage++;
1412 1432  
1413   - return 0;
  1433 + return ticks_elapsed_in_us(t1, t2);
1414 1434 }
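
acpi_idle_enter_c1() now brackets the C-state entry with PM-timer reads and returns the measured residency in microseconds, which is what justifies tagging C1 with CPUIDLE_FLAG_TIME_VALID below. The conversion helper must handle timer wrap-around; a sketch consistent with the 32-bit wrap arithmetic visible in the earlier hunk (the 24-bit branch is an assumption about the rest of the helper):

		static u32 ticks_elapsed_in_us(u32 t1, u32 t2)
		{
			if (t2 >= t1)
				return PM_TIMER_TICKS_TO_US(t2 - t1);
			else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
				/* 24-bit PM timer wraps at 0x00FFFFFF */
				return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
			else
				return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
		}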
1415 1435  
1416 1436 /**
... ... @@ -1517,7 +1537,9 @@
1517 1537 if (dev->safe_state) {
1518 1538 return dev->safe_state->enter(dev, dev->safe_state);
1519 1539 } else {
  1540 + local_irq_disable();
1520 1541 acpi_safe_halt();
  1542 + local_irq_enable();
1521 1543 return 0;
1522 1544 }
1523 1545 }
... ... @@ -1609,7 +1631,7 @@
1609 1631 */
1610 1632 static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
1611 1633 {
1612   - int i, count = 0;
  1634 + int i, count = CPUIDLE_DRIVER_STATE_START;
1613 1635 struct acpi_processor_cx *cx;
1614 1636 struct cpuidle_state *state;
1615 1637 struct cpuidle_device *dev = &pr->power.dev;
... ... @@ -1638,13 +1660,14 @@
1638 1660  
1639 1661 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
1640 1662 state->exit_latency = cx->latency;
1641   - state->target_residency = cx->latency * 6;
  1663 + state->target_residency = cx->latency * latency_factor;
1642 1664 state->power_usage = cx->power;
1643 1665  
1644 1666 state->flags = 0;
1645 1667 switch (cx->type) {
1646 1668 case ACPI_STATE_C1:
1647 1669 state->flags |= CPUIDLE_FLAG_SHALLOW;
  1670 + state->flags |= CPUIDLE_FLAG_TIME_VALID;
1648 1671 state->enter = acpi_idle_enter_c1;
1649 1672 dev->safe_state = state;
1650 1673 break;
... ... @@ -1667,6 +1690,8 @@
1667 1690 }
1668 1691  
1669 1692 count++;
  1693 + if (count == CPUIDLE_STATE_MAX)
  1694 + break;
1670 1695 }
1671 1696  
1672 1697 dev->state_count = count;
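
Starting the count at CPUIDLE_DRIVER_STATE_START reserves states[0] for the generic poll state that poll_idle_init() fills in (see the cpuidle.c hunk below), and the new CPUIDLE_STATE_MAX check stops the loop before the fixed-size states array overflows. The resulting layout on an ARCH_HAS_CPU_RELAX=y build, as a sketch:

		/*
		 * dev->states[0]  "C0 (poll idle)"  - poll_idle, from poll_idle_init()
		 * dev->states[1]  "C1"              - acpi_idle_enter_c1
		 * dev->states[2+] "C2", "C3", ...   - up to CPUIDLE_STATE_MAX - 1
		 */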
drivers/cpuidle/Kconfig
1 1  
2 2 config CPU_IDLE
3 3 bool "CPU idle PM support"
  4 + default ACPI
4 5 help
5 6 CPU idle is a generic framework for supporting software-controlled
6 7 idle processor power management. It includes modular cross-platform
7 8 governors that can be swapped during runtime.
8 9  
9   - If you're using a mobile platform that supports CPU idle PM (e.g.
10   - an ACPI-capable notebook), you should say Y here.
  10 + If you're using an ACPI-enabled platform, you should say Y here.
11 11  
12 12 config CPU_IDLE_GOV_LADDER
13 13 bool
drivers/cpuidle/cpuidle.c
... ... @@ -15,6 +15,7 @@
15 15 #include <linux/pm_qos_params.h>
16 16 #include <linux/cpu.h>
17 17 #include <linux/cpuidle.h>
  18 +#include <linux/ktime.h>
18 19  
19 20 #include "cpuidle.h"
20 21  
... ... @@ -82,7 +83,7 @@
82 83 {
83 84 if (enabled_devices && (pm_idle != pm_idle_old)) {
84 85 pm_idle = pm_idle_old;
85   - cpu_idle_wait();
  86 + cpuidle_kick_cpus();
86 87 }
87 88 }
88 89  
... ... @@ -180,6 +181,44 @@
180 181  
181 182 EXPORT_SYMBOL_GPL(cpuidle_disable_device);
182 183  
  184 +#ifdef CONFIG_ARCH_HAS_CPU_RELAX
  185 +static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
  186 +{
  187 + ktime_t t1, t2;
  188 + s64 diff;
  189 + int ret;
  190 +
  191 + t1 = ktime_get();
  192 + local_irq_enable();
  193 + while (!need_resched())
  194 + cpu_relax();
  195 +
  196 + t2 = ktime_get();
  197 + diff = ktime_to_us(ktime_sub(t2, t1));
  198 + if (diff > INT_MAX)
  199 + diff = INT_MAX;
  200 +
  201 + ret = (int) diff;
  202 + return ret;
  203 +}
  204 +
  205 +static void poll_idle_init(struct cpuidle_device *dev)
  206 +{
  207 + struct cpuidle_state *state = &dev->states[0];
  208 +
  209 + cpuidle_set_statedata(state, NULL);
  210 +
  211 + snprintf(state->name, CPUIDLE_NAME_LEN, "C0 (poll idle)");
  212 + state->exit_latency = 0;
  213 + state->target_residency = 0;
  214 + state->power_usage = -1;
  215 + state->flags = CPUIDLE_FLAG_POLL | CPUIDLE_FLAG_TIME_VALID;
  216 + state->enter = poll_idle;
  217 +}
  218 +#else
  219 +static void poll_idle_init(struct cpuidle_device *dev) {}
  220 +#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
  221 +
183 222 /**
184 223 * cpuidle_register_device - registers a CPU's idle PM feature
185 224 * @dev: the cpu
... ... @@ -197,6 +236,8 @@
197 236 init_completion(&dev->kobj_unregister);
198 237  
199 238 mutex_lock(&cpuidle_lock);
  239 +
  240 + poll_idle_init(dev);
200 241  
201 242 per_cpu(cpuidle_devices, dev->cpu) = dev;
202 243 list_add(&dev->device_list, &cpuidle_detected_devices);
include/acpi/processor.h
... ... @@ -34,6 +34,7 @@
34 34  
35 35 #define ACPI_CSTATE_SYSTEMIO (0)
36 36 #define ACPI_CSTATE_FFH (1)
  37 +#define ACPI_CSTATE_HALT (2)
37 38  
38 39 /* Power Management */
39 40  
... ... @@ -64,7 +65,7 @@
64 65 u8 valid;
65 66 u8 type;
66 67 u32 address;
67   - u8 space_id;
  68 + u8 entry_method;
68 69 u8 index;
69 70 u32 latency;
70 71 u32 latency_ticks;
include/linux/cpuidle.h
... ... @@ -46,9 +46,10 @@
46 46 /* Idle State Flags */
47 47 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
48 48 #define CPUIDLE_FLAG_CHECK_BM (0x02) /* BM activity will exit state */
49   -#define CPUIDLE_FLAG_SHALLOW (0x10) /* low latency, minimal savings */
50   -#define CPUIDLE_FLAG_BALANCED (0x20) /* medium latency, moderate savings */
51   -#define CPUIDLE_FLAG_DEEP (0x40) /* high latency, large savings */
  49 +#define CPUIDLE_FLAG_POLL (0x10) /* no latency, no savings */
  50 +#define CPUIDLE_FLAG_SHALLOW (0x20) /* low latency, minimal savings */
  51 +#define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */
  52 +#define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */
52 53  
53 54 #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
54 55  
... ... @@ -72,6 +73,19 @@
72 73 state->driver_data = data;
73 74 }
74 75  
  76 +#ifdef CONFIG_SMP
  77 +#ifdef CONFIG_ARCH_HAS_CPU_IDLE_WAIT
  78 +static inline void cpuidle_kick_cpus(void)
  79 +{
  80 + cpu_idle_wait();
  81 +}
  82 +#else /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT */
  83 +#error "Arch needs cpu_idle_wait() equivalent here"
  84 +#endif /* !CONFIG_ARCH_HAS_CPU_IDLE_WAIT */
  85 +#else /* !CONFIG_SMP */
  86 +static inline void cpuidle_kick_cpus(void) {}
  87 +#endif /* !CONFIG_SMP */
  88 +
75 89 struct cpuidle_state_kobj {
76 90 struct cpuidle_state *state;
77 91 struct completion kobj_unregister;
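
cpuidle_kick_cpus() addresses one SMP ordering problem: after cpuidle restores pm_idle (see the cpuidle.c hunk above), another CPU may still be executing the old handler it sampled earlier. cpu_idle_wait() returns only once every CPU has passed through its idle loop again, so making it mandatory via #error turns a missing arch hook into a build failure rather than a runtime race; on UP there is no other CPU to wait for and the helper compiles away. The call site being protected:

		pm_idle = pm_idle_old;	/* swap the idle handler back */
		cpuidle_kick_cpus();	/* returns when no CPU can still be in the old handler */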
... ... @@ -176,6 +190,12 @@
176 190 {return 0;}
177 191 static inline void cpuidle_unregister_governor(struct cpuidle_governor *gov) { }
178 192  
  193 +#endif
  194 +
  195 +#ifdef CONFIG_ARCH_HAS_CPU_RELAX
  196 +#define CPUIDLE_DRIVER_STATE_START 1
  197 +#else
  198 +#define CPUIDLE_DRIVER_STATE_START 0
179 199 #endif
180 200  
181 201 #endif /* _LINUX_CPUIDLE_H */