Commit 15e123e5d7e8ee9ba3717e743d8eb5fd0fe57712
Committed by: Len Brown
1 parent: bfb53ccf1c
Exists in: master and in 7 other branches
intel_idle: Rename cpuidle states
Userspace apps might have to cut off parts of the idle state name for display reasons. Switch NHM-C1 to C1-NHM (and others) so that a cut-off name is unique and makes sense to the user.

Signed-off-by: Thomas Renninger <trenn@suse.de>
CC: lenb@kernel.org
Signed-off-by: Len Brown <len.brown@intel.com>
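As a rough illustration of the rationale (the 5-character display width below is a hypothetical limit chosen for this example, not something the patch specifies), the old names all collapse to the same string once truncated, while the new names keep the C-state number visible:

#include <stdio.h>

/* Hypothetical display width a space-constrained monitoring tool might use. */
#define DISPLAY_WIDTH 5

static void show(const char *name)
{
	char buf[DISPLAY_WIDTH + 1];

	/* Truncate the state name to the display width. */
	snprintf(buf, sizeof(buf), "%s", name);
	printf("%-7s -> %s\n", name, buf);
}

int main(void)
{
	/* Old scheme: every Nehalem state truncates to "NHM-C" -- ambiguous. */
	show("NHM-C1");
	show("NHM-C3");
	show("NHM-C6");

	/* New scheme: the C-state number survives truncation. */
	show("C1-NHM");
	show("C3-NHM");
	show("C6-NHM");
	return 0;
}

Under that assumed width the old names all print as "NHM-C", whereas the new names print as "C1-NH", "C3-NH" and "C6-NH", which is the uniqueness the commit message is after.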
Showing 1 changed file with 11 additions and 11 deletions
drivers/idle/intel_idle.c
1 | /* | 1 | /* |
2 | * intel_idle.c - native hardware idle loop for modern Intel processors | 2 | * intel_idle.c - native hardware idle loop for modern Intel processors |
3 | * | 3 | * |
4 | * Copyright (c) 2010, Intel Corporation. | 4 | * Copyright (c) 2010, Intel Corporation. |
5 | * Len Brown <len.brown@intel.com> | 5 | * Len Brown <len.brown@intel.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms and conditions of the GNU General Public License, | 8 | * under the terms and conditions of the GNU General Public License, |
9 | * version 2, as published by the Free Software Foundation. | 9 | * version 2, as published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * This program is distributed in the hope it will be useful, but WITHOUT | 11 | * This program is distributed in the hope it will be useful, but WITHOUT |
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
14 | * more details. | 14 | * more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License along with | 16 | * You should have received a copy of the GNU General Public License along with |
17 | * this program; if not, write to the Free Software Foundation, Inc., | 17 | * this program; if not, write to the Free Software Foundation, Inc., |
18 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | 18 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * intel_idle is a cpuidle driver that loads on specific Intel processors | 22 | * intel_idle is a cpuidle driver that loads on specific Intel processors |
23 | * in lieu of the legacy ACPI processor_idle driver. The intent is to | 23 | * in lieu of the legacy ACPI processor_idle driver. The intent is to |
24 | * make Linux more efficient on these processors, as intel_idle knows | 24 | * make Linux more efficient on these processors, as intel_idle knows |
25 | * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs. | 25 | * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Design Assumptions | 29 | * Design Assumptions |
30 | * | 30 | * |
31 | * All CPUs have same idle states as boot CPU | 31 | * All CPUs have same idle states as boot CPU |
32 | * | 32 | * |
33 | * Chipset BM_STS (bus master status) bit is a NOP | 33 | * Chipset BM_STS (bus master status) bit is a NOP |
34 | * for preventing entry into deep C-states | 34 | * for preventing entry into deep C-states |
35 | */ | 35 | */ |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Known limitations | 38 | * Known limitations |
39 | * | 39 | * |
40 | * The driver currently initializes for_each_online_cpu() upon modprobe. | 40 | * The driver currently initializes for_each_online_cpu() upon modprobe. |
41 | * It is unaware of subsequent processors hot-added to the system. | 41 | * It is unaware of subsequent processors hot-added to the system. |
42 | * This means that if you boot with maxcpus=n and later online | 42 | * This means that if you boot with maxcpus=n and later online |
43 | * processors above n, those processors will use C1 only. | 43 | * processors above n, those processors will use C1 only. |
44 | * | 44 | * |
45 | * ACPI has a .suspend hack to turn off deep c-states during suspend | 45 | * ACPI has a .suspend hack to turn off deep c-states during suspend |
46 | * to avoid complications with the lapic timer workaround. | 46 | * to avoid complications with the lapic timer workaround. |
47 | * Have not seen issues with suspend, but may need same workaround here. | 47 | * Have not seen issues with suspend, but may need same workaround here. |
48 | * | 48 | * |
49 | * There is currently no kernel-based automatic probing/loading mechanism | 49 | * There is currently no kernel-based automatic probing/loading mechanism |
50 | * if the driver is built as a module. | 50 | * if the driver is built as a module. |
51 | */ | 51 | */ |
52 | 52 | ||
53 | /* un-comment DEBUG to enable pr_debug() statements */ | 53 | /* un-comment DEBUG to enable pr_debug() statements */ |
54 | #define DEBUG | 54 | #define DEBUG |
55 | 55 | ||
56 | #include <linux/kernel.h> | 56 | #include <linux/kernel.h> |
57 | #include <linux/cpuidle.h> | 57 | #include <linux/cpuidle.h> |
58 | #include <linux/clockchips.h> | 58 | #include <linux/clockchips.h> |
59 | #include <linux/hrtimer.h> /* ktime_get_real() */ | 59 | #include <linux/hrtimer.h> /* ktime_get_real() */ |
60 | #include <trace/events/power.h> | 60 | #include <trace/events/power.h> |
61 | #include <linux/sched.h> | 61 | #include <linux/sched.h> |
62 | #include <linux/notifier.h> | 62 | #include <linux/notifier.h> |
63 | #include <linux/cpu.h> | 63 | #include <linux/cpu.h> |
64 | #include <asm/mwait.h> | 64 | #include <asm/mwait.h> |
65 | #include <asm/msr.h> | 65 | #include <asm/msr.h> |
66 | 66 | ||
67 | #define INTEL_IDLE_VERSION "0.4" | 67 | #define INTEL_IDLE_VERSION "0.4" |
68 | #define PREFIX "intel_idle: " | 68 | #define PREFIX "intel_idle: " |
69 | 69 | ||
70 | static struct cpuidle_driver intel_idle_driver = { | 70 | static struct cpuidle_driver intel_idle_driver = { |
71 | .name = "intel_idle", | 71 | .name = "intel_idle", |
72 | .owner = THIS_MODULE, | 72 | .owner = THIS_MODULE, |
73 | }; | 73 | }; |
74 | /* intel_idle.max_cstate=0 disables driver */ | 74 | /* intel_idle.max_cstate=0 disables driver */ |
75 | static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; | 75 | static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; |
76 | 76 | ||
77 | static unsigned int mwait_substates; | 77 | static unsigned int mwait_substates; |
78 | 78 | ||
79 | #define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF | 79 | #define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF |
80 | /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ | 80 | /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ |
81 | static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ | 81 | static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ |
82 | 82 | ||
83 | static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; | 83 | static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; |
84 | static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); | 84 | static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); |
85 | 85 | ||
86 | static struct cpuidle_state *cpuidle_state_table; | 86 | static struct cpuidle_state *cpuidle_state_table; |
87 | 87 | ||
88 | /* | 88 | /* |
89 | * Hardware C-state auto-demotion may not always be optimal. | 89 | * Hardware C-state auto-demotion may not always be optimal. |
90 | * Indicate which enable bits to clear here. | 90 | * Indicate which enable bits to clear here. |
91 | */ | 91 | */ |
92 | static unsigned long long auto_demotion_disable_flags; | 92 | static unsigned long long auto_demotion_disable_flags; |
93 | 93 | ||
94 | /* | 94 | /* |
95 | * Set this flag for states where the HW flushes the TLB for us | 95 | * Set this flag for states where the HW flushes the TLB for us |
96 | * and so we don't need cross-calls to keep it consistent. | 96 | * and so we don't need cross-calls to keep it consistent. |
97 | * If this flag is set, SW flushes the TLB, so even if the | 97 | * If this flag is set, SW flushes the TLB, so even if the |
98 | * HW doesn't do the flushing, this flag is safe to use. | 98 | * HW doesn't do the flushing, this flag is safe to use. |
99 | */ | 99 | */ |
100 | #define CPUIDLE_FLAG_TLB_FLUSHED 0x10000 | 100 | #define CPUIDLE_FLAG_TLB_FLUSHED 0x10000 |
101 | 101 | ||
102 | /* | 102 | /* |
103 | * States are indexed by the cstate number, | 103 | * States are indexed by the cstate number, |
104 | * which is also the index into the MWAIT hint array. | 104 | * which is also the index into the MWAIT hint array. |
105 | * Thus C0 is a dummy. | 105 | * Thus C0 is a dummy. |
106 | */ | 106 | */ |
107 | static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { | 107 | static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { |
108 | { /* MWAIT C0 */ }, | 108 | { /* MWAIT C0 */ }, |
109 | { /* MWAIT C1 */ | 109 | { /* MWAIT C1 */ |
110 | .name = "NHM-C1", | 110 | .name = "C1-NHM", |
111 | .desc = "MWAIT 0x00", | 111 | .desc = "MWAIT 0x00", |
112 | .driver_data = (void *) 0x00, | 112 | .driver_data = (void *) 0x00, |
113 | .flags = CPUIDLE_FLAG_TIME_VALID, | 113 | .flags = CPUIDLE_FLAG_TIME_VALID, |
114 | .exit_latency = 3, | 114 | .exit_latency = 3, |
115 | .target_residency = 6, | 115 | .target_residency = 6, |
116 | .enter = &intel_idle }, | 116 | .enter = &intel_idle }, |
117 | { /* MWAIT C2 */ | 117 | { /* MWAIT C2 */ |
118 | .name = "NHM-C3", | 118 | .name = "C3-NHM", |
119 | .desc = "MWAIT 0x10", | 119 | .desc = "MWAIT 0x10", |
120 | .driver_data = (void *) 0x10, | 120 | .driver_data = (void *) 0x10, |
121 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 121 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
122 | .exit_latency = 20, | 122 | .exit_latency = 20, |
123 | .target_residency = 80, | 123 | .target_residency = 80, |
124 | .enter = &intel_idle }, | 124 | .enter = &intel_idle }, |
125 | { /* MWAIT C3 */ | 125 | { /* MWAIT C3 */ |
126 | .name = "NHM-C6", | 126 | .name = "C6-NHM", |
127 | .desc = "MWAIT 0x20", | 127 | .desc = "MWAIT 0x20", |
128 | .driver_data = (void *) 0x20, | 128 | .driver_data = (void *) 0x20, |
129 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 129 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
130 | .exit_latency = 200, | 130 | .exit_latency = 200, |
131 | .target_residency = 800, | 131 | .target_residency = 800, |
132 | .enter = &intel_idle }, | 132 | .enter = &intel_idle }, |
133 | }; | 133 | }; |
134 | 134 | ||
135 | static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { | 135 | static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { |
136 | { /* MWAIT C0 */ }, | 136 | { /* MWAIT C0 */ }, |
137 | { /* MWAIT C1 */ | 137 | { /* MWAIT C1 */ |
138 | .name = "SNB-C1", | 138 | .name = "C1-SNB", |
139 | .desc = "MWAIT 0x00", | 139 | .desc = "MWAIT 0x00", |
140 | .driver_data = (void *) 0x00, | 140 | .driver_data = (void *) 0x00, |
141 | .flags = CPUIDLE_FLAG_TIME_VALID, | 141 | .flags = CPUIDLE_FLAG_TIME_VALID, |
142 | .exit_latency = 1, | 142 | .exit_latency = 1, |
143 | .target_residency = 1, | 143 | .target_residency = 1, |
144 | .enter = &intel_idle }, | 144 | .enter = &intel_idle }, |
145 | { /* MWAIT C2 */ | 145 | { /* MWAIT C2 */ |
146 | .name = "SNB-C3", | 146 | .name = "C3-SNB", |
147 | .desc = "MWAIT 0x10", | 147 | .desc = "MWAIT 0x10", |
148 | .driver_data = (void *) 0x10, | 148 | .driver_data = (void *) 0x10, |
149 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 149 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
150 | .exit_latency = 80, | 150 | .exit_latency = 80, |
151 | .target_residency = 211, | 151 | .target_residency = 211, |
152 | .enter = &intel_idle }, | 152 | .enter = &intel_idle }, |
153 | { /* MWAIT C3 */ | 153 | { /* MWAIT C3 */ |
154 | .name = "SNB-C6", | 154 | .name = "C6-SNB", |
155 | .desc = "MWAIT 0x20", | 155 | .desc = "MWAIT 0x20", |
156 | .driver_data = (void *) 0x20, | 156 | .driver_data = (void *) 0x20, |
157 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 157 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
158 | .exit_latency = 104, | 158 | .exit_latency = 104, |
159 | .target_residency = 345, | 159 | .target_residency = 345, |
160 | .enter = &intel_idle }, | 160 | .enter = &intel_idle }, |
161 | { /* MWAIT C4 */ | 161 | { /* MWAIT C4 */ |
162 | .name = "SNB-C7", | 162 | .name = "C7-SNB", |
163 | .desc = "MWAIT 0x30", | 163 | .desc = "MWAIT 0x30", |
164 | .driver_data = (void *) 0x30, | 164 | .driver_data = (void *) 0x30, |
165 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 165 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
166 | .exit_latency = 109, | 166 | .exit_latency = 109, |
167 | .target_residency = 345, | 167 | .target_residency = 345, |
168 | .enter = &intel_idle }, | 168 | .enter = &intel_idle }, |
169 | }; | 169 | }; |
170 | 170 | ||
171 | static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | 171 | static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { |
172 | { /* MWAIT C0 */ }, | 172 | { /* MWAIT C0 */ }, |
173 | { /* MWAIT C1 */ | 173 | { /* MWAIT C1 */ |
174 | .name = "ATM-C1", | 174 | .name = "C1-ATM", |
175 | .desc = "MWAIT 0x00", | 175 | .desc = "MWAIT 0x00", |
176 | .driver_data = (void *) 0x00, | 176 | .driver_data = (void *) 0x00, |
177 | .flags = CPUIDLE_FLAG_TIME_VALID, | 177 | .flags = CPUIDLE_FLAG_TIME_VALID, |
178 | .exit_latency = 1, | 178 | .exit_latency = 1, |
179 | .target_residency = 4, | 179 | .target_residency = 4, |
180 | .enter = &intel_idle }, | 180 | .enter = &intel_idle }, |
181 | { /* MWAIT C2 */ | 181 | { /* MWAIT C2 */ |
182 | .name = "ATM-C2", | 182 | .name = "C2-ATM", |
183 | .desc = "MWAIT 0x10", | 183 | .desc = "MWAIT 0x10", |
184 | .driver_data = (void *) 0x10, | 184 | .driver_data = (void *) 0x10, |
185 | .flags = CPUIDLE_FLAG_TIME_VALID, | 185 | .flags = CPUIDLE_FLAG_TIME_VALID, |
186 | .exit_latency = 20, | 186 | .exit_latency = 20, |
187 | .target_residency = 80, | 187 | .target_residency = 80, |
188 | .enter = &intel_idle }, | 188 | .enter = &intel_idle }, |
189 | { /* MWAIT C3 */ }, | 189 | { /* MWAIT C3 */ }, |
190 | { /* MWAIT C4 */ | 190 | { /* MWAIT C4 */ |
191 | .name = "ATM-C4", | 191 | .name = "C4-ATM", |
192 | .desc = "MWAIT 0x30", | 192 | .desc = "MWAIT 0x30", |
193 | .driver_data = (void *) 0x30, | 193 | .driver_data = (void *) 0x30, |
194 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 194 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
195 | .exit_latency = 100, | 195 | .exit_latency = 100, |
196 | .target_residency = 400, | 196 | .target_residency = 400, |
197 | .enter = &intel_idle }, | 197 | .enter = &intel_idle }, |
198 | { /* MWAIT C5 */ }, | 198 | { /* MWAIT C5 */ }, |
199 | { /* MWAIT C6 */ | 199 | { /* MWAIT C6 */ |
200 | .name = "ATM-C6", | 200 | .name = "C6-ATM", |
201 | .desc = "MWAIT 0x52", | 201 | .desc = "MWAIT 0x52", |
202 | .driver_data = (void *) 0x52, | 202 | .driver_data = (void *) 0x52, |
203 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 203 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
204 | .exit_latency = 140, | 204 | .exit_latency = 140, |
205 | .target_residency = 560, | 205 | .target_residency = 560, |
206 | .enter = &intel_idle }, | 206 | .enter = &intel_idle }, |
207 | }; | 207 | }; |
208 | 208 | ||
209 | /** | 209 | /** |
210 | * intel_idle | 210 | * intel_idle |
211 | * @dev: cpuidle_device | 211 | * @dev: cpuidle_device |
212 | * @state: cpuidle state | 212 | * @state: cpuidle state |
213 | * | 213 | * |
214 | */ | 214 | */ |
215 | static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) | 215 | static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state) |
216 | { | 216 | { |
217 | unsigned long ecx = 1; /* break on interrupt flag */ | 217 | unsigned long ecx = 1; /* break on interrupt flag */ |
218 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state); | 218 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state); |
219 | unsigned int cstate; | 219 | unsigned int cstate; |
220 | ktime_t kt_before, kt_after; | 220 | ktime_t kt_before, kt_after; |
221 | s64 usec_delta; | 221 | s64 usec_delta; |
222 | int cpu = smp_processor_id(); | 222 | int cpu = smp_processor_id(); |
223 | 223 | ||
224 | cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; | 224 | cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; |
225 | 225 | ||
226 | local_irq_disable(); | 226 | local_irq_disable(); |
227 | 227 | ||
228 | /* | 228 | /* |
229 | * leave_mm() to avoid costly and often unnecessary wakeups | 229 | * leave_mm() to avoid costly and often unnecessary wakeups |
230 | * for flushing the user TLB's associated with the active mm. | 230 | * for flushing the user TLB's associated with the active mm. |
231 | */ | 231 | */ |
232 | if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED) | 232 | if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED) |
233 | leave_mm(cpu); | 233 | leave_mm(cpu); |
234 | 234 | ||
235 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | 235 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) |
236 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); | 236 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); |
237 | 237 | ||
238 | kt_before = ktime_get_real(); | 238 | kt_before = ktime_get_real(); |
239 | 239 | ||
240 | stop_critical_timings(); | 240 | stop_critical_timings(); |
241 | if (!need_resched()) { | 241 | if (!need_resched()) { |
242 | 242 | ||
243 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | 243 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
244 | smp_mb(); | 244 | smp_mb(); |
245 | if (!need_resched()) | 245 | if (!need_resched()) |
246 | __mwait(eax, ecx); | 246 | __mwait(eax, ecx); |
247 | } | 247 | } |
248 | 248 | ||
249 | start_critical_timings(); | 249 | start_critical_timings(); |
250 | 250 | ||
251 | kt_after = ktime_get_real(); | 251 | kt_after = ktime_get_real(); |
252 | usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before)); | 252 | usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before)); |
253 | 253 | ||
254 | local_irq_enable(); | 254 | local_irq_enable(); |
255 | 255 | ||
256 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | 256 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) |
257 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); | 257 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); |
258 | 258 | ||
259 | return usec_delta; | 259 | return usec_delta; |
260 | } | 260 | } |
261 | 261 | ||
262 | static void __setup_broadcast_timer(void *arg) | 262 | static void __setup_broadcast_timer(void *arg) |
263 | { | 263 | { |
264 | unsigned long reason = (unsigned long)arg; | 264 | unsigned long reason = (unsigned long)arg; |
265 | int cpu = smp_processor_id(); | 265 | int cpu = smp_processor_id(); |
266 | 266 | ||
267 | reason = reason ? | 267 | reason = reason ? |
268 | CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; | 268 | CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; |
269 | 269 | ||
270 | clockevents_notify(reason, &cpu); | 270 | clockevents_notify(reason, &cpu); |
271 | } | 271 | } |
272 | 272 | ||
273 | static int setup_broadcast_cpuhp_notify(struct notifier_block *n, | 273 | static int setup_broadcast_cpuhp_notify(struct notifier_block *n, |
274 | unsigned long action, void *hcpu) | 274 | unsigned long action, void *hcpu) |
275 | { | 275 | { |
276 | int hotcpu = (unsigned long)hcpu; | 276 | int hotcpu = (unsigned long)hcpu; |
277 | 277 | ||
278 | switch (action & 0xf) { | 278 | switch (action & 0xf) { |
279 | case CPU_ONLINE: | 279 | case CPU_ONLINE: |
280 | smp_call_function_single(hotcpu, __setup_broadcast_timer, | 280 | smp_call_function_single(hotcpu, __setup_broadcast_timer, |
281 | (void *)true, 1); | 281 | (void *)true, 1); |
282 | break; | 282 | break; |
283 | } | 283 | } |
284 | return NOTIFY_OK; | 284 | return NOTIFY_OK; |
285 | } | 285 | } |
286 | 286 | ||
287 | static struct notifier_block setup_broadcast_notifier = { | 287 | static struct notifier_block setup_broadcast_notifier = { |
288 | .notifier_call = setup_broadcast_cpuhp_notify, | 288 | .notifier_call = setup_broadcast_cpuhp_notify, |
289 | }; | 289 | }; |
290 | 290 | ||
291 | static void auto_demotion_disable(void *dummy) | 291 | static void auto_demotion_disable(void *dummy) |
292 | { | 292 | { |
293 | unsigned long long msr_bits; | 293 | unsigned long long msr_bits; |
294 | 294 | ||
295 | rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); | 295 | rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); |
296 | msr_bits &= ~auto_demotion_disable_flags; | 296 | msr_bits &= ~auto_demotion_disable_flags; |
297 | wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); | 297 | wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); |
298 | } | 298 | } |
299 | 299 | ||
300 | /* | 300 | /* |
301 | * intel_idle_probe() | 301 | * intel_idle_probe() |
302 | */ | 302 | */ |
303 | static int intel_idle_probe(void) | 303 | static int intel_idle_probe(void) |
304 | { | 304 | { |
305 | unsigned int eax, ebx, ecx; | 305 | unsigned int eax, ebx, ecx; |
306 | 306 | ||
307 | if (max_cstate == 0) { | 307 | if (max_cstate == 0) { |
308 | pr_debug(PREFIX "disabled\n"); | 308 | pr_debug(PREFIX "disabled\n"); |
309 | return -EPERM; | 309 | return -EPERM; |
310 | } | 310 | } |
311 | 311 | ||
312 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) | 312 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) |
313 | return -ENODEV; | 313 | return -ENODEV; |
314 | 314 | ||
315 | if (!boot_cpu_has(X86_FEATURE_MWAIT)) | 315 | if (!boot_cpu_has(X86_FEATURE_MWAIT)) |
316 | return -ENODEV; | 316 | return -ENODEV; |
317 | 317 | ||
318 | if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) | 318 | if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) |
319 | return -ENODEV; | 319 | return -ENODEV; |
320 | 320 | ||
321 | cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); | 321 | cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); |
322 | 322 | ||
323 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || | 323 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || |
324 | !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) | 324 | !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) |
325 | return -ENODEV; | 325 | return -ENODEV; |
326 | 326 | ||
327 | pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates); | 327 | pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates); |
328 | 328 | ||
329 | 329 | ||
330 | if (boot_cpu_data.x86 != 6) /* family 6 */ | 330 | if (boot_cpu_data.x86 != 6) /* family 6 */ |
331 | return -ENODEV; | 331 | return -ENODEV; |
332 | 332 | ||
333 | switch (boot_cpu_data.x86_model) { | 333 | switch (boot_cpu_data.x86_model) { |
334 | 334 | ||
335 | case 0x1A: /* Core i7, Xeon 5500 series */ | 335 | case 0x1A: /* Core i7, Xeon 5500 series */ |
336 | case 0x1E: /* Core i7 and i5 Processor - Lynnfield Jasper Forest */ | 336 | case 0x1E: /* Core i7 and i5 Processor - Lynnfield Jasper Forest */ |
337 | case 0x1F: /* Core i7 and i5 Processor - Nehalem */ | 337 | case 0x1F: /* Core i7 and i5 Processor - Nehalem */ |
338 | case 0x2E: /* Nehalem-EX Xeon */ | 338 | case 0x2E: /* Nehalem-EX Xeon */ |
339 | case 0x2F: /* Westmere-EX Xeon */ | 339 | case 0x2F: /* Westmere-EX Xeon */ |
340 | case 0x25: /* Westmere */ | 340 | case 0x25: /* Westmere */ |
341 | case 0x2C: /* Westmere */ | 341 | case 0x2C: /* Westmere */ |
342 | cpuidle_state_table = nehalem_cstates; | 342 | cpuidle_state_table = nehalem_cstates; |
343 | auto_demotion_disable_flags = | 343 | auto_demotion_disable_flags = |
344 | (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE); | 344 | (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE); |
345 | break; | 345 | break; |
346 | 346 | ||
347 | case 0x1C: /* 28 - Atom Processor */ | 347 | case 0x1C: /* 28 - Atom Processor */ |
348 | cpuidle_state_table = atom_cstates; | 348 | cpuidle_state_table = atom_cstates; |
349 | break; | 349 | break; |
350 | 350 | ||
351 | case 0x26: /* 38 - Lincroft Atom Processor */ | 351 | case 0x26: /* 38 - Lincroft Atom Processor */ |
352 | cpuidle_state_table = atom_cstates; | 352 | cpuidle_state_table = atom_cstates; |
353 | auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE; | 353 | auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE; |
354 | break; | 354 | break; |
355 | 355 | ||
356 | case 0x2A: /* SNB */ | 356 | case 0x2A: /* SNB */ |
357 | case 0x2D: /* SNB Xeon */ | 357 | case 0x2D: /* SNB Xeon */ |
358 | cpuidle_state_table = snb_cstates; | 358 | cpuidle_state_table = snb_cstates; |
359 | break; | 359 | break; |
360 | 360 | ||
361 | default: | 361 | default: |
362 | pr_debug(PREFIX "does not run on family %d model %d\n", | 362 | pr_debug(PREFIX "does not run on family %d model %d\n", |
363 | boot_cpu_data.x86, boot_cpu_data.x86_model); | 363 | boot_cpu_data.x86, boot_cpu_data.x86_model); |
364 | return -ENODEV; | 364 | return -ENODEV; |
365 | } | 365 | } |
366 | 366 | ||
367 | if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ | 367 | if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ |
368 | lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; | 368 | lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; |
369 | else { | 369 | else { |
370 | smp_call_function(__setup_broadcast_timer, (void *)true, 1); | 370 | smp_call_function(__setup_broadcast_timer, (void *)true, 1); |
371 | register_cpu_notifier(&setup_broadcast_notifier); | 371 | register_cpu_notifier(&setup_broadcast_notifier); |
372 | } | 372 | } |
373 | 373 | ||
374 | pr_debug(PREFIX "v" INTEL_IDLE_VERSION | 374 | pr_debug(PREFIX "v" INTEL_IDLE_VERSION |
375 | " model 0x%X\n", boot_cpu_data.x86_model); | 375 | " model 0x%X\n", boot_cpu_data.x86_model); |
376 | 376 | ||
377 | pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n", | 377 | pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n", |
378 | lapic_timer_reliable_states); | 378 | lapic_timer_reliable_states); |
379 | return 0; | 379 | return 0; |
380 | } | 380 | } |
381 | 381 | ||
382 | /* | 382 | /* |
383 | * intel_idle_cpuidle_devices_uninit() | 383 | * intel_idle_cpuidle_devices_uninit() |
384 | * unregister, free cpuidle_devices | 384 | * unregister, free cpuidle_devices |
385 | */ | 385 | */ |
386 | static void intel_idle_cpuidle_devices_uninit(void) | 386 | static void intel_idle_cpuidle_devices_uninit(void) |
387 | { | 387 | { |
388 | int i; | 388 | int i; |
389 | struct cpuidle_device *dev; | 389 | struct cpuidle_device *dev; |
390 | 390 | ||
391 | for_each_online_cpu(i) { | 391 | for_each_online_cpu(i) { |
392 | dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); | 392 | dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); |
393 | cpuidle_unregister_device(dev); | 393 | cpuidle_unregister_device(dev); |
394 | } | 394 | } |
395 | 395 | ||
396 | free_percpu(intel_idle_cpuidle_devices); | 396 | free_percpu(intel_idle_cpuidle_devices); |
397 | return; | 397 | return; |
398 | } | 398 | } |
399 | /* | 399 | /* |
400 | * intel_idle_cpuidle_devices_init() | 400 | * intel_idle_cpuidle_devices_init() |
401 | * allocate, initialize, register cpuidle_devices | 401 | * allocate, initialize, register cpuidle_devices |
402 | */ | 402 | */ |
403 | static int intel_idle_cpuidle_devices_init(void) | 403 | static int intel_idle_cpuidle_devices_init(void) |
404 | { | 404 | { |
405 | int i, cstate; | 405 | int i, cstate; |
406 | struct cpuidle_device *dev; | 406 | struct cpuidle_device *dev; |
407 | 407 | ||
408 | intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); | 408 | intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); |
409 | if (intel_idle_cpuidle_devices == NULL) | 409 | if (intel_idle_cpuidle_devices == NULL) |
410 | return -ENOMEM; | 410 | return -ENOMEM; |
411 | 411 | ||
412 | for_each_online_cpu(i) { | 412 | for_each_online_cpu(i) { |
413 | dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); | 413 | dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); |
414 | 414 | ||
415 | dev->state_count = 1; | 415 | dev->state_count = 1; |
416 | 416 | ||
417 | for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { | 417 | for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { |
418 | int num_substates; | 418 | int num_substates; |
419 | 419 | ||
420 | if (cstate > max_cstate) { | 420 | if (cstate > max_cstate) { |
421 | printk(PREFIX "max_cstate %d reached\n", | 421 | printk(PREFIX "max_cstate %d reached\n", |
422 | max_cstate); | 422 | max_cstate); |
423 | break; | 423 | break; |
424 | } | 424 | } |
425 | 425 | ||
426 | /* does the state exist in CPUID.MWAIT? */ | 426 | /* does the state exist in CPUID.MWAIT? */ |
427 | num_substates = (mwait_substates >> ((cstate) * 4)) | 427 | num_substates = (mwait_substates >> ((cstate) * 4)) |
428 | & MWAIT_SUBSTATE_MASK; | 428 | & MWAIT_SUBSTATE_MASK; |
429 | if (num_substates == 0) | 429 | if (num_substates == 0) |
430 | continue; | 430 | continue; |
431 | /* is the state not enabled? */ | 431 | /* is the state not enabled? */ |
432 | if (cpuidle_state_table[cstate].enter == NULL) { | 432 | if (cpuidle_state_table[cstate].enter == NULL) { |
433 | /* does the driver not know about the state? */ | 433 | /* does the driver not know about the state? */ |
434 | if (*cpuidle_state_table[cstate].name == '\0') | 434 | if (*cpuidle_state_table[cstate].name == '\0') |
435 | pr_debug(PREFIX "unaware of model 0x%x" | 435 | pr_debug(PREFIX "unaware of model 0x%x" |
436 | " MWAIT %d please" | 436 | " MWAIT %d please" |
437 | " contact lenb@kernel.org", | 437 | " contact lenb@kernel.org", |
438 | boot_cpu_data.x86_model, cstate); | 438 | boot_cpu_data.x86_model, cstate); |
439 | continue; | 439 | continue; |
440 | } | 440 | } |
441 | 441 | ||
442 | if ((cstate > 2) && | 442 | if ((cstate > 2) && |
443 | !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) | 443 | !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) |
444 | mark_tsc_unstable("TSC halts in idle" | 444 | mark_tsc_unstable("TSC halts in idle" |
445 | " states deeper than C2"); | 445 | " states deeper than C2"); |
446 | 446 | ||
447 | dev->states[dev->state_count] = /* structure copy */ | 447 | dev->states[dev->state_count] = /* structure copy */ |
448 | cpuidle_state_table[cstate]; | 448 | cpuidle_state_table[cstate]; |
449 | 449 | ||
450 | dev->state_count += 1; | 450 | dev->state_count += 1; |
451 | } | 451 | } |
452 | 452 | ||
453 | dev->cpu = i; | 453 | dev->cpu = i; |
454 | if (cpuidle_register_device(dev)) { | 454 | if (cpuidle_register_device(dev)) { |
455 | pr_debug(PREFIX "cpuidle_register_device %d failed!\n", | 455 | pr_debug(PREFIX "cpuidle_register_device %d failed!\n", |
456 | i); | 456 | i); |
457 | intel_idle_cpuidle_devices_uninit(); | 457 | intel_idle_cpuidle_devices_uninit(); |
458 | return -EIO; | 458 | return -EIO; |
459 | } | 459 | } |
460 | } | 460 | } |
461 | if (auto_demotion_disable_flags) | 461 | if (auto_demotion_disable_flags) |
462 | smp_call_function(auto_demotion_disable, NULL, 1); | 462 | smp_call_function(auto_demotion_disable, NULL, 1); |
463 | 463 | ||
464 | return 0; | 464 | return 0; |
465 | } | 465 | } |
466 | 466 | ||
467 | 467 | ||
468 | static int __init intel_idle_init(void) | 468 | static int __init intel_idle_init(void) |
469 | { | 469 | { |
470 | int retval; | 470 | int retval; |
471 | 471 | ||
472 | /* Do not load intel_idle at all for now if idle= is passed */ | 472 | /* Do not load intel_idle at all for now if idle= is passed */ |
473 | if (boot_option_idle_override != IDLE_NO_OVERRIDE) | 473 | if (boot_option_idle_override != IDLE_NO_OVERRIDE) |
474 | return -ENODEV; | 474 | return -ENODEV; |
475 | 475 | ||
476 | retval = intel_idle_probe(); | 476 | retval = intel_idle_probe(); |
477 | if (retval) | 477 | if (retval) |
478 | return retval; | 478 | return retval; |
479 | 479 | ||
480 | retval = cpuidle_register_driver(&intel_idle_driver); | 480 | retval = cpuidle_register_driver(&intel_idle_driver); |
481 | if (retval) { | 481 | if (retval) { |
482 | printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", | 482 | printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", |
483 | cpuidle_get_driver()->name); | 483 | cpuidle_get_driver()->name); |
484 | return retval; | 484 | return retval; |
485 | } | 485 | } |
486 | 486 | ||
487 | retval = intel_idle_cpuidle_devices_init(); | 487 | retval = intel_idle_cpuidle_devices_init(); |
488 | if (retval) { | 488 | if (retval) { |
489 | cpuidle_unregister_driver(&intel_idle_driver); | 489 | cpuidle_unregister_driver(&intel_idle_driver); |
490 | return retval; | 490 | return retval; |
491 | } | 491 | } |
492 | 492 | ||
493 | return 0; | 493 | return 0; |
494 | } | 494 | } |
495 | 495 | ||
496 | static void __exit intel_idle_exit(void) | 496 | static void __exit intel_idle_exit(void) |
497 | { | 497 | { |
498 | intel_idle_cpuidle_devices_uninit(); | 498 | intel_idle_cpuidle_devices_uninit(); |
499 | cpuidle_unregister_driver(&intel_idle_driver); | 499 | cpuidle_unregister_driver(&intel_idle_driver); |
500 | 500 | ||
501 | if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) { | 501 | if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) { |
502 | smp_call_function(__setup_broadcast_timer, (void *)false, 1); | 502 | smp_call_function(__setup_broadcast_timer, (void *)false, 1); |
503 | unregister_cpu_notifier(&setup_broadcast_notifier); | 503 | unregister_cpu_notifier(&setup_broadcast_notifier); |
504 | } | 504 | } |
505 | 505 | ||
506 | return; | 506 | return; |
507 | } | 507 | } |
508 | 508 | ||
509 | module_init(intel_idle_init); | 509 | module_init(intel_idle_init); |
510 | module_exit(intel_idle_exit); | 510 | module_exit(intel_idle_exit); |
511 | 511 | ||
512 | module_param(max_cstate, int, 0444); | 512 | module_param(max_cstate, int, 0444); |
513 | 513 | ||
514 | MODULE_AUTHOR("Len Brown <len.brown@intel.com>"); | 514 | MODULE_AUTHOR("Len Brown <len.brown@intel.com>"); |
515 | MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION); | 515 | MODULE_DESCRIPTION("Cpuidle driver for Intel Hardware v" INTEL_IDLE_VERSION); |
516 | MODULE_LICENSE("GPL"); | 516 | MODULE_LICENSE("GPL"); |
517 | 517 |
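The names assigned in the tables above are what userspace ultimately reads back through the cpuidle sysfs interface. A minimal sketch, assuming a kernel with cpuidle enabled and sysfs mounted at the usual location, that walks cpu0's state directories and prints each state's name and desc attributes:

#include <dirent.h>
#include <stdio.h>
#include <string.h>

/* Read a small sysfs attribute into buf and strip the trailing newline. */
static int read_attr(const char *path, char *buf, size_t len)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (!fgets(buf, (int)len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';
	return 0;
}

int main(void)
{
	const char *base = "/sys/devices/system/cpu/cpu0/cpuidle";
	DIR *dir = opendir(base);
	struct dirent *de;

	if (!dir) {
		perror(base);
		return 1;
	}
	while ((de = readdir(dir)) != NULL) {
		char path[256], name[64], desc[64];

		/* Only the stateN directories carry per-state attributes. */
		if (strncmp(de->d_name, "state", 5) != 0)
			continue;
		snprintf(path, sizeof(path), "%s/%s/name", base, de->d_name);
		if (read_attr(path, name, sizeof(name)))
			continue;
		snprintf(path, sizeof(path), "%s/%s/desc", base, de->d_name);
		if (read_attr(path, desc, sizeof(desc)))
			desc[0] = '\0';
		printf("%s: %s (%s)\n", de->d_name, name, desc);
	}
	closedir(dir);
	return 0;
}

On a Nehalem machine running this driver, the expected names after this patch are C1-NHM, C3-NHM and C6-NHM (previously NHM-C1, NHM-C3 and NHM-C6); the exact set of states depends on the CPU model and on max_cstate.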