Commit b66b8b9a4a79087dde1b358a016e5c8739ccf186
Committed by
Greg Kroah-Hartman
1 parent
3bd391f056
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
intel-idle: convert to x86_cpu_id auto probing
With this it should be automatically loaded on suitable systems by udev. The old switch () is replaced with a table based approach, this also cleans up the code. Cc: Len Brown <lenb@kernel.org> Signed-off-by: Andi Kleen <ak@linux.intel.com> Signed-off-by: Thomas Renninger <trenn@suse.de> Acked-by: H. Peter Anvin <hpa@zytor.com> Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Showing 1 changed file with 66 additions and 50 deletions Inline Diff
drivers/idle/intel_idle.c
1 | /* | 1 | /* |
2 | * intel_idle.c - native hardware idle loop for modern Intel processors | 2 | * intel_idle.c - native hardware idle loop for modern Intel processors |
3 | * | 3 | * |
4 | * Copyright (c) 2010, Intel Corporation. | 4 | * Copyright (c) 2010, Intel Corporation. |
5 | * Len Brown <len.brown@intel.com> | 5 | * Len Brown <len.brown@intel.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms and conditions of the GNU General Public License, | 8 | * under the terms and conditions of the GNU General Public License, |
9 | * version 2, as published by the Free Software Foundation. | 9 | * version 2, as published by the Free Software Foundation. |
10 | * | 10 | * |
11 | * This program is distributed in the hope it will be useful, but WITHOUT | 11 | * This program is distributed in the hope it will be useful, but WITHOUT |
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
14 | * more details. | 14 | * more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License along with | 16 | * You should have received a copy of the GNU General Public License along with |
17 | * this program; if not, write to the Free Software Foundation, Inc., | 17 | * this program; if not, write to the Free Software Foundation, Inc., |
18 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | 18 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * intel_idle is a cpuidle driver that loads on specific Intel processors | 22 | * intel_idle is a cpuidle driver that loads on specific Intel processors |
23 | * in lieu of the legacy ACPI processor_idle driver. The intent is to | 23 | * in lieu of the legacy ACPI processor_idle driver. The intent is to |
24 | * make Linux more efficient on these processors, as intel_idle knows | 24 | * make Linux more efficient on these processors, as intel_idle knows |
25 | * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs. | 25 | * more than ACPI, as well as make Linux more immune to ACPI BIOS bugs. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Design Assumptions | 29 | * Design Assumptions |
30 | * | 30 | * |
31 | * All CPUs have same idle states as boot CPU | 31 | * All CPUs have same idle states as boot CPU |
32 | * | 32 | * |
33 | * Chipset BM_STS (bus master status) bit is a NOP | 33 | * Chipset BM_STS (bus master status) bit is a NOP |
34 | * for preventing entry into deep C-states | 34 | * for preventing entry into deep C-states |
35 | */ | 35 | */ |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Known limitations | 38 | * Known limitations |
39 | * | 39 | * |
40 | * The driver currently initializes for_each_online_cpu() upon modprobe. | 40 | * The driver currently initializes for_each_online_cpu() upon modprobe. |
41 | * It is unaware of subsequent processors hot-added to the system. | 41 | * It is unaware of subsequent processors hot-added to the system. |
42 | * This means that if you boot with maxcpus=n and later online | 42 | * This means that if you boot with maxcpus=n and later online |
43 | * processors above n, those processors will use C1 only. | 43 | * processors above n, those processors will use C1 only. |
44 | * | 44 | * |
45 | * ACPI has a .suspend hack to turn off deep c-states during suspend | 45 | * ACPI has a .suspend hack to turn off deep c-states during suspend |
46 | * to avoid complications with the lapic timer workaround. | 46 | * to avoid complications with the lapic timer workaround. |
47 | * Have not seen issues with suspend, but may need same workaround here. | 47 | * Have not seen issues with suspend, but may need same workaround here. |
48 | * | 48 | * |
49 | * There is currently no kernel-based automatic probing/loading mechanism | 49 | * There is currently no kernel-based automatic probing/loading mechanism |
50 | * if the driver is built as a module. | 50 | * if the driver is built as a module. |
51 | */ | 51 | */ |
52 | 52 | ||
53 | /* un-comment DEBUG to enable pr_debug() statements */ | 53 | /* un-comment DEBUG to enable pr_debug() statements */ |
54 | #define DEBUG | 54 | #define DEBUG |
55 | 55 | ||
56 | #include <linux/kernel.h> | 56 | #include <linux/kernel.h> |
57 | #include <linux/cpuidle.h> | 57 | #include <linux/cpuidle.h> |
58 | #include <linux/clockchips.h> | 58 | #include <linux/clockchips.h> |
59 | #include <linux/hrtimer.h> /* ktime_get_real() */ | 59 | #include <linux/hrtimer.h> /* ktime_get_real() */ |
60 | #include <trace/events/power.h> | 60 | #include <trace/events/power.h> |
61 | #include <linux/sched.h> | 61 | #include <linux/sched.h> |
62 | #include <linux/notifier.h> | 62 | #include <linux/notifier.h> |
63 | #include <linux/cpu.h> | 63 | #include <linux/cpu.h> |
64 | #include <linux/module.h> | 64 | #include <linux/module.h> |
65 | #include <asm/cpu_device_id.h> | ||
65 | #include <asm/mwait.h> | 66 | #include <asm/mwait.h> |
66 | #include <asm/msr.h> | 67 | #include <asm/msr.h> |
67 | 68 | ||
68 | #define INTEL_IDLE_VERSION "0.4" | 69 | #define INTEL_IDLE_VERSION "0.4" |
69 | #define PREFIX "intel_idle: " | 70 | #define PREFIX "intel_idle: " |
70 | 71 | ||
71 | static struct cpuidle_driver intel_idle_driver = { | 72 | static struct cpuidle_driver intel_idle_driver = { |
72 | .name = "intel_idle", | 73 | .name = "intel_idle", |
73 | .owner = THIS_MODULE, | 74 | .owner = THIS_MODULE, |
74 | }; | 75 | }; |
75 | /* intel_idle.max_cstate=0 disables driver */ | 76 | /* intel_idle.max_cstate=0 disables driver */ |
76 | static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; | 77 | static int max_cstate = MWAIT_MAX_NUM_CSTATES - 1; |
77 | 78 | ||
78 | static unsigned int mwait_substates; | 79 | static unsigned int mwait_substates; |
79 | 80 | ||
80 | #define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF | 81 | #define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF |
81 | /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ | 82 | /* Reliable LAPIC Timer States, bit 1 for C1 etc. */ |
82 | static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ | 83 | static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ |
83 | 84 | ||
85 | struct idle_cpu { | ||
86 | struct cpuidle_state *state_table; | ||
87 | |||
88 | /* | ||
89 | * Hardware C-state auto-demotion may not always be optimal. | ||
90 | * Indicate which enable bits to clear here. | ||
91 | */ | ||
92 | unsigned long auto_demotion_disable_flags; | ||
93 | }; | ||
94 | |||
95 | static const struct idle_cpu *icpu; | ||
84 | static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; | 96 | static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; |
85 | static int intel_idle(struct cpuidle_device *dev, | 97 | static int intel_idle(struct cpuidle_device *dev, |
86 | struct cpuidle_driver *drv, int index); | 98 | struct cpuidle_driver *drv, int index); |
87 | 99 | ||
88 | static struct cpuidle_state *cpuidle_state_table; | 100 | static struct cpuidle_state *cpuidle_state_table; |
89 | 101 | ||
90 | /* | 102 | /* |
91 | * Hardware C-state auto-demotion may not always be optimal. | ||
92 | * Indicate which enable bits to clear here. | ||
93 | */ | ||
94 | static unsigned long long auto_demotion_disable_flags; | ||
95 | |||
96 | /* | ||
97 | * Set this flag for states where the HW flushes the TLB for us | 103 | * Set this flag for states where the HW flushes the TLB for us |
98 | * and so we don't need cross-calls to keep it consistent. | 104 | * and so we don't need cross-calls to keep it consistent. |
99 | * If this flag is set, SW flushes the TLB, so even if the | 105 | * If this flag is set, SW flushes the TLB, so even if the |
100 | * HW doesn't do the flushing, this flag is safe to use. | 106 | * HW doesn't do the flushing, this flag is safe to use. |
101 | */ | 107 | */ |
102 | #define CPUIDLE_FLAG_TLB_FLUSHED 0x10000 | 108 | #define CPUIDLE_FLAG_TLB_FLUSHED 0x10000 |
103 | 109 | ||
104 | /* | 110 | /* |
105 | * States are indexed by the cstate number, | 111 | * States are indexed by the cstate number, |
106 | * which is also the index into the MWAIT hint array. | 112 | * which is also the index into the MWAIT hint array. |
107 | * Thus C0 is a dummy. | 113 | * Thus C0 is a dummy. |
108 | */ | 114 | */ |
109 | static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { | 115 | static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = { |
110 | { /* MWAIT C0 */ }, | 116 | { /* MWAIT C0 */ }, |
111 | { /* MWAIT C1 */ | 117 | { /* MWAIT C1 */ |
112 | .name = "C1-NHM", | 118 | .name = "C1-NHM", |
113 | .desc = "MWAIT 0x00", | 119 | .desc = "MWAIT 0x00", |
114 | .flags = CPUIDLE_FLAG_TIME_VALID, | 120 | .flags = CPUIDLE_FLAG_TIME_VALID, |
115 | .exit_latency = 3, | 121 | .exit_latency = 3, |
116 | .target_residency = 6, | 122 | .target_residency = 6, |
117 | .enter = &intel_idle }, | 123 | .enter = &intel_idle }, |
118 | { /* MWAIT C2 */ | 124 | { /* MWAIT C2 */ |
119 | .name = "C3-NHM", | 125 | .name = "C3-NHM", |
120 | .desc = "MWAIT 0x10", | 126 | .desc = "MWAIT 0x10", |
121 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 127 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
122 | .exit_latency = 20, | 128 | .exit_latency = 20, |
123 | .target_residency = 80, | 129 | .target_residency = 80, |
124 | .enter = &intel_idle }, | 130 | .enter = &intel_idle }, |
125 | { /* MWAIT C3 */ | 131 | { /* MWAIT C3 */ |
126 | .name = "C6-NHM", | 132 | .name = "C6-NHM", |
127 | .desc = "MWAIT 0x20", | 133 | .desc = "MWAIT 0x20", |
128 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 134 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
129 | .exit_latency = 200, | 135 | .exit_latency = 200, |
130 | .target_residency = 800, | 136 | .target_residency = 800, |
131 | .enter = &intel_idle }, | 137 | .enter = &intel_idle }, |
132 | }; | 138 | }; |
133 | 139 | ||
134 | static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { | 140 | static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { |
135 | { /* MWAIT C0 */ }, | 141 | { /* MWAIT C0 */ }, |
136 | { /* MWAIT C1 */ | 142 | { /* MWAIT C1 */ |
137 | .name = "C1-SNB", | 143 | .name = "C1-SNB", |
138 | .desc = "MWAIT 0x00", | 144 | .desc = "MWAIT 0x00", |
139 | .flags = CPUIDLE_FLAG_TIME_VALID, | 145 | .flags = CPUIDLE_FLAG_TIME_VALID, |
140 | .exit_latency = 1, | 146 | .exit_latency = 1, |
141 | .target_residency = 1, | 147 | .target_residency = 1, |
142 | .enter = &intel_idle }, | 148 | .enter = &intel_idle }, |
143 | { /* MWAIT C2 */ | 149 | { /* MWAIT C2 */ |
144 | .name = "C3-SNB", | 150 | .name = "C3-SNB", |
145 | .desc = "MWAIT 0x10", | 151 | .desc = "MWAIT 0x10", |
146 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 152 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
147 | .exit_latency = 80, | 153 | .exit_latency = 80, |
148 | .target_residency = 211, | 154 | .target_residency = 211, |
149 | .enter = &intel_idle }, | 155 | .enter = &intel_idle }, |
150 | { /* MWAIT C3 */ | 156 | { /* MWAIT C3 */ |
151 | .name = "C6-SNB", | 157 | .name = "C6-SNB", |
152 | .desc = "MWAIT 0x20", | 158 | .desc = "MWAIT 0x20", |
153 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 159 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
154 | .exit_latency = 104, | 160 | .exit_latency = 104, |
155 | .target_residency = 345, | 161 | .target_residency = 345, |
156 | .enter = &intel_idle }, | 162 | .enter = &intel_idle }, |
157 | { /* MWAIT C4 */ | 163 | { /* MWAIT C4 */ |
158 | .name = "C7-SNB", | 164 | .name = "C7-SNB", |
159 | .desc = "MWAIT 0x30", | 165 | .desc = "MWAIT 0x30", |
160 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 166 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
161 | .exit_latency = 109, | 167 | .exit_latency = 109, |
162 | .target_residency = 345, | 168 | .target_residency = 345, |
163 | .enter = &intel_idle }, | 169 | .enter = &intel_idle }, |
164 | }; | 170 | }; |
165 | 171 | ||
166 | static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { | 172 | static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { |
167 | { /* MWAIT C0 */ }, | 173 | { /* MWAIT C0 */ }, |
168 | { /* MWAIT C1 */ | 174 | { /* MWAIT C1 */ |
169 | .name = "C1-ATM", | 175 | .name = "C1-ATM", |
170 | .desc = "MWAIT 0x00", | 176 | .desc = "MWAIT 0x00", |
171 | .flags = CPUIDLE_FLAG_TIME_VALID, | 177 | .flags = CPUIDLE_FLAG_TIME_VALID, |
172 | .exit_latency = 1, | 178 | .exit_latency = 1, |
173 | .target_residency = 4, | 179 | .target_residency = 4, |
174 | .enter = &intel_idle }, | 180 | .enter = &intel_idle }, |
175 | { /* MWAIT C2 */ | 181 | { /* MWAIT C2 */ |
176 | .name = "C2-ATM", | 182 | .name = "C2-ATM", |
177 | .desc = "MWAIT 0x10", | 183 | .desc = "MWAIT 0x10", |
178 | .flags = CPUIDLE_FLAG_TIME_VALID, | 184 | .flags = CPUIDLE_FLAG_TIME_VALID, |
179 | .exit_latency = 20, | 185 | .exit_latency = 20, |
180 | .target_residency = 80, | 186 | .target_residency = 80, |
181 | .enter = &intel_idle }, | 187 | .enter = &intel_idle }, |
182 | { /* MWAIT C3 */ }, | 188 | { /* MWAIT C3 */ }, |
183 | { /* MWAIT C4 */ | 189 | { /* MWAIT C4 */ |
184 | .name = "C4-ATM", | 190 | .name = "C4-ATM", |
185 | .desc = "MWAIT 0x30", | 191 | .desc = "MWAIT 0x30", |
186 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 192 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
187 | .exit_latency = 100, | 193 | .exit_latency = 100, |
188 | .target_residency = 400, | 194 | .target_residency = 400, |
189 | .enter = &intel_idle }, | 195 | .enter = &intel_idle }, |
190 | { /* MWAIT C5 */ }, | 196 | { /* MWAIT C5 */ }, |
191 | { /* MWAIT C6 */ | 197 | { /* MWAIT C6 */ |
192 | .name = "C6-ATM", | 198 | .name = "C6-ATM", |
193 | .desc = "MWAIT 0x52", | 199 | .desc = "MWAIT 0x52", |
194 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, | 200 | .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, |
195 | .exit_latency = 140, | 201 | .exit_latency = 140, |
196 | .target_residency = 560, | 202 | .target_residency = 560, |
197 | .enter = &intel_idle }, | 203 | .enter = &intel_idle }, |
198 | }; | 204 | }; |
199 | 205 | ||
200 | static long get_driver_data(int cstate) | 206 | static long get_driver_data(int cstate) |
201 | { | 207 | { |
202 | int driver_data; | 208 | int driver_data; |
203 | switch (cstate) { | 209 | switch (cstate) { |
204 | 210 | ||
205 | case 1: /* MWAIT C1 */ | 211 | case 1: /* MWAIT C1 */ |
206 | driver_data = 0x00; | 212 | driver_data = 0x00; |
207 | break; | 213 | break; |
208 | case 2: /* MWAIT C2 */ | 214 | case 2: /* MWAIT C2 */ |
209 | driver_data = 0x10; | 215 | driver_data = 0x10; |
210 | break; | 216 | break; |
211 | case 3: /* MWAIT C3 */ | 217 | case 3: /* MWAIT C3 */ |
212 | driver_data = 0x20; | 218 | driver_data = 0x20; |
213 | break; | 219 | break; |
214 | case 4: /* MWAIT C4 */ | 220 | case 4: /* MWAIT C4 */ |
215 | driver_data = 0x30; | 221 | driver_data = 0x30; |
216 | break; | 222 | break; |
217 | case 5: /* MWAIT C5 */ | 223 | case 5: /* MWAIT C5 */ |
218 | driver_data = 0x40; | 224 | driver_data = 0x40; |
219 | break; | 225 | break; |
220 | case 6: /* MWAIT C6 */ | 226 | case 6: /* MWAIT C6 */ |
221 | driver_data = 0x52; | 227 | driver_data = 0x52; |
222 | break; | 228 | break; |
223 | default: | 229 | default: |
224 | driver_data = 0x00; | 230 | driver_data = 0x00; |
225 | } | 231 | } |
226 | return driver_data; | 232 | return driver_data; |
227 | } | 233 | } |
228 | 234 | ||
229 | /** | 235 | /** |
230 | * intel_idle | 236 | * intel_idle |
231 | * @dev: cpuidle_device | 237 | * @dev: cpuidle_device |
232 | * @drv: cpuidle driver | 238 | * @drv: cpuidle driver |
233 | * @index: index of cpuidle state | 239 | * @index: index of cpuidle state |
234 | * | 240 | * |
235 | * Must be called under local_irq_disable(). | 241 | * Must be called under local_irq_disable(). |
236 | */ | 242 | */ |
237 | static int intel_idle(struct cpuidle_device *dev, | 243 | static int intel_idle(struct cpuidle_device *dev, |
238 | struct cpuidle_driver *drv, int index) | 244 | struct cpuidle_driver *drv, int index) |
239 | { | 245 | { |
240 | unsigned long ecx = 1; /* break on interrupt flag */ | 246 | unsigned long ecx = 1; /* break on interrupt flag */ |
241 | struct cpuidle_state *state = &drv->states[index]; | 247 | struct cpuidle_state *state = &drv->states[index]; |
242 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 248 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
243 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); | 249 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); |
244 | unsigned int cstate; | 250 | unsigned int cstate; |
245 | ktime_t kt_before, kt_after; | 251 | ktime_t kt_before, kt_after; |
246 | s64 usec_delta; | 252 | s64 usec_delta; |
247 | int cpu = smp_processor_id(); | 253 | int cpu = smp_processor_id(); |
248 | 254 | ||
249 | cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; | 255 | cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1; |
250 | 256 | ||
251 | /* | 257 | /* |
252 | * leave_mm() to avoid costly and often unnecessary wakeups | 258 | * leave_mm() to avoid costly and often unnecessary wakeups |
253 | * for flushing the user TLB's associated with the active mm. | 259 | * for flushing the user TLB's associated with the active mm. |
254 | */ | 260 | */ |
255 | if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED) | 261 | if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED) |
256 | leave_mm(cpu); | 262 | leave_mm(cpu); |
257 | 263 | ||
258 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | 264 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) |
259 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); | 265 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu); |
260 | 266 | ||
261 | kt_before = ktime_get_real(); | 267 | kt_before = ktime_get_real(); |
262 | 268 | ||
263 | stop_critical_timings(); | 269 | stop_critical_timings(); |
264 | if (!need_resched()) { | 270 | if (!need_resched()) { |
265 | 271 | ||
266 | __monitor((void *)¤t_thread_info()->flags, 0, 0); | 272 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
267 | smp_mb(); | 273 | smp_mb(); |
268 | if (!need_resched()) | 274 | if (!need_resched()) |
269 | __mwait(eax, ecx); | 275 | __mwait(eax, ecx); |
270 | } | 276 | } |
271 | 277 | ||
272 | start_critical_timings(); | 278 | start_critical_timings(); |
273 | 279 | ||
274 | kt_after = ktime_get_real(); | 280 | kt_after = ktime_get_real(); |
275 | usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before)); | 281 | usec_delta = ktime_to_us(ktime_sub(kt_after, kt_before)); |
276 | 282 | ||
277 | local_irq_enable(); | 283 | local_irq_enable(); |
278 | 284 | ||
279 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) | 285 | if (!(lapic_timer_reliable_states & (1 << (cstate)))) |
280 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); | 286 | clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu); |
281 | 287 | ||
282 | /* Update cpuidle counters */ | 288 | /* Update cpuidle counters */ |
283 | dev->last_residency = (int)usec_delta; | 289 | dev->last_residency = (int)usec_delta; |
284 | 290 | ||
285 | return index; | 291 | return index; |
286 | } | 292 | } |
287 | 293 | ||
288 | static void __setup_broadcast_timer(void *arg) | 294 | static void __setup_broadcast_timer(void *arg) |
289 | { | 295 | { |
290 | unsigned long reason = (unsigned long)arg; | 296 | unsigned long reason = (unsigned long)arg; |
291 | int cpu = smp_processor_id(); | 297 | int cpu = smp_processor_id(); |
292 | 298 | ||
293 | reason = reason ? | 299 | reason = reason ? |
294 | CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; | 300 | CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF; |
295 | 301 | ||
296 | clockevents_notify(reason, &cpu); | 302 | clockevents_notify(reason, &cpu); |
297 | } | 303 | } |
298 | 304 | ||
299 | static int setup_broadcast_cpuhp_notify(struct notifier_block *n, | 305 | static int setup_broadcast_cpuhp_notify(struct notifier_block *n, |
300 | unsigned long action, void *hcpu) | 306 | unsigned long action, void *hcpu) |
301 | { | 307 | { |
302 | int hotcpu = (unsigned long)hcpu; | 308 | int hotcpu = (unsigned long)hcpu; |
303 | 309 | ||
304 | switch (action & 0xf) { | 310 | switch (action & 0xf) { |
305 | case CPU_ONLINE: | 311 | case CPU_ONLINE: |
306 | smp_call_function_single(hotcpu, __setup_broadcast_timer, | 312 | smp_call_function_single(hotcpu, __setup_broadcast_timer, |
307 | (void *)true, 1); | 313 | (void *)true, 1); |
308 | break; | 314 | break; |
309 | } | 315 | } |
310 | return NOTIFY_OK; | 316 | return NOTIFY_OK; |
311 | } | 317 | } |
312 | 318 | ||
313 | static struct notifier_block setup_broadcast_notifier = { | 319 | static struct notifier_block setup_broadcast_notifier = { |
314 | .notifier_call = setup_broadcast_cpuhp_notify, | 320 | .notifier_call = setup_broadcast_cpuhp_notify, |
315 | }; | 321 | }; |
316 | 322 | ||
317 | static void auto_demotion_disable(void *dummy) | 323 | static void auto_demotion_disable(void *dummy) |
318 | { | 324 | { |
319 | unsigned long long msr_bits; | 325 | unsigned long long msr_bits; |
320 | 326 | ||
321 | rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); | 327 | rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); |
322 | msr_bits &= ~auto_demotion_disable_flags; | 328 | msr_bits &= ~(icpu->auto_demotion_disable_flags); |
323 | wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); | 329 | wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); |
324 | } | 330 | } |
325 | 331 | ||
332 | static const struct idle_cpu idle_cpu_nehalem = { | ||
333 | .state_table = nehalem_cstates, | ||
334 | }; | ||
335 | |||
336 | static const struct idle_cpu idle_cpu_westmere = { | ||
337 | .state_table = nehalem_cstates, | ||
338 | .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, | ||
339 | }; | ||
340 | |||
341 | static const struct idle_cpu idle_cpu_atom = { | ||
342 | .state_table = atom_cstates, | ||
343 | }; | ||
344 | |||
345 | static const struct idle_cpu idle_cpu_lincroft = { | ||
346 | .state_table = atom_cstates, | ||
347 | .auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE, | ||
348 | }; | ||
349 | |||
350 | static const struct idle_cpu idle_cpu_snb = { | ||
351 | .state_table = snb_cstates, | ||
352 | }; | ||
353 | |||
354 | #define ICPU(model, cpu) \ | ||
355 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, (unsigned long)&cpu } | ||
356 | |||
357 | static const struct x86_cpu_id intel_idle_ids[] = { | ||
358 | ICPU(0x1a, idle_cpu_nehalem), | ||
359 | ICPU(0x1e, idle_cpu_nehalem), | ||
360 | ICPU(0x1f, idle_cpu_nehalem), | ||
361 | ICPU(0x25, idle_cpu_westmere), | ||
362 | ICPU(0x2c, idle_cpu_westmere), | ||
363 | ICPU(0x2f, idle_cpu_westmere), | ||
364 | ICPU(0x1c, idle_cpu_atom), | ||
365 | ICPU(0x26, idle_cpu_lincroft), | ||
366 | ICPU(0x2f, idle_cpu_westmere), | ||
367 | ICPU(0x2a, idle_cpu_snb), | ||
368 | ICPU(0x2d, idle_cpu_snb), | ||
369 | {} | ||
370 | }; | ||
371 | MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); | ||
372 | |||
326 | /* | 373 | /* |
327 | * intel_idle_probe() | 374 | * intel_idle_probe() |
328 | */ | 375 | */ |
329 | static int intel_idle_probe(void) | 376 | static int intel_idle_probe(void) |
330 | { | 377 | { |
331 | unsigned int eax, ebx, ecx; | 378 | unsigned int eax, ebx, ecx; |
379 | const struct x86_cpu_id *id; | ||
332 | 380 | ||
333 | if (max_cstate == 0) { | 381 | if (max_cstate == 0) { |
334 | pr_debug(PREFIX "disabled\n"); | 382 | pr_debug(PREFIX "disabled\n"); |
335 | return -EPERM; | 383 | return -EPERM; |
336 | } | 384 | } |
337 | 385 | ||
338 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) | 386 | id = x86_match_cpu(intel_idle_ids); |
387 | if (!id) { | ||
388 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | ||
389 | boot_cpu_data.x86 == 6) | ||
390 | pr_debug(PREFIX "does not run on family %d model %d\n", | ||
391 | boot_cpu_data.x86, boot_cpu_data.x86_model); | ||
339 | return -ENODEV; | 392 | return -ENODEV; |
393 | } | ||
340 | 394 | ||
341 | if (!boot_cpu_has(X86_FEATURE_MWAIT)) | ||
342 | return -ENODEV; | ||
343 | |||
344 | if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) | 395 | if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) |
345 | return -ENODEV; | 396 | return -ENODEV; |
346 | 397 | ||
347 | cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); | 398 | cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates); |
348 | 399 | ||
349 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || | 400 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || |
350 | !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || | 401 | !(ecx & CPUID5_ECX_INTERRUPT_BREAK) || |
351 | !mwait_substates) | 402 | !mwait_substates) |
352 | return -ENODEV; | 403 | return -ENODEV; |
353 | 404 | ||
354 | pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates); | 405 | pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates); |
355 | 406 | ||
407 | icpu = (const struct idle_cpu *)id->driver_data; | ||
408 | cpuidle_state_table = icpu->state_table; | ||
356 | 409 | ||
357 | if (boot_cpu_data.x86 != 6) /* family 6 */ | ||
358 | return -ENODEV; | ||
359 | |||
360 | switch (boot_cpu_data.x86_model) { | ||
361 | |||
362 | case 0x1A: /* Core i7, Xeon 5500 series */ | ||
363 | case 0x1E: /* Core i7 and i5 Processor - Lynnfield Jasper Forest */ | ||
364 | case 0x1F: /* Core i7 and i5 Processor - Nehalem */ | ||
365 | case 0x2E: /* Nehalem-EX Xeon */ | ||
366 | case 0x2F: /* Westmere-EX Xeon */ | ||
367 | case 0x25: /* Westmere */ | ||
368 | case 0x2C: /* Westmere */ | ||
369 | cpuidle_state_table = nehalem_cstates; | ||
370 | auto_demotion_disable_flags = | ||
371 | (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE); | ||
372 | break; | ||
373 | |||
374 | case 0x1C: /* 28 - Atom Processor */ | ||
375 | cpuidle_state_table = atom_cstates; | ||
376 | break; | ||
377 | |||
378 | case 0x26: /* 38 - Lincroft Atom Processor */ | ||
379 | cpuidle_state_table = atom_cstates; | ||
380 | auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE; | ||
381 | break; | ||
382 | |||
383 | case 0x2A: /* SNB */ | ||
384 | case 0x2D: /* SNB Xeon */ | ||
385 | cpuidle_state_table = snb_cstates; | ||
386 | break; | ||
387 | |||
388 | default: | ||
389 | pr_debug(PREFIX "does not run on family %d model %d\n", | ||
390 | boot_cpu_data.x86, boot_cpu_data.x86_model); | ||
391 | return -ENODEV; | ||
392 | } | ||
393 | |||
394 | if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ | 410 | if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ |
395 | lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; | 411 | lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; |
396 | else { | 412 | else { |
397 | on_each_cpu(__setup_broadcast_timer, (void *)true, 1); | 413 | on_each_cpu(__setup_broadcast_timer, (void *)true, 1); |
398 | register_cpu_notifier(&setup_broadcast_notifier); | 414 | register_cpu_notifier(&setup_broadcast_notifier); |
399 | } | 415 | } |
400 | 416 | ||
401 | pr_debug(PREFIX "v" INTEL_IDLE_VERSION | 417 | pr_debug(PREFIX "v" INTEL_IDLE_VERSION |
402 | " model 0x%X\n", boot_cpu_data.x86_model); | 418 | " model 0x%X\n", boot_cpu_data.x86_model); |
403 | 419 | ||
404 | pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n", | 420 | pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n", |
405 | lapic_timer_reliable_states); | 421 | lapic_timer_reliable_states); |
406 | return 0; | 422 | return 0; |
407 | } | 423 | } |
408 | 424 | ||
409 | /* | 425 | /* |
410 | * intel_idle_cpuidle_devices_uninit() | 426 | * intel_idle_cpuidle_devices_uninit() |
411 | * unregister, free cpuidle_devices | 427 | * unregister, free cpuidle_devices |
412 | */ | 428 | */ |
413 | static void intel_idle_cpuidle_devices_uninit(void) | 429 | static void intel_idle_cpuidle_devices_uninit(void) |
414 | { | 430 | { |
415 | int i; | 431 | int i; |
416 | struct cpuidle_device *dev; | 432 | struct cpuidle_device *dev; |
417 | 433 | ||
418 | for_each_online_cpu(i) { | 434 | for_each_online_cpu(i) { |
419 | dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); | 435 | dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); |
420 | cpuidle_unregister_device(dev); | 436 | cpuidle_unregister_device(dev); |
421 | } | 437 | } |
422 | 438 | ||
423 | free_percpu(intel_idle_cpuidle_devices); | 439 | free_percpu(intel_idle_cpuidle_devices); |
424 | return; | 440 | return; |
425 | } | 441 | } |
426 | /* | 442 | /* |
427 | * intel_idle_cpuidle_driver_init() | 443 | * intel_idle_cpuidle_driver_init() |
428 | * allocate, initialize cpuidle_states | 444 | * allocate, initialize cpuidle_states |
429 | */ | 445 | */ |
430 | static int intel_idle_cpuidle_driver_init(void) | 446 | static int intel_idle_cpuidle_driver_init(void) |
431 | { | 447 | { |
432 | int cstate; | 448 | int cstate; |
433 | struct cpuidle_driver *drv = &intel_idle_driver; | 449 | struct cpuidle_driver *drv = &intel_idle_driver; |
434 | 450 | ||
435 | drv->state_count = 1; | 451 | drv->state_count = 1; |
436 | 452 | ||
437 | for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { | 453 | for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { |
438 | int num_substates; | 454 | int num_substates; |
439 | 455 | ||
440 | if (cstate > max_cstate) { | 456 | if (cstate > max_cstate) { |
441 | printk(PREFIX "max_cstate %d reached\n", | 457 | printk(PREFIX "max_cstate %d reached\n", |
442 | max_cstate); | 458 | max_cstate); |
443 | break; | 459 | break; |
444 | } | 460 | } |
445 | 461 | ||
446 | /* does the state exist in CPUID.MWAIT? */ | 462 | /* does the state exist in CPUID.MWAIT? */ |
447 | num_substates = (mwait_substates >> ((cstate) * 4)) | 463 | num_substates = (mwait_substates >> ((cstate) * 4)) |
448 | & MWAIT_SUBSTATE_MASK; | 464 | & MWAIT_SUBSTATE_MASK; |
449 | if (num_substates == 0) | 465 | if (num_substates == 0) |
450 | continue; | 466 | continue; |
451 | /* is the state not enabled? */ | 467 | /* is the state not enabled? */ |
452 | if (cpuidle_state_table[cstate].enter == NULL) { | 468 | if (cpuidle_state_table[cstate].enter == NULL) { |
453 | /* does the driver not know about the state? */ | 469 | /* does the driver not know about the state? */ |
454 | if (*cpuidle_state_table[cstate].name == '\0') | 470 | if (*cpuidle_state_table[cstate].name == '\0') |
455 | pr_debug(PREFIX "unaware of model 0x%x" | 471 | pr_debug(PREFIX "unaware of model 0x%x" |
456 | " MWAIT %d please" | 472 | " MWAIT %d please" |
457 | " contact lenb@kernel.org", | 473 | " contact lenb@kernel.org", |
458 | boot_cpu_data.x86_model, cstate); | 474 | boot_cpu_data.x86_model, cstate); |
459 | continue; | 475 | continue; |
460 | } | 476 | } |
461 | 477 | ||
462 | if ((cstate > 2) && | 478 | if ((cstate > 2) && |
463 | !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) | 479 | !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) |
464 | mark_tsc_unstable("TSC halts in idle" | 480 | mark_tsc_unstable("TSC halts in idle" |
465 | " states deeper than C2"); | 481 | " states deeper than C2"); |
466 | 482 | ||
467 | drv->states[drv->state_count] = /* structure copy */ | 483 | drv->states[drv->state_count] = /* structure copy */ |
468 | cpuidle_state_table[cstate]; | 484 | cpuidle_state_table[cstate]; |
469 | 485 | ||
470 | drv->state_count += 1; | 486 | drv->state_count += 1; |
471 | } | 487 | } |
472 | 488 | ||
473 | if (auto_demotion_disable_flags) | 489 | if (icpu->auto_demotion_disable_flags) |
474 | on_each_cpu(auto_demotion_disable, NULL, 1); | 490 | on_each_cpu(auto_demotion_disable, NULL, 1); |
475 | 491 | ||
476 | return 0; | 492 | return 0; |
477 | } | 493 | } |
478 | 494 | ||
479 | 495 | ||
480 | /* | 496 | /* |
481 | * intel_idle_cpu_init() | 497 | * intel_idle_cpu_init() |
482 | * allocate, initialize, register cpuidle_devices | 498 | * allocate, initialize, register cpuidle_devices |
483 | * @cpu: cpu/core to initialize | 499 | * @cpu: cpu/core to initialize |
484 | */ | 500 | */ |
485 | int intel_idle_cpu_init(int cpu) | 501 | int intel_idle_cpu_init(int cpu) |
486 | { | 502 | { |
487 | int cstate; | 503 | int cstate; |
488 | struct cpuidle_device *dev; | 504 | struct cpuidle_device *dev; |
489 | 505 | ||
490 | dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu); | 506 | dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu); |
491 | 507 | ||
492 | dev->state_count = 1; | 508 | dev->state_count = 1; |
493 | 509 | ||
494 | for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { | 510 | for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { |
495 | int num_substates; | 511 | int num_substates; |
496 | 512 | ||
497 | if (cstate > max_cstate) { | 513 | if (cstate > max_cstate) { |
498 | printk(PREFIX "max_cstate %d reached\n", | 514 | printk(PREFIX "max_cstate %d reached\n", |
499 | max_cstate); | 515 | max_cstate); |
500 | break; | 516 | break; |
501 | } | 517 | } |
502 | 518 | ||
503 | /* does the state exist in CPUID.MWAIT? */ | 519 | /* does the state exist in CPUID.MWAIT? */ |
504 | num_substates = (mwait_substates >> ((cstate) * 4)) | 520 | num_substates = (mwait_substates >> ((cstate) * 4)) |
505 | & MWAIT_SUBSTATE_MASK; | 521 | & MWAIT_SUBSTATE_MASK; |
506 | if (num_substates == 0) | 522 | if (num_substates == 0) |
507 | continue; | 523 | continue; |
508 | /* is the state not enabled? */ | 524 | /* is the state not enabled? */ |
509 | if (cpuidle_state_table[cstate].enter == NULL) | 525 | if (cpuidle_state_table[cstate].enter == NULL) |
510 | continue; | 526 | continue; |
511 | 527 | ||
512 | dev->states_usage[dev->state_count].driver_data = | 528 | dev->states_usage[dev->state_count].driver_data = |
513 | (void *)get_driver_data(cstate); | 529 | (void *)get_driver_data(cstate); |
514 | 530 | ||
515 | dev->state_count += 1; | 531 | dev->state_count += 1; |
516 | } | 532 | } |
517 | dev->cpu = cpu; | 533 | dev->cpu = cpu; |
518 | 534 | ||
519 | if (cpuidle_register_device(dev)) { | 535 | if (cpuidle_register_device(dev)) { |
520 | pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu); | 536 | pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu); |
521 | intel_idle_cpuidle_devices_uninit(); | 537 | intel_idle_cpuidle_devices_uninit(); |
522 | return -EIO; | 538 | return -EIO; |
523 | } | 539 | } |
524 | 540 | ||
525 | if (auto_demotion_disable_flags) | 541 | if (icpu->auto_demotion_disable_flags) |
526 | smp_call_function_single(cpu, auto_demotion_disable, NULL, 1); | 542 | smp_call_function_single(cpu, auto_demotion_disable, NULL, 1); |
527 | 543 | ||
528 | return 0; | 544 | return 0; |
529 | } | 545 | } |
530 | 546 | ||
531 | 547 | ||
532 | static int __init intel_idle_init(void) | 548 | static int __init intel_idle_init(void) |
533 | { | 549 | { |
534 | int retval, i; | 550 | int retval, i; |
535 | 551 | ||
536 | /* Do not load intel_idle at all for now if idle= is passed */ | 552 | /* Do not load intel_idle at all for now if idle= is passed */ |
537 | if (boot_option_idle_override != IDLE_NO_OVERRIDE) | 553 | if (boot_option_idle_override != IDLE_NO_OVERRIDE) |
538 | return -ENODEV; | 554 | return -ENODEV; |
539 | 555 | ||
540 | retval = intel_idle_probe(); | 556 | retval = intel_idle_probe(); |
541 | if (retval) | 557 | if (retval) |
542 | return retval; | 558 | return retval; |