Commit 4c2a997c34c0aa952ba9c247b0c2043526054919

Authored by Joe Buehler
Committed by Thomas Gleixner
1 parent 1c776bf87c

x86: add PCI ID for 6300ESB force hpet

00:1f.0 ISA bridge: Intel Corporation 6300ESB LPC Interface Controller (rev 02)
00:1f.0 Class 0601: 8086:25a1 (rev 02)

kernel: pci 0000:00:1f.0: Force enabled HPET at 0xfed00000
kernel: hpet clockevent registered
kernel: hpet0: at MMIO 0xfed00000, IRQs 2, 8, 0
kernel: hpet0: 3 64-bit timers, 14318180 Hz

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Showing 1 changed file with 2 additions and 0 deletions

arch/x86/kernel/quirks.c
/*
 * This file contains work-arounds for x86 and x86_64 platform bugs.
 */
#include <linux/pci.h>
#include <linux/irq.h>

#include <asm/hpet.h>

#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)

static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
{
        u8 config, rev;
        u16 word;

        /* BIOS may enable hardware IRQ balancing for
         * E7520/E7320/E7525(revision ID 0x9 and below)
         * based platforms.
         * Disable SW irqbalance/affinity on those platforms.
         */
        pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
        if (rev > 0x9)
                return;

        /* enable access to config space*/
        pci_read_config_byte(dev, 0xf4, &config);
        pci_write_config_byte(dev, 0xf4, config|0x2);

        /*
         * read xTPR register. We may not have a pci_dev for device 8
         * because it might be hidden until the above write.
         */
        pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);

        if (!(word & (1 << 13))) {
                dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
                        "disabling irq balancing and affinity\n");
#ifdef CONFIG_IRQBALANCE
                irqbalance_disable("");
#endif
                noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
                no_irq_affinity = 1;
#endif
        }

        /* put back the original value for config space*/
        if (!(config & 0x2))
                pci_write_config_byte(dev, 0xf4, config);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
                        quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
                        quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
                        quirk_intel_irqbalance);
#endif

#if defined(CONFIG_HPET_TIMER)
unsigned long force_hpet_address;

static enum {
        NONE_FORCE_HPET_RESUME,
        OLD_ICH_FORCE_HPET_RESUME,
        ICH_FORCE_HPET_RESUME,
        VT8237_FORCE_HPET_RESUME,
        NVIDIA_FORCE_HPET_RESUME,
} force_hpet_resume_type;

static void __iomem *rcba_base;

static void ich_force_hpet_resume(void)
{
        u32 val;

        if (!force_hpet_address)
                return;

        if (rcba_base == NULL)
                BUG();

        /* read the Function Disable register, dword mode only */
        val = readl(rcba_base + 0x3404);
        if (!(val & 0x80)) {
                /* HPET disabled in HPTC. Trying to enable */
                writel(val | 0x80, rcba_base + 0x3404);
        }

        val = readl(rcba_base + 0x3404);
        if (!(val & 0x80))
                BUG();
        else
                printk(KERN_DEBUG "Force enabled HPET at resume\n");

        return;
}

static void ich_force_enable_hpet(struct pci_dev *dev)
{
        u32 val;
        u32 uninitialized_var(rcba);
        int err = 0;

        if (hpet_address || force_hpet_address)
                return;

        pci_read_config_dword(dev, 0xF0, &rcba);
        rcba &= 0xFFFFC000;
        if (rcba == 0) {
                dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
                        "cannot force enable HPET\n");
                return;
        }

        /* use bits 31:14, 16 kB aligned */
        rcba_base = ioremap_nocache(rcba, 0x4000);
        if (rcba_base == NULL) {
                dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
                        "cannot force enable HPET\n");
                return;
        }

        /* read the Function Disable register, dword mode only */
        val = readl(rcba_base + 0x3404);

        if (val & 0x80) {
                /* HPET is enabled in HPTC. Just not reported by BIOS */
                val = val & 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                iounmap(rcba_base);
                return;
        }

        /* HPET disabled in HPTC. Trying to enable */
        writel(val | 0x80, rcba_base + 0x3404);

        val = readl(rcba_base + 0x3404);
        if (!(val & 0x80)) {
                err = 1;
        } else {
                val = val & 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
        }

        if (err) {
                force_hpet_address = 0;
                iounmap(rcba_base);
                dev_printk(KERN_DEBUG, &dev->dev,
                        "Failed to force enable HPET\n");
        } else {
                force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
        }
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
                         ich_force_enable_hpet);


static struct pci_dev *cached_dev;

static void old_ich_force_hpet_resume(void)
{
        u32 val;
        u32 uninitialized_var(gen_cntl);

        if (!force_hpet_address || !cached_dev)
                return;

        pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
        gen_cntl &= (~(0x7 << 15));
        gen_cntl |= (0x4 << 15);

        pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
        pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
        val = gen_cntl >> 15;
        val &= 0x7;
        if (val == 0x4)
                printk(KERN_DEBUG "Force enabled HPET at resume\n");
        else
                BUG();
}

static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
        u32 val;
        u32 uninitialized_var(gen_cntl);

        if (hpet_address || force_hpet_address)
                return;

        pci_read_config_dword(dev, 0xD0, &gen_cntl);
        /*
         * Bit 17 is HPET enable bit.
         * Bit 16:15 control the HPET base address.
         */
        val = gen_cntl >> 15;
        val &= 0x7;
        if (val & 0x4) {
                val &= 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
                        force_hpet_address);
                return;
        }

        /*
         * HPET is disabled. Trying enabling at FED00000 and check
         * whether it sticks
         */
        gen_cntl &= (~(0x7 << 15));
        gen_cntl |= (0x4 << 15);
        pci_write_config_dword(dev, 0xD0, gen_cntl);

        pci_read_config_dword(dev, 0xD0, &gen_cntl);

        val = gen_cntl >> 15;
        val &= 0x7;
        if (val & 0x4) {
                /* HPET is enabled in HPTC. Just not reported by BIOS */
                val &= 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                cached_dev = dev;
                force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
                return;
        }

        dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

/*
 * Undocumented chipset features. Make sure that the user enforced
 * this.
 */
static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
{
        if (hpet_force_user)
                old_ich_force_enable_hpet(dev);
}

+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
+                        old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
                         old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
                         old_ich_force_enable_hpet);


static void vt8237_force_hpet_resume(void)
{
        u32 val;

        if (!force_hpet_address || !cached_dev)
                return;

        val = 0xfed00000 | 0x80;
        pci_write_config_dword(cached_dev, 0x68, val);

        pci_read_config_dword(cached_dev, 0x68, &val);
        if (val & 0x80)
                printk(KERN_DEBUG "Force enabled HPET at resume\n");
        else
                BUG();
}

static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
        u32 uninitialized_var(val);

        if (!hpet_force_user || hpet_address || force_hpet_address)
                return;

        pci_read_config_dword(dev, 0x68, &val);
        /*
         * Bit 7 is HPET enable bit.
         * Bit 31:10 is HPET base address (contrary to what datasheet claims)
         */
        if (val & 0x80) {
                force_hpet_address = (val & ~0x3ff);
                dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
                        force_hpet_address);
                return;
        }

        /*
         * HPET is disabled. Trying enabling at FED00000 and check
         * whether it sticks
         */
        val = 0xfed00000 | 0x80;
        pci_write_config_dword(dev, 0x68, val);

        pci_read_config_dword(dev, 0x68, &val);
        if (val & 0x80) {
                force_hpet_address = (val & ~0x3ff);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                cached_dev = dev;
                force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
                return;
        }

        dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
                         vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
                         vt8237_force_enable_hpet);

/*
 * Undocumented chipset feature taken from LinuxBIOS.
 */
static void nvidia_force_hpet_resume(void)
{
        pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
        printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
        u32 uninitialized_var(val);

        if (!hpet_force_user || hpet_address || force_hpet_address)
                return;

        pci_write_config_dword(dev, 0x44, 0xfed00001);
        pci_read_config_dword(dev, 0x44, &val);
        force_hpet_address = val & 0xfffffffe;
        force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
        dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
                force_hpet_address);
        cached_dev = dev;
        return;
}

/* ISA Bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
                        nvidia_force_enable_hpet);

/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
                        nvidia_force_enable_hpet);

void force_hpet_resume(void)
{
        switch (force_hpet_resume_type) {
        case ICH_FORCE_HPET_RESUME:
                ich_force_hpet_resume();
                return;
        case OLD_ICH_FORCE_HPET_RESUME:
                old_ich_force_hpet_resume();
                return;
        case VT8237_FORCE_HPET_RESUME:
                vt8237_force_hpet_resume();
                return;
        case NVIDIA_FORCE_HPET_RESUME:
                nvidia_force_hpet_resume();
                return;
        default:
                break;
        }
}

#endif