Commit 1597cacbe39802d86656d1f2e6329895bd2ef531
Committed by: Greg Kroah-Hartman
1 parent: beb7cc8238
Exists in: master and 7 other branches
PCI: Fix multiple problems with VIA hardware
This patch is designed to fix:

- Disk eating corruptor on KT7 after resume from RAM
- VIA IRQ handling
- VIA fixups for bus lockups after resume from RAM

The core of this is to add a table of resume fixups run at resume time. We need to do this for a variety of boards and features, but particularly we need to do this to get various critical VIA fixups done on resume.

The second part of the problem is to handle VIA IRQ number rules which are a bit odd and need special handling for PIC interrupts. Various patches broke various boxes and while this one may not be perfect (hopefully it is) it ensures the workaround is applied to the right devices only.

From: Jean Delvare <khali@linux-fr.org>

Now that PCI quirks are replayed on software resume, we can safely re-enable the Asus SMBus unhiding quirk even when software suspend support is enabled.

[akpm@osdl.org: fix const warning]
Signed-off-by: Alan Cox <alan@redhat.com>
Cc: Jean Delvare <khali@linux-fr.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Showing 6 changed files with 144 additions and 63 deletions.
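The core mechanism added here (visible in the hunks below) is a new resume fixup phase: quirks registered with DECLARE_PCI_FIXUP_RESUME() are replayed by pci_fixup_device(pci_fixup_resume, dev) when a device comes back from suspend. A rough conceptual sketch of that table walk is shown below; the entry layout and loop are simplifications for illustration (in the real kernel the DECLARE_PCI_FIXUP_* macros collect the entries into dedicated linker sections rather than an explicit array):

#include <linux/pci.h>

/* Simplified stand-in for the real fixup records; illustrative only. */
struct resume_fixup {
	u16 vendor;			/* PCI_ANY_ID matches any vendor */
	u16 device;			/* PCI_ANY_ID matches any device */
	void (*hook)(struct pci_dev *dev);
};

/* Walk the table and call every hook whose IDs match the resuming device. */
static void run_resume_fixups(const struct resume_fixup *table, size_t n,
			      struct pci_dev *dev)
{
	size_t i;

	for (i = 0; i < n; i++) {
		const struct resume_fixup *f = &table[i];

		if ((f->vendor == (u16)PCI_ANY_ID || f->vendor == dev->vendor) &&
		    (f->device == (u16)PCI_ANY_ID || f->device == dev->device))
			f->hook(dev);
	}
}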
arch/i386/pci/fixup.c
1 | /* | 1 | /* |
2 | * Exceptions for specific devices. Usually work-arounds for fatal design flaws. | 2 | * Exceptions for specific devices. Usually work-arounds for fatal design flaws. |
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <linux/delay.h> | 5 | #include <linux/delay.h> |
6 | #include <linux/dmi.h> | 6 | #include <linux/dmi.h> |
7 | #include <linux/pci.h> | 7 | #include <linux/pci.h> |
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include "pci.h" | 9 | #include "pci.h" |
10 | 10 | ||
11 | 11 | ||
12 | static void __devinit pci_fixup_i450nx(struct pci_dev *d) | 12 | static void __devinit pci_fixup_i450nx(struct pci_dev *d) |
13 | { | 13 | { |
14 | /* | 14 | /* |
15 | * i450NX -- Find and scan all secondary buses on all PXB's. | 15 | * i450NX -- Find and scan all secondary buses on all PXB's. |
16 | */ | 16 | */ |
17 | int pxb, reg; | 17 | int pxb, reg; |
18 | u8 busno, suba, subb; | 18 | u8 busno, suba, subb; |
19 | 19 | ||
20 | printk(KERN_WARNING "PCI: Searching for i450NX host bridges on %s\n", pci_name(d)); | 20 | printk(KERN_WARNING "PCI: Searching for i450NX host bridges on %s\n", pci_name(d)); |
21 | reg = 0xd0; | 21 | reg = 0xd0; |
22 | for(pxb=0; pxb<2; pxb++) { | 22 | for(pxb=0; pxb<2; pxb++) { |
23 | pci_read_config_byte(d, reg++, &busno); | 23 | pci_read_config_byte(d, reg++, &busno); |
24 | pci_read_config_byte(d, reg++, &suba); | 24 | pci_read_config_byte(d, reg++, &suba); |
25 | pci_read_config_byte(d, reg++, &subb); | 25 | pci_read_config_byte(d, reg++, &subb); |
26 | DBG("i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno, suba, subb); | 26 | DBG("i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno, suba, subb); |
27 | if (busno) | 27 | if (busno) |
28 | pci_scan_bus(busno, &pci_root_ops, NULL); /* Bus A */ | 28 | pci_scan_bus(busno, &pci_root_ops, NULL); /* Bus A */ |
29 | if (suba < subb) | 29 | if (suba < subb) |
30 | pci_scan_bus(suba+1, &pci_root_ops, NULL); /* Bus B */ | 30 | pci_scan_bus(suba+1, &pci_root_ops, NULL); /* Bus B */ |
31 | } | 31 | } |
32 | pcibios_last_bus = -1; | 32 | pcibios_last_bus = -1; |
33 | } | 33 | } |
34 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx); | 34 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx); |
35 | 35 | ||
36 | static void __devinit pci_fixup_i450gx(struct pci_dev *d) | 36 | static void __devinit pci_fixup_i450gx(struct pci_dev *d) |
37 | { | 37 | { |
38 | /* | 38 | /* |
39 | * i450GX and i450KX -- Find and scan all secondary buses. | 39 | * i450GX and i450KX -- Find and scan all secondary buses. |
40 | * (called separately for each PCI bridge found) | 40 | * (called separately for each PCI bridge found) |
41 | */ | 41 | */ |
42 | u8 busno; | 42 | u8 busno; |
43 | pci_read_config_byte(d, 0x4a, &busno); | 43 | pci_read_config_byte(d, 0x4a, &busno); |
44 | printk(KERN_INFO "PCI: i440KX/GX host bridge %s: secondary bus %02x\n", pci_name(d), busno); | 44 | printk(KERN_INFO "PCI: i440KX/GX host bridge %s: secondary bus %02x\n", pci_name(d), busno); |
45 | pci_scan_bus(busno, &pci_root_ops, NULL); | 45 | pci_scan_bus(busno, &pci_root_ops, NULL); |
46 | pcibios_last_bus = -1; | 46 | pcibios_last_bus = -1; |
47 | } | 47 | } |
48 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454GX, pci_fixup_i450gx); | 48 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454GX, pci_fixup_i450gx); |
49 | 49 | ||
50 | static void __devinit pci_fixup_umc_ide(struct pci_dev *d) | 50 | static void __devinit pci_fixup_umc_ide(struct pci_dev *d) |
51 | { | 51 | { |
52 | /* | 52 | /* |
53 | * UM8886BF IDE controller sets region type bits incorrectly, | 53 | * UM8886BF IDE controller sets region type bits incorrectly, |
54 | * therefore they look like memory despite of them being I/O. | 54 | * therefore they look like memory despite of them being I/O. |
55 | */ | 55 | */ |
56 | int i; | 56 | int i; |
57 | 57 | ||
58 | printk(KERN_WARNING "PCI: Fixing base address flags for device %s\n", pci_name(d)); | 58 | printk(KERN_WARNING "PCI: Fixing base address flags for device %s\n", pci_name(d)); |
59 | for(i=0; i<4; i++) | 59 | for(i=0; i<4; i++) |
60 | d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO; | 60 | d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO; |
61 | } | 61 | } |
62 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide); | 62 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide); |
63 | 63 | ||
64 | static void __devinit pci_fixup_ncr53c810(struct pci_dev *d) | 64 | static void __devinit pci_fixup_ncr53c810(struct pci_dev *d) |
65 | { | 65 | { |
66 | /* | 66 | /* |
67 | * NCR 53C810 returns class code 0 (at least on some systems). | 67 | * NCR 53C810 returns class code 0 (at least on some systems). |
68 | * Fix class to be PCI_CLASS_STORAGE_SCSI | 68 | * Fix class to be PCI_CLASS_STORAGE_SCSI |
69 | */ | 69 | */ |
70 | if (!d->class) { | 70 | if (!d->class) { |
71 | printk(KERN_WARNING "PCI: fixing NCR 53C810 class code for %s\n", pci_name(d)); | 71 | printk(KERN_WARNING "PCI: fixing NCR 53C810 class code for %s\n", pci_name(d)); |
72 | d->class = PCI_CLASS_STORAGE_SCSI << 8; | 72 | d->class = PCI_CLASS_STORAGE_SCSI << 8; |
73 | } | 73 | } |
74 | } | 74 | } |
75 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, pci_fixup_ncr53c810); | 75 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, pci_fixup_ncr53c810); |
76 | 76 | ||
77 | static void __devinit pci_fixup_latency(struct pci_dev *d) | 77 | static void __devinit pci_fixup_latency(struct pci_dev *d) |
78 | { | 78 | { |
79 | /* | 79 | /* |
80 | * SiS 5597 and 5598 chipsets require latency timer set to | 80 | * SiS 5597 and 5598 chipsets require latency timer set to |
81 | * at most 32 to avoid lockups. | 81 | * at most 32 to avoid lockups. |
82 | */ | 82 | */ |
83 | DBG("PCI: Setting max latency to 32\n"); | 83 | DBG("PCI: Setting max latency to 32\n"); |
84 | pcibios_max_latency = 32; | 84 | pcibios_max_latency = 32; |
85 | } | 85 | } |
86 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency); | 86 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency); |
87 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency); | 87 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency); |
88 | 88 | ||
89 | static void __devinit pci_fixup_piix4_acpi(struct pci_dev *d) | 89 | static void __devinit pci_fixup_piix4_acpi(struct pci_dev *d) |
90 | { | 90 | { |
91 | /* | 91 | /* |
92 | * PIIX4 ACPI device: hardwired IRQ9 | 92 | * PIIX4 ACPI device: hardwired IRQ9 |
93 | */ | 93 | */ |
94 | d->irq = 9; | 94 | d->irq = 9; |
95 | } | 95 | } |
96 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, pci_fixup_piix4_acpi); | 96 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, pci_fixup_piix4_acpi); |
97 | 97 | ||
98 | /* | 98 | /* |
99 | * Addresses issues with problems in the memory write queue timer in | 99 | * Addresses issues with problems in the memory write queue timer in |
100 | * certain VIA Northbridges. This bugfix is per VIA's specifications, | 100 | * certain VIA Northbridges. This bugfix is per VIA's specifications, |
101 | * except for the KL133/KM133: clearing bit 5 on those Northbridges seems | 101 | * except for the KL133/KM133: clearing bit 5 on those Northbridges seems |
102 | * to trigger a bug in its integrated ProSavage video card, which | 102 | * to trigger a bug in its integrated ProSavage video card, which |
103 | * causes screen corruption. We only clear bits 6 and 7 for that chipset, | 103 | * causes screen corruption. We only clear bits 6 and 7 for that chipset, |
104 | * until VIA can provide us with definitive information on why screen | 104 | * until VIA can provide us with definitive information on why screen |
105 | * corruption occurs, and what exactly those bits do. | 105 | * corruption occurs, and what exactly those bits do. |
106 | * | 106 | * |
107 | * VIA 8363,8622,8361 Northbridges: | 107 | * VIA 8363,8622,8361 Northbridges: |
108 | * - bits 5, 6, 7 at offset 0x55 need to be turned off | 108 | * - bits 5, 6, 7 at offset 0x55 need to be turned off |
109 | * VIA 8367 (KT266x) Northbridges: | 109 | * VIA 8367 (KT266x) Northbridges: |
110 | * - bits 5, 6, 7 at offset 0x95 need to be turned off | 110 | * - bits 5, 6, 7 at offset 0x95 need to be turned off |
111 | * VIA 8363 rev 0x81/0x84 (KL133/KM133) Northbridges: | 111 | * VIA 8363 rev 0x81/0x84 (KL133/KM133) Northbridges: |
112 | * - bits 6, 7 at offset 0x55 need to be turned off | 112 | * - bits 6, 7 at offset 0x55 need to be turned off |
113 | */ | 113 | */ |
114 | 114 | ||
115 | #define VIA_8363_KL133_REVISION_ID 0x81 | 115 | #define VIA_8363_KL133_REVISION_ID 0x81 |
116 | #define VIA_8363_KM133_REVISION_ID 0x84 | 116 | #define VIA_8363_KM133_REVISION_ID 0x84 |
117 | 117 | ||
118 | static void __devinit pci_fixup_via_northbridge_bug(struct pci_dev *d) | 118 | static void pci_fixup_via_northbridge_bug(struct pci_dev *d) |
119 | { | 119 | { |
120 | u8 v; | 120 | u8 v; |
121 | u8 revision; | 121 | u8 revision; |
122 | int where = 0x55; | 122 | int where = 0x55; |
123 | int mask = 0x1f; /* clear bits 5, 6, 7 by default */ | 123 | int mask = 0x1f; /* clear bits 5, 6, 7 by default */ |
124 | 124 | ||
125 | pci_read_config_byte(d, PCI_REVISION_ID, &revision); | 125 | pci_read_config_byte(d, PCI_REVISION_ID, &revision); |
126 | 126 | ||
127 | if (d->device == PCI_DEVICE_ID_VIA_8367_0) { | 127 | if (d->device == PCI_DEVICE_ID_VIA_8367_0) { |
128 | /* fix pci bus latency issues resulted by NB bios error | 128 | /* fix pci bus latency issues resulted by NB bios error |
129 | it appears on bug free^Wreduced kt266x's bios forces | 129 | it appears on bug free^Wreduced kt266x's bios forces |
130 | NB latency to zero */ | 130 | NB latency to zero */ |
131 | pci_write_config_byte(d, PCI_LATENCY_TIMER, 0); | 131 | pci_write_config_byte(d, PCI_LATENCY_TIMER, 0); |
132 | 132 | ||
133 | where = 0x95; /* the memory write queue timer register is | 133 | where = 0x95; /* the memory write queue timer register is |
134 | different for the KT266x's: 0x95 not 0x55 */ | 134 | different for the KT266x's: 0x95 not 0x55 */ |
135 | } else if (d->device == PCI_DEVICE_ID_VIA_8363_0 && | 135 | } else if (d->device == PCI_DEVICE_ID_VIA_8363_0 && |
136 | (revision == VIA_8363_KL133_REVISION_ID || | 136 | (revision == VIA_8363_KL133_REVISION_ID || |
137 | revision == VIA_8363_KM133_REVISION_ID)) { | 137 | revision == VIA_8363_KM133_REVISION_ID)) { |
138 | mask = 0x3f; /* clear only bits 6 and 7; clearing bit 5 | 138 | mask = 0x3f; /* clear only bits 6 and 7; clearing bit 5 |
139 | causes screen corruption on the KL133/KM133 */ | 139 | causes screen corruption on the KL133/KM133 */ |
140 | } | 140 | } |
141 | 141 | ||
142 | pci_read_config_byte(d, where, &v); | 142 | pci_read_config_byte(d, where, &v); |
143 | if (v & ~mask) { | 143 | if (v & ~mask) { |
144 | printk(KERN_WARNING "Disabling VIA memory write queue (PCI ID %04x, rev %02x): [%02x] %02x & %02x -> %02x\n", \ | 144 | printk(KERN_WARNING "Disabling VIA memory write queue (PCI ID %04x, rev %02x): [%02x] %02x & %02x -> %02x\n", \ |
145 | d->device, revision, where, v, mask, v & mask); | 145 | d->device, revision, where, v, mask, v & mask); |
146 | v &= mask; | 146 | v &= mask; |
147 | pci_write_config_byte(d, where, v); | 147 | pci_write_config_byte(d, where, v); |
148 | } | 148 | } |
149 | } | 149 | } |
150 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug); | 150 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug); |
151 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug); | 151 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug); |
152 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug); | 152 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug); |
153 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug); | 153 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug); |
154 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug); | ||
155 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug); | ||
156 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug); | ||
157 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug); | ||
154 | 158 | ||
155 | /* | 159 | /* |
156 | * For some reasons Intel decided that certain parts of their | 160 | * For some reasons Intel decided that certain parts of their |
157 | * 815, 845 and some other chipsets must look like PCI-to-PCI bridges | 161 | * 815, 845 and some other chipsets must look like PCI-to-PCI bridges |
158 | * while they are obviously not. The 82801 family (AA, AB, BAM/CAM, | 162 | * while they are obviously not. The 82801 family (AA, AB, BAM/CAM, |
159 | * BA/CA/DB and E) PCI bridges are actually HUB-to-PCI ones, according | 163 | * BA/CA/DB and E) PCI bridges are actually HUB-to-PCI ones, according |
160 | * to Intel terminology. These devices do forward all addresses from | 164 | * to Intel terminology. These devices do forward all addresses from |
161 | * system to PCI bus no matter what are their window settings, so they are | 165 | * system to PCI bus no matter what are their window settings, so they are |
162 | * "transparent" (or subtractive decoding) from programmers point of view. | 166 | * "transparent" (or subtractive decoding) from programmers point of view. |
163 | */ | 167 | */ |
164 | static void __devinit pci_fixup_transparent_bridge(struct pci_dev *dev) | 168 | static void __devinit pci_fixup_transparent_bridge(struct pci_dev *dev) |
165 | { | 169 | { |
166 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && | 170 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && |
167 | (dev->device & 0xff00) == 0x2400) | 171 | (dev->device & 0xff00) == 0x2400) |
168 | dev->transparent = 1; | 172 | dev->transparent = 1; |
169 | } | 173 | } |
170 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_fixup_transparent_bridge); | 174 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_fixup_transparent_bridge); |
171 | 175 | ||
172 | /* | 176 | /* |
173 | * Fixup for C1 Halt Disconnect problem on nForce2 systems. | 177 | * Fixup for C1 Halt Disconnect problem on nForce2 systems. |
174 | * | 178 | * |
175 | * From information provided by "Allen Martin" <AMartin@nvidia.com>: | 179 | * From information provided by "Allen Martin" <AMartin@nvidia.com>: |
176 | * | 180 | * |
177 | * A hang is caused when the CPU generates a very fast CONNECT/HALT cycle | 181 | * A hang is caused when the CPU generates a very fast CONNECT/HALT cycle |
178 | * sequence. Workaround is to set the SYSTEM_IDLE_TIMEOUT to 80 ns. | 182 | * sequence. Workaround is to set the SYSTEM_IDLE_TIMEOUT to 80 ns. |
179 | * This allows the state-machine and timer to return to a proper state within | 183 | * This allows the state-machine and timer to return to a proper state within |
180 | * 80 ns of the CONNECT and probe appearing together. Since the CPU will not | 184 | * 80 ns of the CONNECT and probe appearing together. Since the CPU will not |
181 | * issue another HALT within 80 ns of the initial HALT, the failure condition | 185 | * issue another HALT within 80 ns of the initial HALT, the failure condition |
182 | * is avoided. | 186 | * is avoided. |
183 | */ | 187 | */ |
184 | static void __init pci_fixup_nforce2(struct pci_dev *dev) | 188 | static void pci_fixup_nforce2(struct pci_dev *dev) |
185 | { | 189 | { |
186 | u32 val; | 190 | u32 val; |
187 | 191 | ||
188 | /* | 192 | /* |
189 | * Chip Old value New value | 193 | * Chip Old value New value |
190 | * C17 0x1F0FFF01 0x1F01FF01 | 194 | * C17 0x1F0FFF01 0x1F01FF01 |
191 | * C18D 0x9F0FFF01 0x9F01FF01 | 195 | * C18D 0x9F0FFF01 0x9F01FF01 |
192 | * | 196 | * |
193 | * Northbridge chip version may be determined by | 197 | * Northbridge chip version may be determined by |
194 | * reading the PCI revision ID (0xC1 or greater is C18D). | 198 | * reading the PCI revision ID (0xC1 or greater is C18D). |
195 | */ | 199 | */ |
196 | pci_read_config_dword(dev, 0x6c, &val); | 200 | pci_read_config_dword(dev, 0x6c, &val); |
197 | 201 | ||
198 | /* | 202 | /* |
199 | * Apply fixup if needed, but don't touch disconnect state | 203 | * Apply fixup if needed, but don't touch disconnect state |
200 | */ | 204 | */ |
201 | if ((val & 0x00FF0000) != 0x00010000) { | 205 | if ((val & 0x00FF0000) != 0x00010000) { |
202 | printk(KERN_WARNING "PCI: nForce2 C1 Halt Disconnect fixup\n"); | 206 | printk(KERN_WARNING "PCI: nForce2 C1 Halt Disconnect fixup\n"); |
203 | pci_write_config_dword(dev, 0x6c, (val & 0xFF00FFFF) | 0x00010000); | 207 | pci_write_config_dword(dev, 0x6c, (val & 0xFF00FFFF) | 0x00010000); |
204 | } | 208 | } |
205 | } | 209 | } |
206 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2); | 210 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2); |
211 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2); | ||
207 | 212 | ||
208 | /* Max PCI Express root ports */ | 213 | /* Max PCI Express root ports */ |
209 | #define MAX_PCIEROOT 6 | 214 | #define MAX_PCIEROOT 6 |
210 | static int quirk_aspm_offset[MAX_PCIEROOT << 3]; | 215 | static int quirk_aspm_offset[MAX_PCIEROOT << 3]; |
211 | 216 | ||
212 | #define GET_INDEX(a, b) ((((a) - PCI_DEVICE_ID_INTEL_MCH_PA) << 3) + ((b) & 7)) | 217 | #define GET_INDEX(a, b) ((((a) - PCI_DEVICE_ID_INTEL_MCH_PA) << 3) + ((b) & 7)) |
213 | 218 | ||
214 | static int quirk_pcie_aspm_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) | 219 | static int quirk_pcie_aspm_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) |
215 | { | 220 | { |
216 | return raw_pci_ops->read(0, bus->number, devfn, where, size, value); | 221 | return raw_pci_ops->read(0, bus->number, devfn, where, size, value); |
217 | } | 222 | } |
218 | 223 | ||
219 | /* | 224 | /* |
220 | * Replace the original pci bus ops for write with a new one that will filter | 225 | * Replace the original pci bus ops for write with a new one that will filter |
221 | * the request to insure ASPM cannot be enabled. | 226 | * the request to insure ASPM cannot be enabled. |
222 | */ | 227 | */ |
223 | static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) | 228 | static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) |
224 | { | 229 | { |
225 | u8 offset; | 230 | u8 offset; |
226 | 231 | ||
227 | offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)]; | 232 | offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)]; |
228 | 233 | ||
229 | if ((offset) && (where == offset)) | 234 | if ((offset) && (where == offset)) |
230 | value = value & 0xfffffffc; | 235 | value = value & 0xfffffffc; |
231 | 236 | ||
232 | return raw_pci_ops->write(0, bus->number, devfn, where, size, value); | 237 | return raw_pci_ops->write(0, bus->number, devfn, where, size, value); |
233 | } | 238 | } |
234 | 239 | ||
235 | static struct pci_ops quirk_pcie_aspm_ops = { | 240 | static struct pci_ops quirk_pcie_aspm_ops = { |
236 | .read = quirk_pcie_aspm_read, | 241 | .read = quirk_pcie_aspm_read, |
237 | .write = quirk_pcie_aspm_write, | 242 | .write = quirk_pcie_aspm_write, |
238 | }; | 243 | }; |
239 | 244 | ||
240 | /* | 245 | /* |
241 | * Prevents PCI Express ASPM (Active State Power Management) being enabled. | 246 | * Prevents PCI Express ASPM (Active State Power Management) being enabled. |
242 | * | 247 | * |
243 | * Save the register offset, where the ASPM control bits are located, | 248 | * Save the register offset, where the ASPM control bits are located, |
244 | * for each PCI Express device that is in the device list of | 249 | * for each PCI Express device that is in the device list of |
245 | * the root port in an array for fast indexing. Replace the bus ops | 250 | * the root port in an array for fast indexing. Replace the bus ops |
246 | * with the modified one. | 251 | * with the modified one. |
247 | */ | 252 | */ |
248 | static void pcie_rootport_aspm_quirk(struct pci_dev *pdev) | 253 | static void pcie_rootport_aspm_quirk(struct pci_dev *pdev) |
249 | { | 254 | { |
250 | int cap_base, i; | 255 | int cap_base, i; |
251 | struct pci_bus *pbus; | 256 | struct pci_bus *pbus; |
252 | struct pci_dev *dev; | 257 | struct pci_dev *dev; |
253 | 258 | ||
254 | if ((pbus = pdev->subordinate) == NULL) | 259 | if ((pbus = pdev->subordinate) == NULL) |
255 | return; | 260 | return; |
256 | 261 | ||
257 | /* | 262 | /* |
258 | * Check if the DID of pdev matches one of the six root ports. This | 263 | * Check if the DID of pdev matches one of the six root ports. This |
259 | * check is needed in the case this function is called directly by the | 264 | * check is needed in the case this function is called directly by the |
260 | * hot-plug driver. | 265 | * hot-plug driver. |
261 | */ | 266 | */ |
262 | if ((pdev->device < PCI_DEVICE_ID_INTEL_MCH_PA) || | 267 | if ((pdev->device < PCI_DEVICE_ID_INTEL_MCH_PA) || |
263 | (pdev->device > PCI_DEVICE_ID_INTEL_MCH_PC1)) | 268 | (pdev->device > PCI_DEVICE_ID_INTEL_MCH_PC1)) |
264 | return; | 269 | return; |
265 | 270 | ||
266 | if (list_empty(&pbus->devices)) { | 271 | if (list_empty(&pbus->devices)) { |
267 | /* | 272 | /* |
268 | * If no device is attached to the root port at power-up or | 273 | * If no device is attached to the root port at power-up or |
269 | * after hot-remove, the pbus->devices is empty and this code | 274 | * after hot-remove, the pbus->devices is empty and this code |
270 | * will set the offsets to zero and the bus ops to parent's bus | 275 | * will set the offsets to zero and the bus ops to parent's bus |
271 | * ops, which is unmodified. | 276 | * ops, which is unmodified. |
272 | */ | 277 | */ |
273 | for (i= GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i) | 278 | for (i= GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i) |
274 | quirk_aspm_offset[i] = 0; | 279 | quirk_aspm_offset[i] = 0; |
275 | 280 | ||
276 | pbus->ops = pbus->parent->ops; | 281 | pbus->ops = pbus->parent->ops; |
277 | } else { | 282 | } else { |
278 | /* | 283 | /* |
279 | * If devices are attached to the root port at power-up or | 284 | * If devices are attached to the root port at power-up or |
280 | * after hot-add, the code loops through the device list of | 285 | * after hot-add, the code loops through the device list of |
281 | * each root port to save the register offsets and replace the | 286 | * each root port to save the register offsets and replace the |
282 | * bus ops. | 287 | * bus ops. |
283 | */ | 288 | */ |
284 | list_for_each_entry(dev, &pbus->devices, bus_list) { | 289 | list_for_each_entry(dev, &pbus->devices, bus_list) { |
285 | /* There are 0 to 8 devices attached to this bus */ | 290 | /* There are 0 to 8 devices attached to this bus */ |
286 | cap_base = pci_find_capability(dev, PCI_CAP_ID_EXP); | 291 | cap_base = pci_find_capability(dev, PCI_CAP_ID_EXP); |
287 | quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)]= cap_base + 0x10; | 292 | quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)]= cap_base + 0x10; |
288 | } | 293 | } |
289 | pbus->ops = &quirk_pcie_aspm_ops; | 294 | pbus->ops = &quirk_pcie_aspm_ops; |
290 | } | 295 | } |
291 | } | 296 | } |
292 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA, pcie_rootport_aspm_quirk ); | 297 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA, pcie_rootport_aspm_quirk ); |
293 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA1, pcie_rootport_aspm_quirk ); | 298 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA1, pcie_rootport_aspm_quirk ); |
294 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB, pcie_rootport_aspm_quirk ); | 299 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB, pcie_rootport_aspm_quirk ); |
295 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB1, pcie_rootport_aspm_quirk ); | 300 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB1, pcie_rootport_aspm_quirk ); |
296 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC, pcie_rootport_aspm_quirk ); | 301 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC, pcie_rootport_aspm_quirk ); |
297 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC1, pcie_rootport_aspm_quirk ); | 302 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC1, pcie_rootport_aspm_quirk ); |
298 | 303 | ||
299 | /* | 304 | /* |
300 | * Fixup to mark boot BIOS video selected by BIOS before it changes | 305 | * Fixup to mark boot BIOS video selected by BIOS before it changes |
301 | * | 306 | * |
302 | * From information provided by "Jon Smirl" <jonsmirl@gmail.com> | 307 | * From information provided by "Jon Smirl" <jonsmirl@gmail.com> |
303 | * | 308 | * |
304 | * The standard boot ROM sequence for an x86 machine uses the BIOS | 309 | * The standard boot ROM sequence for an x86 machine uses the BIOS |
305 | * to select an initial video card for boot display. This boot video | 310 | * to select an initial video card for boot display. This boot video |
306 | * card will have it's BIOS copied to C0000 in system RAM. | 311 | * card will have it's BIOS copied to C0000 in system RAM. |
307 | * IORESOURCE_ROM_SHADOW is used to associate the boot video | 312 | * IORESOURCE_ROM_SHADOW is used to associate the boot video |
308 | * card with this copy. On laptops this copy has to be used since | 313 | * card with this copy. On laptops this copy has to be used since |
309 | * the main ROM may be compressed or combined with another image. | 314 | * the main ROM may be compressed or combined with another image. |
310 | * See pci_map_rom() for use of this flag. IORESOURCE_ROM_SHADOW | 315 | * See pci_map_rom() for use of this flag. IORESOURCE_ROM_SHADOW |
311 | * is marked here since the boot video device will be the only enabled | 316 | * is marked here since the boot video device will be the only enabled |
312 | * video device at this point. | 317 | * video device at this point. |
313 | */ | 318 | */ |
314 | 319 | ||
315 | static void __devinit pci_fixup_video(struct pci_dev *pdev) | 320 | static void __devinit pci_fixup_video(struct pci_dev *pdev) |
316 | { | 321 | { |
317 | struct pci_dev *bridge; | 322 | struct pci_dev *bridge; |
318 | struct pci_bus *bus; | 323 | struct pci_bus *bus; |
319 | u16 config; | 324 | u16 config; |
320 | 325 | ||
321 | if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA) | 326 | if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA) |
322 | return; | 327 | return; |
323 | 328 | ||
324 | /* Is VGA routed to us? */ | 329 | /* Is VGA routed to us? */ |
325 | bus = pdev->bus; | 330 | bus = pdev->bus; |
326 | while (bus) { | 331 | while (bus) { |
327 | bridge = bus->self; | 332 | bridge = bus->self; |
328 | 333 | ||
329 | /* | 334 | /* |
330 | * From information provided by | 335 | * From information provided by |
331 | * "David Miller" <davem@davemloft.net> | 336 | * "David Miller" <davem@davemloft.net> |
332 | * The bridge control register is valid for PCI header | 337 | * The bridge control register is valid for PCI header |
333 | * type BRIDGE, or CARDBUS. Host to PCI controllers use | 338 | * type BRIDGE, or CARDBUS. Host to PCI controllers use |
334 | * PCI header type NORMAL. | 339 | * PCI header type NORMAL. |
335 | */ | 340 | */ |
336 | if (bridge | 341 | if (bridge |
337 | &&((bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE) | 342 | &&((bridge->hdr_type == PCI_HEADER_TYPE_BRIDGE) |
338 | ||(bridge->hdr_type == PCI_HEADER_TYPE_CARDBUS))) { | 343 | ||(bridge->hdr_type == PCI_HEADER_TYPE_CARDBUS))) { |
339 | pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, | 344 | pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, |
340 | &config); | 345 | &config); |
341 | if (!(config & PCI_BRIDGE_CTL_VGA)) | 346 | if (!(config & PCI_BRIDGE_CTL_VGA)) |
342 | return; | 347 | return; |
343 | } | 348 | } |
344 | bus = bus->parent; | 349 | bus = bus->parent; |
345 | } | 350 | } |
346 | pci_read_config_word(pdev, PCI_COMMAND, &config); | 351 | pci_read_config_word(pdev, PCI_COMMAND, &config); |
347 | if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { | 352 | if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { |
348 | pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; | 353 | pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; |
349 | printk(KERN_DEBUG "Boot video device is %s\n", pci_name(pdev)); | 354 | printk(KERN_DEBUG "Boot video device is %s\n", pci_name(pdev)); |
350 | } | 355 | } |
351 | } | 356 | } |
352 | DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video); | 357 | DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video); |
353 | 358 | ||
354 | /* | 359 | /* |
355 | * Some Toshiba laptops need extra code to enable their TI TSB43AB22/A. | 360 | * Some Toshiba laptops need extra code to enable their TI TSB43AB22/A. |
356 | * | 361 | * |
357 | * We pretend to bring them out of full D3 state, and restore the proper | 362 | * We pretend to bring them out of full D3 state, and restore the proper |
358 | * IRQ, PCI cache line size, and BARs, otherwise the device won't function | 363 | * IRQ, PCI cache line size, and BARs, otherwise the device won't function |
359 | * properly. In some cases, the device will generate an interrupt on | 364 | * properly. In some cases, the device will generate an interrupt on |
360 | * the wrong IRQ line, causing any devices sharing the line it's | 365 | * the wrong IRQ line, causing any devices sharing the line it's |
361 | * *supposed* to use to be disabled by the kernel's IRQ debug code. | 366 | * *supposed* to use to be disabled by the kernel's IRQ debug code. |
362 | */ | 367 | */ |
363 | static u16 toshiba_line_size; | 368 | static u16 toshiba_line_size; |
364 | 369 | ||
365 | static struct dmi_system_id __devinitdata toshiba_ohci1394_dmi_table[] = { | 370 | static struct dmi_system_id __devinitdata toshiba_ohci1394_dmi_table[] = { |
366 | { | 371 | { |
367 | .ident = "Toshiba PS5 based laptop", | 372 | .ident = "Toshiba PS5 based laptop", |
368 | .matches = { | 373 | .matches = { |
369 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | 374 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), |
370 | DMI_MATCH(DMI_PRODUCT_VERSION, "PS5"), | 375 | DMI_MATCH(DMI_PRODUCT_VERSION, "PS5"), |
371 | }, | 376 | }, |
372 | }, | 377 | }, |
373 | { | 378 | { |
374 | .ident = "Toshiba PSM4 based laptop", | 379 | .ident = "Toshiba PSM4 based laptop", |
375 | .matches = { | 380 | .matches = { |
376 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | 381 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), |
377 | DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"), | 382 | DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"), |
378 | }, | 383 | }, |
379 | }, | 384 | }, |
380 | { | 385 | { |
381 | .ident = "Toshiba A40 based laptop", | 386 | .ident = "Toshiba A40 based laptop", |
382 | .matches = { | 387 | .matches = { |
383 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), | 388 | DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), |
384 | DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"), | 389 | DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"), |
385 | }, | 390 | }, |
386 | }, | 391 | }, |
387 | { } | 392 | { } |
388 | }; | 393 | }; |
389 | 394 | ||
390 | static void __devinit pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev) | 395 | static void __devinit pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev) |
391 | { | 396 | { |
392 | if (!dmi_check_system(toshiba_ohci1394_dmi_table)) | 397 | if (!dmi_check_system(toshiba_ohci1394_dmi_table)) |
393 | return; /* only applies to certain Toshibas (so far) */ | 398 | return; /* only applies to certain Toshibas (so far) */ |
394 | 399 | ||
395 | dev->current_state = PCI_D3cold; | 400 | dev->current_state = PCI_D3cold; |
396 | pci_read_config_word(dev, PCI_CACHE_LINE_SIZE, &toshiba_line_size); | 401 | pci_read_config_word(dev, PCI_CACHE_LINE_SIZE, &toshiba_line_size); |
397 | } | 402 | } |
398 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0x8032, | 403 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0x8032, |
399 | pci_pre_fixup_toshiba_ohci1394); | 404 | pci_pre_fixup_toshiba_ohci1394); |
400 | 405 | ||
401 | static void __devinit pci_post_fixup_toshiba_ohci1394(struct pci_dev *dev) | 406 | static void __devinit pci_post_fixup_toshiba_ohci1394(struct pci_dev *dev) |
402 | { | 407 | { |
403 | if (!dmi_check_system(toshiba_ohci1394_dmi_table)) | 408 | if (!dmi_check_system(toshiba_ohci1394_dmi_table)) |
404 | return; /* only applies to certain Toshibas (so far) */ | 409 | return; /* only applies to certain Toshibas (so far) */ |
405 | 410 | ||
406 | /* Restore config space on Toshiba laptops */ | 411 | /* Restore config space on Toshiba laptops */ |
407 | pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, toshiba_line_size); | 412 | pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, toshiba_line_size); |
408 | pci_read_config_byte(dev, PCI_INTERRUPT_LINE, (u8 *)&dev->irq); | 413 | pci_read_config_byte(dev, PCI_INTERRUPT_LINE, (u8 *)&dev->irq); |
409 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, | 414 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, |
410 | pci_resource_start(dev, 0)); | 415 | pci_resource_start(dev, 0)); |
411 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, | 416 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, |
412 | pci_resource_start(dev, 1)); | 417 | pci_resource_start(dev, 1)); |
413 | } | 418 | } |
414 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, 0x8032, | 419 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, 0x8032, |
415 | pci_post_fixup_toshiba_ohci1394); | 420 | pci_post_fixup_toshiba_ohci1394); |
416 | 421 | ||
417 | 422 | ||
418 | /* | 423 | /* |
419 | * Prevent the BIOS trapping accesses to the Cyrix CS5530A video device | 424 | * Prevent the BIOS trapping accesses to the Cyrix CS5530A video device |
420 | * configuration space. | 425 | * configuration space. |
421 | */ | 426 | */ |
422 | static void __devinit pci_early_fixup_cyrix_5530(struct pci_dev *dev) | 427 | static void pci_early_fixup_cyrix_5530(struct pci_dev *dev) |
423 | { | 428 | { |
424 | u8 r; | 429 | u8 r; |
425 | /* clear 'F4 Video Configuration Trap' bit */ | 430 | /* clear 'F4 Video Configuration Trap' bit */ |
426 | pci_read_config_byte(dev, 0x42, &r); | 431 | pci_read_config_byte(dev, 0x42, &r); |
427 | r &= 0xfd; | 432 | r &= 0xfd; |
428 | pci_write_config_byte(dev, 0x42, r); | 433 | pci_write_config_byte(dev, 0x42, r); |
429 | } | 434 | } |
430 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, | 435 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, |
436 | pci_early_fixup_cyrix_5530); | ||
437 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, | ||
431 | pci_early_fixup_cyrix_5530); | 438 | pci_early_fixup_cyrix_5530); |
432 | 439 |
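The pattern applied throughout this file: existing quirk routines are additionally registered for the new resume phase, and any routine now replayed at resume loses its __devinit/__init annotation (see pci_fixup_via_northbridge_bug, pci_fixup_nforce2 and pci_early_fixup_cyrix_5530 above), since init-section code may be discarded after boot and must not be called again at resume time. A minimal sketch of the pattern, using a placeholder quirk and the made-up device ID 0xbeef:

/* Hypothetical quirk; 0xbeef is a placeholder device ID, not a real part. */
static void pci_fixup_example(struct pci_dev *d)
{
	/* re-apply a register the BIOS may clobber across suspend/resume */
	pci_write_config_byte(d, PCI_LATENCY_TIMER, 64);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, 0xbeef, pci_fixup_example);	/* at boot */
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, 0xbeef, pci_fixup_example);	/* after resume */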
drivers/pci/pci-driver.c
1 | /* | 1 | /* |
2 | * drivers/pci/pci-driver.c | 2 | * drivers/pci/pci-driver.c |
3 | * | 3 | * |
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/pci.h> | 6 | #include <linux/pci.h> |
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/init.h> | 8 | #include <linux/init.h> |
9 | #include <linux/device.h> | 9 | #include <linux/device.h> |
10 | #include <linux/mempolicy.h> | 10 | #include <linux/mempolicy.h> |
11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include "pci.h" | 14 | #include "pci.h" |
15 | 15 | ||
16 | /* | 16 | /* |
17 | * Registration of PCI drivers and handling of hot-pluggable devices. | 17 | * Registration of PCI drivers and handling of hot-pluggable devices. |
18 | */ | 18 | */ |
19 | 19 | ||
20 | /* multithreaded probe logic */ | 20 | /* multithreaded probe logic */ |
21 | static int pci_multithread_probe = | 21 | static int pci_multithread_probe = |
22 | #ifdef CONFIG_PCI_MULTITHREAD_PROBE | 22 | #ifdef CONFIG_PCI_MULTITHREAD_PROBE |
23 | 1; | 23 | 1; |
24 | #else | 24 | #else |
25 | 0; | 25 | 0; |
26 | #endif | 26 | #endif |
27 | __module_param_call("", pci_multithread_probe, param_set_bool, param_get_bool, &pci_multithread_probe, 0644); | 27 | __module_param_call("", pci_multithread_probe, param_set_bool, param_get_bool, &pci_multithread_probe, 0644); |
28 | 28 | ||
29 | 29 | ||
30 | /* | 30 | /* |
31 | * Dynamic device IDs are disabled for !CONFIG_HOTPLUG | 31 | * Dynamic device IDs are disabled for !CONFIG_HOTPLUG |
32 | */ | 32 | */ |
33 | 33 | ||
34 | struct pci_dynid { | 34 | struct pci_dynid { |
35 | struct list_head node; | 35 | struct list_head node; |
36 | struct pci_device_id id; | 36 | struct pci_device_id id; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | #ifdef CONFIG_HOTPLUG | 39 | #ifdef CONFIG_HOTPLUG |
40 | 40 | ||
41 | /** | 41 | /** |
42 | * store_new_id - add a new PCI device ID to this driver and re-probe devices | 42 | * store_new_id - add a new PCI device ID to this driver and re-probe devices |
43 | * @driver: target device driver | 43 | * @driver: target device driver |
44 | * @buf: buffer for scanning device ID data | 44 | * @buf: buffer for scanning device ID data |
45 | * @count: input size | 45 | * @count: input size |
46 | * | 46 | * |
47 | * Adds a new dynamic pci device ID to this driver, | 47 | * Adds a new dynamic pci device ID to this driver, |
48 | * and causes the driver to probe for all devices again. | 48 | * and causes the driver to probe for all devices again. |
49 | */ | 49 | */ |
50 | static ssize_t | 50 | static ssize_t |
51 | store_new_id(struct device_driver *driver, const char *buf, size_t count) | 51 | store_new_id(struct device_driver *driver, const char *buf, size_t count) |
52 | { | 52 | { |
53 | struct pci_dynid *dynid; | 53 | struct pci_dynid *dynid; |
54 | struct pci_driver *pdrv = to_pci_driver(driver); | 54 | struct pci_driver *pdrv = to_pci_driver(driver); |
55 | __u32 vendor=PCI_ANY_ID, device=PCI_ANY_ID, subvendor=PCI_ANY_ID, | 55 | __u32 vendor=PCI_ANY_ID, device=PCI_ANY_ID, subvendor=PCI_ANY_ID, |
56 | subdevice=PCI_ANY_ID, class=0, class_mask=0; | 56 | subdevice=PCI_ANY_ID, class=0, class_mask=0; |
57 | unsigned long driver_data=0; | 57 | unsigned long driver_data=0; |
58 | int fields=0; | 58 | int fields=0; |
59 | int retval = 0; | 59 | int retval = 0; |
60 | 60 | ||
61 | fields = sscanf(buf, "%x %x %x %x %x %x %lux", | 61 | fields = sscanf(buf, "%x %x %x %x %x %x %lux", |
62 | &vendor, &device, &subvendor, &subdevice, | 62 | &vendor, &device, &subvendor, &subdevice, |
63 | &class, &class_mask, &driver_data); | 63 | &class, &class_mask, &driver_data); |
64 | if (fields < 0) | 64 | if (fields < 0) |
65 | return -EINVAL; | 65 | return -EINVAL; |
66 | 66 | ||
67 | dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); | 67 | dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); |
68 | if (!dynid) | 68 | if (!dynid) |
69 | return -ENOMEM; | 69 | return -ENOMEM; |
70 | 70 | ||
71 | INIT_LIST_HEAD(&dynid->node); | 71 | INIT_LIST_HEAD(&dynid->node); |
72 | dynid->id.vendor = vendor; | 72 | dynid->id.vendor = vendor; |
73 | dynid->id.device = device; | 73 | dynid->id.device = device; |
74 | dynid->id.subvendor = subvendor; | 74 | dynid->id.subvendor = subvendor; |
75 | dynid->id.subdevice = subdevice; | 75 | dynid->id.subdevice = subdevice; |
76 | dynid->id.class = class; | 76 | dynid->id.class = class; |
77 | dynid->id.class_mask = class_mask; | 77 | dynid->id.class_mask = class_mask; |
78 | dynid->id.driver_data = pdrv->dynids.use_driver_data ? | 78 | dynid->id.driver_data = pdrv->dynids.use_driver_data ? |
79 | driver_data : 0UL; | 79 | driver_data : 0UL; |
80 | 80 | ||
81 | spin_lock(&pdrv->dynids.lock); | 81 | spin_lock(&pdrv->dynids.lock); |
82 | list_add_tail(&pdrv->dynids.list, &dynid->node); | 82 | list_add_tail(&pdrv->dynids.list, &dynid->node); |
83 | spin_unlock(&pdrv->dynids.lock); | 83 | spin_unlock(&pdrv->dynids.lock); |
84 | 84 | ||
85 | if (get_driver(&pdrv->driver)) { | 85 | if (get_driver(&pdrv->driver)) { |
86 | retval = driver_attach(&pdrv->driver); | 86 | retval = driver_attach(&pdrv->driver); |
87 | put_driver(&pdrv->driver); | 87 | put_driver(&pdrv->driver); |
88 | } | 88 | } |
89 | 89 | ||
90 | if (retval) | 90 | if (retval) |
91 | return retval; | 91 | return retval; |
92 | return count; | 92 | return count; |
93 | } | 93 | } |
94 | static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id); | 94 | static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id); |
95 | 95 | ||
96 | static void | 96 | static void |
97 | pci_free_dynids(struct pci_driver *drv) | 97 | pci_free_dynids(struct pci_driver *drv) |
98 | { | 98 | { |
99 | struct pci_dynid *dynid, *n; | 99 | struct pci_dynid *dynid, *n; |
100 | 100 | ||
101 | spin_lock(&drv->dynids.lock); | 101 | spin_lock(&drv->dynids.lock); |
102 | list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { | 102 | list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { |
103 | list_del(&dynid->node); | 103 | list_del(&dynid->node); |
104 | kfree(dynid); | 104 | kfree(dynid); |
105 | } | 105 | } |
106 | spin_unlock(&drv->dynids.lock); | 106 | spin_unlock(&drv->dynids.lock); |
107 | } | 107 | } |
108 | 108 | ||
109 | static int | 109 | static int |
110 | pci_create_newid_file(struct pci_driver *drv) | 110 | pci_create_newid_file(struct pci_driver *drv) |
111 | { | 111 | { |
112 | int error = 0; | 112 | int error = 0; |
113 | if (drv->probe != NULL) | 113 | if (drv->probe != NULL) |
114 | error = sysfs_create_file(&drv->driver.kobj, | 114 | error = sysfs_create_file(&drv->driver.kobj, |
115 | &driver_attr_new_id.attr); | 115 | &driver_attr_new_id.attr); |
116 | return error; | 116 | return error; |
117 | } | 117 | } |
118 | 118 | ||
119 | #else /* !CONFIG_HOTPLUG */ | 119 | #else /* !CONFIG_HOTPLUG */ |
120 | static inline void pci_free_dynids(struct pci_driver *drv) {} | 120 | static inline void pci_free_dynids(struct pci_driver *drv) {} |
121 | static inline int pci_create_newid_file(struct pci_driver *drv) | 121 | static inline int pci_create_newid_file(struct pci_driver *drv) |
122 | { | 122 | { |
123 | return 0; | 123 | return 0; |
124 | } | 124 | } |
125 | #endif | 125 | #endif |
126 | 126 | ||
127 | /** | 127 | /** |
128 | * pci_match_id - See if a pci device matches a given pci_id table | 128 | * pci_match_id - See if a pci device matches a given pci_id table |
129 | * @ids: array of PCI device id structures to search in | 129 | * @ids: array of PCI device id structures to search in |
130 | * @dev: the PCI device structure to match against. | 130 | * @dev: the PCI device structure to match against. |
131 | * | 131 | * |
132 | * Used by a driver to check whether a PCI device present in the | 132 | * Used by a driver to check whether a PCI device present in the |
133 | * system is in its list of supported devices. Returns the matching | 133 | * system is in its list of supported devices. Returns the matching |
134 | * pci_device_id structure or %NULL if there is no match. | 134 | * pci_device_id structure or %NULL if there is no match. |
135 | * | 135 | * |
136 | * Depreciated, don't use this as it will not catch any dynamic ids | 136 | * Depreciated, don't use this as it will not catch any dynamic ids |
137 | * that a driver might want to check for. | 137 | * that a driver might want to check for. |
138 | */ | 138 | */ |
139 | const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, | 139 | const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, |
140 | struct pci_dev *dev) | 140 | struct pci_dev *dev) |
141 | { | 141 | { |
142 | if (ids) { | 142 | if (ids) { |
143 | while (ids->vendor || ids->subvendor || ids->class_mask) { | 143 | while (ids->vendor || ids->subvendor || ids->class_mask) { |
144 | if (pci_match_one_device(ids, dev)) | 144 | if (pci_match_one_device(ids, dev)) |
145 | return ids; | 145 | return ids; |
146 | ids++; | 146 | ids++; |
147 | } | 147 | } |
148 | } | 148 | } |
149 | return NULL; | 149 | return NULL; |
150 | } | 150 | } |
151 | 151 | ||
152 | /** | 152 | /** |
153 | * pci_match_device - Tell if a PCI device structure has a matching | 153 | * pci_match_device - Tell if a PCI device structure has a matching |
154 | * PCI device id structure | 154 | * PCI device id structure |
155 | * @drv: the PCI driver to match against | 155 | * @drv: the PCI driver to match against |
156 | * @dev: the PCI device structure to match against | 156 | * @dev: the PCI device structure to match against |
157 | * | 157 | * |
158 | * Used by a driver to check whether a PCI device present in the | 158 | * Used by a driver to check whether a PCI device present in the |
159 | * system is in its list of supported devices. Returns the matching | 159 | * system is in its list of supported devices. Returns the matching |
160 | * pci_device_id structure or %NULL if there is no match. | 160 | * pci_device_id structure or %NULL if there is no match. |
161 | */ | 161 | */ |
162 | const struct pci_device_id *pci_match_device(struct pci_driver *drv, | 162 | const struct pci_device_id *pci_match_device(struct pci_driver *drv, |
163 | struct pci_dev *dev) | 163 | struct pci_dev *dev) |
164 | { | 164 | { |
165 | struct pci_dynid *dynid; | 165 | struct pci_dynid *dynid; |
166 | 166 | ||
167 | /* Look at the dynamic ids first, before the static ones */ | 167 | /* Look at the dynamic ids first, before the static ones */ |
168 | spin_lock(&drv->dynids.lock); | 168 | spin_lock(&drv->dynids.lock); |
169 | list_for_each_entry(dynid, &drv->dynids.list, node) { | 169 | list_for_each_entry(dynid, &drv->dynids.list, node) { |
170 | if (pci_match_one_device(&dynid->id, dev)) { | 170 | if (pci_match_one_device(&dynid->id, dev)) { |
171 | spin_unlock(&drv->dynids.lock); | 171 | spin_unlock(&drv->dynids.lock); |
172 | return &dynid->id; | 172 | return &dynid->id; |
173 | } | 173 | } |
174 | } | 174 | } |
175 | spin_unlock(&drv->dynids.lock); | 175 | spin_unlock(&drv->dynids.lock); |
176 | 176 | ||
177 | return pci_match_id(drv->id_table, dev); | 177 | return pci_match_id(drv->id_table, dev); |
178 | } | 178 | } |
179 | 179 | ||
180 | static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, | 180 | static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, |
181 | const struct pci_device_id *id) | 181 | const struct pci_device_id *id) |
182 | { | 182 | { |
183 | int error; | 183 | int error; |
184 | #ifdef CONFIG_NUMA | 184 | #ifdef CONFIG_NUMA |
185 | /* Execute driver initialization on node where the | 185 | /* Execute driver initialization on node where the |
186 | device's bus is attached to. This way the driver likely | 186 | device's bus is attached to. This way the driver likely |
187 | allocates its local memory on the right node without | 187 | allocates its local memory on the right node without |
188 | any need to change it. */ | 188 | any need to change it. */ |
189 | struct mempolicy *oldpol; | 189 | struct mempolicy *oldpol; |
190 | cpumask_t oldmask = current->cpus_allowed; | 190 | cpumask_t oldmask = current->cpus_allowed; |
191 | int node = pcibus_to_node(dev->bus); | 191 | int node = pcibus_to_node(dev->bus); |
192 | if (node >= 0 && node_online(node)) | 192 | if (node >= 0 && node_online(node)) |
193 | set_cpus_allowed(current, node_to_cpumask(node)); | 193 | set_cpus_allowed(current, node_to_cpumask(node)); |
194 | /* And set default memory allocation policy */ | 194 | /* And set default memory allocation policy */ |
195 | oldpol = current->mempolicy; | 195 | oldpol = current->mempolicy; |
196 | current->mempolicy = &default_policy; | 196 | current->mempolicy = &default_policy; |
197 | mpol_get(current->mempolicy); | 197 | mpol_get(current->mempolicy); |
198 | #endif | 198 | #endif |
199 | error = drv->probe(dev, id); | 199 | error = drv->probe(dev, id); |
200 | #ifdef CONFIG_NUMA | 200 | #ifdef CONFIG_NUMA |
201 | set_cpus_allowed(current, oldmask); | 201 | set_cpus_allowed(current, oldmask); |
202 | mpol_free(current->mempolicy); | 202 | mpol_free(current->mempolicy); |
203 | current->mempolicy = oldpol; | 203 | current->mempolicy = oldpol; |
204 | #endif | 204 | #endif |
205 | return error; | 205 | return error; |
206 | } | 206 | } |
207 | 207 | ||
208 | /** | 208 | /** |
209 | * __pci_device_probe() | 209 | * __pci_device_probe() |
210 | * @drv: driver to call to check if it wants the PCI device | 210 | * @drv: driver to call to check if it wants the PCI device |
211 | * @pci_dev: PCI device being probed | 211 | * @pci_dev: PCI device being probed |
212 | * | 212 | * |
213 | * returns 0 on success, else error. | 213 | * returns 0 on success, else error. |
214 | * side-effect: pci_dev->driver is set to drv when drv claims pci_dev. | 214 | * side-effect: pci_dev->driver is set to drv when drv claims pci_dev. |
215 | */ | 215 | */ |
216 | static int | 216 | static int |
217 | __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev) | 217 | __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev) |
218 | { | 218 | { |
219 | const struct pci_device_id *id; | 219 | const struct pci_device_id *id; |
220 | int error = 0; | 220 | int error = 0; |
221 | 221 | ||
222 | if (!pci_dev->driver && drv->probe) { | 222 | if (!pci_dev->driver && drv->probe) { |
223 | error = -ENODEV; | 223 | error = -ENODEV; |
224 | 224 | ||
225 | id = pci_match_device(drv, pci_dev); | 225 | id = pci_match_device(drv, pci_dev); |
226 | if (id) | 226 | if (id) |
227 | error = pci_call_probe(drv, pci_dev, id); | 227 | error = pci_call_probe(drv, pci_dev, id); |
228 | if (error >= 0) { | 228 | if (error >= 0) { |
229 | pci_dev->driver = drv; | 229 | pci_dev->driver = drv; |
230 | error = 0; | 230 | error = 0; |
231 | } | 231 | } |
232 | } | 232 | } |
233 | return error; | 233 | return error; |
234 | } | 234 | } |
235 | 235 | ||
236 | static int pci_device_probe(struct device * dev) | 236 | static int pci_device_probe(struct device * dev) |
237 | { | 237 | { |
238 | int error = 0; | 238 | int error = 0; |
239 | struct pci_driver *drv; | 239 | struct pci_driver *drv; |
240 | struct pci_dev *pci_dev; | 240 | struct pci_dev *pci_dev; |
241 | 241 | ||
242 | drv = to_pci_driver(dev->driver); | 242 | drv = to_pci_driver(dev->driver); |
243 | pci_dev = to_pci_dev(dev); | 243 | pci_dev = to_pci_dev(dev); |
244 | pci_dev_get(pci_dev); | 244 | pci_dev_get(pci_dev); |
245 | error = __pci_device_probe(drv, pci_dev); | 245 | error = __pci_device_probe(drv, pci_dev); |
246 | if (error) | 246 | if (error) |
247 | pci_dev_put(pci_dev); | 247 | pci_dev_put(pci_dev); |
248 | 248 | ||
249 | return error; | 249 | return error; |
250 | } | 250 | } |
251 | 251 | ||
252 | static int pci_device_remove(struct device * dev) | 252 | static int pci_device_remove(struct device * dev) |
253 | { | 253 | { |
254 | struct pci_dev * pci_dev = to_pci_dev(dev); | 254 | struct pci_dev * pci_dev = to_pci_dev(dev); |
255 | struct pci_driver * drv = pci_dev->driver; | 255 | struct pci_driver * drv = pci_dev->driver; |
256 | 256 | ||
257 | if (drv) { | 257 | if (drv) { |
258 | if (drv->remove) | 258 | if (drv->remove) |
259 | drv->remove(pci_dev); | 259 | drv->remove(pci_dev); |
260 | pci_dev->driver = NULL; | 260 | pci_dev->driver = NULL; |
261 | } | 261 | } |
262 | 262 | ||
263 | /* | 263 | /* |
264 | * If the device is still on, set the power state as "unknown", | 264 | * If the device is still on, set the power state as "unknown", |
265 | * since it might change by the next time we load the driver. | 265 | * since it might change by the next time we load the driver. |
266 | */ | 266 | */ |
267 | if (pci_dev->current_state == PCI_D0) | 267 | if (pci_dev->current_state == PCI_D0) |
268 | pci_dev->current_state = PCI_UNKNOWN; | 268 | pci_dev->current_state = PCI_UNKNOWN; |
269 | 269 | ||
270 | /* | 270 | /* |
271 | * We would love to complain here if pci_dev->is_enabled is set, that | 271 | * We would love to complain here if pci_dev->is_enabled is set, that |
272 | * the driver should have called pci_disable_device(), but the | 272 | * the driver should have called pci_disable_device(), but the |
273 | * unfortunate fact is there are too many odd BIOS and bridge setups | 273 | * unfortunate fact is there are too many odd BIOS and bridge setups |
274 | * that don't like drivers doing that all of the time. | 274 | * that don't like drivers doing that all of the time. |
275 | * Oh well, we can dream of sane hardware when we sleep, no matter how | 275 | * Oh well, we can dream of sane hardware when we sleep, no matter how |
276 | * horrible the crap we have to deal with is when we are awake... | 276 | * horrible the crap we have to deal with is when we are awake... |
277 | */ | 277 | */ |
278 | 278 | ||
279 | pci_dev_put(pci_dev); | 279 | pci_dev_put(pci_dev); |
280 | return 0; | 280 | return 0; |
281 | } | 281 | } |
282 | 282 | ||
283 | static int pci_device_suspend(struct device * dev, pm_message_t state) | 283 | static int pci_device_suspend(struct device * dev, pm_message_t state) |
284 | { | 284 | { |
285 | struct pci_dev * pci_dev = to_pci_dev(dev); | 285 | struct pci_dev * pci_dev = to_pci_dev(dev); |
286 | struct pci_driver * drv = pci_dev->driver; | 286 | struct pci_driver * drv = pci_dev->driver; |
287 | int i = 0; | 287 | int i = 0; |
288 | 288 | ||
289 | if (drv && drv->suspend) { | 289 | if (drv && drv->suspend) { |
290 | i = drv->suspend(pci_dev, state); | 290 | i = drv->suspend(pci_dev, state); |
291 | suspend_report_result(drv->suspend, i); | 291 | suspend_report_result(drv->suspend, i); |
292 | } else { | 292 | } else { |
293 | pci_save_state(pci_dev); | 293 | pci_save_state(pci_dev); |
294 | /* | 294 | /* |
295 | * mark its power state as "unknown", since we don't know if | 295 | * mark its power state as "unknown", since we don't know if |
296 | * e.g. the BIOS will change its device state when we suspend. | 296 | * e.g. the BIOS will change its device state when we suspend. |
297 | */ | 297 | */ |
298 | if (pci_dev->current_state == PCI_D0) | 298 | if (pci_dev->current_state == PCI_D0) |
299 | pci_dev->current_state = PCI_UNKNOWN; | 299 | pci_dev->current_state = PCI_UNKNOWN; |
300 | } | 300 | } |
301 | return i; | 301 | return i; |
302 | } | 302 | } |
303 | 303 | ||
304 | static int pci_device_suspend_late(struct device * dev, pm_message_t state) | 304 | static int pci_device_suspend_late(struct device * dev, pm_message_t state) |
305 | { | 305 | { |
306 | struct pci_dev * pci_dev = to_pci_dev(dev); | 306 | struct pci_dev * pci_dev = to_pci_dev(dev); |
307 | struct pci_driver * drv = pci_dev->driver; | 307 | struct pci_driver * drv = pci_dev->driver; |
308 | int i = 0; | 308 | int i = 0; |
309 | 309 | ||
310 | if (drv && drv->suspend_late) { | 310 | if (drv && drv->suspend_late) { |
311 | i = drv->suspend_late(pci_dev, state); | 311 | i = drv->suspend_late(pci_dev, state); |
312 | suspend_report_result(drv->suspend_late, i); | 312 | suspend_report_result(drv->suspend_late, i); |
313 | } | 313 | } |
314 | return i; | 314 | return i; |
315 | } | 315 | } |
316 | 316 | ||
317 | /* | 317 | /* |
318 | * Default resume method for devices that have no driver provided resume, | 318 | * Default resume method for devices that have no driver provided resume, |
319 | * or not even a driver at all. | 319 | * or not even a driver at all. |
320 | */ | 320 | */ |
321 | static int pci_default_resume(struct pci_dev *pci_dev) | 321 | static int pci_default_resume(struct pci_dev *pci_dev) |
322 | { | 322 | { |
323 | int retval = 0; | 323 | int retval = 0; |
324 | 324 | ||
325 | /* restore the PCI config space */ | 325 | /* restore the PCI config space */ |
326 | pci_restore_state(pci_dev); | 326 | pci_restore_state(pci_dev); |
327 | /* if the device was enabled before suspend, reenable */ | 327 | /* if the device was enabled before suspend, reenable */ |
328 | if (atomic_read(&pci_dev->enable_cnt)) | 328 | if (atomic_read(&pci_dev->enable_cnt)) |
329 | retval = __pci_enable_device(pci_dev); | 329 | retval = __pci_enable_device(pci_dev); |
330 | /* if the device was busmaster before the suspend, make it busmaster again */ | 330 | /* if the device was busmaster before the suspend, make it busmaster again */ |
331 | if (pci_dev->is_busmaster) | 331 | if (pci_dev->is_busmaster) |
332 | pci_set_master(pci_dev); | 332 | pci_set_master(pci_dev); |
333 | 333 | ||
334 | return retval; | 334 | return retval; |
335 | } | 335 | } |
336 | 336 | ||
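A driver-provided resume hook typically mirrors pci_default_resume() above before reprogramming its own registers; a hedged sketch using the same hypothetical mydrv names:

static int mydrv_resume(struct pci_dev *pdev)
{
	int rc;

	pci_restore_state(pdev);	/* undo the earlier pci_save_state() */
	rc = pci_enable_device(pdev);	/* re-enable I/O and memory decoding */
	if (rc)
		return rc;
	pci_set_master(pdev);		/* restore bus mastering if the device does DMA */
	/* ... device-specific register setup would follow here ... */
	return 0;
}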
337 | static int pci_device_resume(struct device * dev) | 337 | static int pci_device_resume(struct device * dev) |
338 | { | 338 | { |
339 | int error; | 339 | int error; |
340 | struct pci_dev * pci_dev = to_pci_dev(dev); | 340 | struct pci_dev * pci_dev = to_pci_dev(dev); |
341 | struct pci_driver * drv = pci_dev->driver; | 341 | struct pci_driver * drv = pci_dev->driver; |
342 | 342 | ||
343 | if (drv && drv->resume) | 343 | if (drv && drv->resume) |
344 | error = drv->resume(pci_dev); | 344 | error = drv->resume(pci_dev); |
345 | else | 345 | else |
346 | error = pci_default_resume(pci_dev); | 346 | error = pci_default_resume(pci_dev); |
347 | return error; | 347 | return error; |
348 | } | 348 | } |
349 | 349 | ||
350 | static int pci_device_resume_early(struct device * dev) | 350 | static int pci_device_resume_early(struct device * dev) |
351 | { | 351 | { |
352 | int error = 0; | 352 | int error = 0; |
353 | struct pci_dev * pci_dev = to_pci_dev(dev); | 353 | struct pci_dev * pci_dev = to_pci_dev(dev); |
354 | struct pci_driver * drv = pci_dev->driver; | 354 | struct pci_driver * drv = pci_dev->driver; |
355 | 355 | ||
356 | pci_fixup_device(pci_fixup_resume, pci_dev); | ||
357 | |||
356 | if (drv && drv->resume_early) | 358 | if (drv && drv->resume_early) |
357 | error = drv->resume_early(pci_dev); | 359 | error = drv->resume_early(pci_dev); |
358 | return error; | 360 | return error; |
359 | } | 361 | } |
360 | 362 | ||
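The pci_fixup_device(pci_fixup_resume, ...) call added above is the heart of this change: before any driver's resume_early handler runs, the PCI core replays every quirk registered for the resume phase, so chipset settings lost across suspend-to-RAM are restored first. Quirks opt in with DECLARE_PCI_FIXUP_RESUME, as the quirks.c hunks below show. The general pattern looks like the following sketch; the register, value and device id are purely illustrative, and the hook must not be __devinit because it runs long after init memory has been discarded:

#define FOO_ARB_REG	0x76	/* hypothetical register, for illustration only */

static void quirk_restore_foo(struct pci_dev *dev)
{
	/* re-apply a chipset setting the BIOS may clobber across a resume */
	pci_write_config_byte(dev, FOO_ARB_REG, 0x10);
}
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, 0x1234, quirk_restore_foo);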
361 | static void pci_device_shutdown(struct device *dev) | 363 | static void pci_device_shutdown(struct device *dev) |
362 | { | 364 | { |
363 | struct pci_dev *pci_dev = to_pci_dev(dev); | 365 | struct pci_dev *pci_dev = to_pci_dev(dev); |
364 | struct pci_driver *drv = pci_dev->driver; | 366 | struct pci_driver *drv = pci_dev->driver; |
365 | 367 | ||
366 | if (drv && drv->shutdown) | 368 | if (drv && drv->shutdown) |
367 | drv->shutdown(pci_dev); | 369 | drv->shutdown(pci_dev); |
368 | } | 370 | } |
369 | 371 | ||
370 | #define kobj_to_pci_driver(obj) container_of(obj, struct device_driver, kobj) | 372 | #define kobj_to_pci_driver(obj) container_of(obj, struct device_driver, kobj) |
371 | #define attr_to_driver_attribute(obj) container_of(obj, struct driver_attribute, attr) | 373 | #define attr_to_driver_attribute(obj) container_of(obj, struct driver_attribute, attr) |
372 | 374 | ||
373 | static ssize_t | 375 | static ssize_t |
374 | pci_driver_attr_show(struct kobject * kobj, struct attribute *attr, char *buf) | 376 | pci_driver_attr_show(struct kobject * kobj, struct attribute *attr, char *buf) |
375 | { | 377 | { |
376 | struct device_driver *driver = kobj_to_pci_driver(kobj); | 378 | struct device_driver *driver = kobj_to_pci_driver(kobj); |
377 | struct driver_attribute *dattr = attr_to_driver_attribute(attr); | 379 | struct driver_attribute *dattr = attr_to_driver_attribute(attr); |
378 | ssize_t ret; | 380 | ssize_t ret; |
379 | 381 | ||
380 | if (!get_driver(driver)) | 382 | if (!get_driver(driver)) |
381 | return -ENODEV; | 383 | return -ENODEV; |
382 | 384 | ||
383 | ret = dattr->show ? dattr->show(driver, buf) : -EIO; | 385 | ret = dattr->show ? dattr->show(driver, buf) : -EIO; |
384 | 386 | ||
385 | put_driver(driver); | 387 | put_driver(driver); |
386 | return ret; | 388 | return ret; |
387 | } | 389 | } |
388 | 390 | ||
389 | static ssize_t | 391 | static ssize_t |
390 | pci_driver_attr_store(struct kobject * kobj, struct attribute *attr, | 392 | pci_driver_attr_store(struct kobject * kobj, struct attribute *attr, |
391 | const char *buf, size_t count) | 393 | const char *buf, size_t count) |
392 | { | 394 | { |
393 | struct device_driver *driver = kobj_to_pci_driver(kobj); | 395 | struct device_driver *driver = kobj_to_pci_driver(kobj); |
394 | struct driver_attribute *dattr = attr_to_driver_attribute(attr); | 396 | struct driver_attribute *dattr = attr_to_driver_attribute(attr); |
395 | ssize_t ret; | 397 | ssize_t ret; |
396 | 398 | ||
397 | if (!get_driver(driver)) | 399 | if (!get_driver(driver)) |
398 | return -ENODEV; | 400 | return -ENODEV; |
399 | 401 | ||
400 | ret = dattr->store ? dattr->store(driver, buf, count) : -EIO; | 402 | ret = dattr->store ? dattr->store(driver, buf, count) : -EIO; |
401 | 403 | ||
402 | put_driver(driver); | 404 | put_driver(driver); |
403 | return ret; | 405 | return ret; |
404 | } | 406 | } |
405 | 407 | ||
406 | static struct sysfs_ops pci_driver_sysfs_ops = { | 408 | static struct sysfs_ops pci_driver_sysfs_ops = { |
407 | .show = pci_driver_attr_show, | 409 | .show = pci_driver_attr_show, |
408 | .store = pci_driver_attr_store, | 410 | .store = pci_driver_attr_store, |
409 | }; | 411 | }; |
410 | static struct kobj_type pci_driver_kobj_type = { | 412 | static struct kobj_type pci_driver_kobj_type = { |
411 | .sysfs_ops = &pci_driver_sysfs_ops, | 413 | .sysfs_ops = &pci_driver_sysfs_ops, |
412 | }; | 414 | }; |
413 | 415 | ||
414 | /** | 416 | /** |
415 | * __pci_register_driver - register a new pci driver | 417 | * __pci_register_driver - register a new pci driver |
416 | * @drv: the driver structure to register | 418 | * @drv: the driver structure to register |
417 | * @owner: owner module of drv | 419 | * @owner: owner module of drv |
418 | * | 420 | * |
419 | * Adds the driver structure to the list of registered drivers. | 421 | * Adds the driver structure to the list of registered drivers. |
420 | * Returns a negative value on error, otherwise 0. | 422 | * Returns a negative value on error, otherwise 0. |
421 | * If no error occurred, the driver remains registered even if | 423 | * If no error occurred, the driver remains registered even if |
422 | * no device was claimed during registration. | 424 | * no device was claimed during registration. |
423 | */ | 425 | */ |
424 | int __pci_register_driver(struct pci_driver *drv, struct module *owner) | 426 | int __pci_register_driver(struct pci_driver *drv, struct module *owner) |
425 | { | 427 | { |
426 | int error; | 428 | int error; |
427 | 429 | ||
428 | /* initialize common driver fields */ | 430 | /* initialize common driver fields */ |
429 | drv->driver.name = drv->name; | 431 | drv->driver.name = drv->name; |
430 | drv->driver.bus = &pci_bus_type; | 432 | drv->driver.bus = &pci_bus_type; |
431 | drv->driver.owner = owner; | 433 | drv->driver.owner = owner; |
432 | drv->driver.kobj.ktype = &pci_driver_kobj_type; | 434 | drv->driver.kobj.ktype = &pci_driver_kobj_type; |
433 | 435 | ||
434 | if (pci_multithread_probe) | 436 | if (pci_multithread_probe) |
435 | drv->driver.multithread_probe = pci_multithread_probe; | 437 | drv->driver.multithread_probe = pci_multithread_probe; |
436 | else | 438 | else |
437 | drv->driver.multithread_probe = drv->multithread_probe; | 439 | drv->driver.multithread_probe = drv->multithread_probe; |
438 | 440 | ||
439 | spin_lock_init(&drv->dynids.lock); | 441 | spin_lock_init(&drv->dynids.lock); |
440 | INIT_LIST_HEAD(&drv->dynids.list); | 442 | INIT_LIST_HEAD(&drv->dynids.list); |
441 | 443 | ||
442 | /* register with core */ | 444 | /* register with core */ |
443 | error = driver_register(&drv->driver); | 445 | error = driver_register(&drv->driver); |
444 | if (error) | 446 | if (error) |
445 | return error; | 447 | return error; |
446 | 448 | ||
447 | error = pci_create_newid_file(drv); | 449 | error = pci_create_newid_file(drv); |
448 | if (error) | 450 | if (error) |
449 | driver_unregister(&drv->driver); | 451 | driver_unregister(&drv->driver); |
450 | 452 | ||
451 | return error; | 453 | return error; |
452 | } | 454 | } |
453 | 455 | ||
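Drivers normally reach __pci_register_driver() through the pci_register_driver() wrapper, which supplies THIS_MODULE as the owner. A skeletal, entirely hypothetical user tying together the mydrv sketches above and the id table sketched further down:

static struct pci_driver mydrv_driver = {
	.name		= "mydrv",
	.id_table	= mydrv_ids,	/* sketched below, near pci_bus_match() */
	.probe		= mydrv_probe,	/* not shown */
	.remove		= mydrv_remove,	/* not shown */
	.suspend	= mydrv_suspend,
	.resume		= mydrv_resume,
};

static int __init mydrv_init(void)
{
	return pci_register_driver(&mydrv_driver);
}

static void __exit mydrv_exit(void)
{
	pci_unregister_driver(&mydrv_driver);
}

module_init(mydrv_init);
module_exit(mydrv_exit);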
454 | /** | 456 | /** |
455 | * pci_unregister_driver - unregister a pci driver | 457 | * pci_unregister_driver - unregister a pci driver |
456 | * @drv: the driver structure to unregister | 458 | * @drv: the driver structure to unregister |
457 | * | 459 | * |
458 | * Deletes the driver structure from the list of registered PCI drivers, | 460 | * Deletes the driver structure from the list of registered PCI drivers, |
459 | * gives it a chance to clean up by calling its remove() function for | 461 | * gives it a chance to clean up by calling its remove() function for |
460 | * each device it was responsible for, and marks those devices as | 462 | * each device it was responsible for, and marks those devices as |
461 | * driverless. | 463 | * driverless. |
462 | */ | 464 | */ |
463 | 465 | ||
464 | void | 466 | void |
465 | pci_unregister_driver(struct pci_driver *drv) | 467 | pci_unregister_driver(struct pci_driver *drv) |
466 | { | 468 | { |
467 | driver_unregister(&drv->driver); | 469 | driver_unregister(&drv->driver); |
468 | pci_free_dynids(drv); | 470 | pci_free_dynids(drv); |
469 | } | 471 | } |
470 | 472 | ||
471 | static struct pci_driver pci_compat_driver = { | 473 | static struct pci_driver pci_compat_driver = { |
472 | .name = "compat" | 474 | .name = "compat" |
473 | }; | 475 | }; |
474 | 476 | ||
475 | /** | 477 | /** |
476 | * pci_dev_driver - get the pci_driver of a device | 478 | * pci_dev_driver - get the pci_driver of a device |
477 | * @dev: the device to query | 479 | * @dev: the device to query |
478 | * | 480 | * |
479 | * Returns the appropriate pci_driver structure or %NULL if there is no | 481 | * Returns the appropriate pci_driver structure or %NULL if there is no |
480 | * registered driver for the device. | 482 | * registered driver for the device. |
481 | */ | 483 | */ |
482 | struct pci_driver * | 484 | struct pci_driver * |
483 | pci_dev_driver(const struct pci_dev *dev) | 485 | pci_dev_driver(const struct pci_dev *dev) |
484 | { | 486 | { |
485 | if (dev->driver) | 487 | if (dev->driver) |
486 | return dev->driver; | 488 | return dev->driver; |
487 | else { | 489 | else { |
488 | int i; | 490 | int i; |
489 | for(i=0; i<=PCI_ROM_RESOURCE; i++) | 491 | for(i=0; i<=PCI_ROM_RESOURCE; i++) |
490 | if (dev->resource[i].flags & IORESOURCE_BUSY) | 492 | if (dev->resource[i].flags & IORESOURCE_BUSY) |
491 | return &pci_compat_driver; | 493 | return &pci_compat_driver; |
492 | } | 494 | } |
493 | return NULL; | 495 | return NULL; |
494 | } | 496 | } |
495 | 497 | ||
496 | /** | 498 | /** |
497 | * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure | 499 | * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure |
498 | * @dev: the PCI device structure to match against | 500 | * @dev: the PCI device structure to match against |
499 | * @drv: the device driver to search for matching PCI device id structures | 501 | * @drv: the device driver to search for matching PCI device id structures |
500 | * | 502 | * |
501 | * Used by a driver to check whether a PCI device present in the | 503 | * Used by a driver to check whether a PCI device present in the |
502 | * system is in its list of supported devices. Returns the matching | 504 | * system is in its list of supported devices. Returns the matching |
503 | * pci_device_id structure or %NULL if there is no match. | 505 | * pci_device_id structure or %NULL if there is no match. |
504 | */ | 506 | */ |
505 | static int pci_bus_match(struct device *dev, struct device_driver *drv) | 507 | static int pci_bus_match(struct device *dev, struct device_driver *drv) |
506 | { | 508 | { |
507 | struct pci_dev *pci_dev = to_pci_dev(dev); | 509 | struct pci_dev *pci_dev = to_pci_dev(dev); |
508 | struct pci_driver *pci_drv = to_pci_driver(drv); | 510 | struct pci_driver *pci_drv = to_pci_driver(drv); |
509 | const struct pci_device_id *found_id; | 511 | const struct pci_device_id *found_id; |
510 | 512 | ||
511 | found_id = pci_match_device(pci_drv, pci_dev); | 513 | found_id = pci_match_device(pci_drv, pci_dev); |
512 | if (found_id) | 514 | if (found_id) |
513 | return 1; | 515 | return 1; |
514 | 516 | ||
515 | return 0; | 517 | return 0; |
516 | } | 518 | } |
517 | 519 | ||
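pci_bus_match() defers to pci_match_device(), which consults both the driver's static id_table and any ids added at runtime through the new_id file created by pci_create_newid_file() above. A typical static table, again with placeholder values:

static struct pci_device_id mydrv_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, 0x1234) },	/* 0x1234 is a placeholder device id */
	{ 0, }						/* all-zero terminating entry */
};
MODULE_DEVICE_TABLE(pci, mydrv_ids);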
518 | /** | 520 | /** |
519 | * pci_dev_get - increments the reference count of the pci device structure | 521 | * pci_dev_get - increments the reference count of the pci device structure |
520 | * @dev: the device being referenced | 522 | * @dev: the device being referenced |
521 | * | 523 | * |
522 | * Each live reference to a device should be refcounted. | 524 | * Each live reference to a device should be refcounted. |
523 | * | 525 | * |
524 | * Drivers for PCI devices should normally record such references in | 526 | * Drivers for PCI devices should normally record such references in |
525 | * their probe() methods, when they bind to a device, and release | 527 | * their probe() methods, when they bind to a device, and release |
526 | * them by calling pci_dev_put(), in their disconnect() methods. | 528 | * them by calling pci_dev_put(), in their disconnect() methods. |
527 | * | 529 | * |
528 | * A pointer to the device with the incremented reference counter is returned. | 530 | * A pointer to the device with the incremented reference counter is returned. |
529 | */ | 531 | */ |
530 | struct pci_dev *pci_dev_get(struct pci_dev *dev) | 532 | struct pci_dev *pci_dev_get(struct pci_dev *dev) |
531 | { | 533 | { |
532 | if (dev) | 534 | if (dev) |
533 | get_device(&dev->dev); | 535 | get_device(&dev->dev); |
534 | return dev; | 536 | return dev; |
535 | } | 537 | } |
536 | 538 | ||
537 | /** | 539 | /** |
538 | * pci_dev_put - release a use of the pci device structure | 540 | * pci_dev_put - release a use of the pci device structure |
539 | * @dev: device that's been disconnected | 541 | * @dev: device that's been disconnected |
540 | * | 542 | * |
541 | * Must be called when a user of a device is finished with it. When the last | 543 | * Must be called when a user of a device is finished with it. When the last |
542 | * user of the device calls this function, the memory of the device is freed. | 544 | * user of the device calls this function, the memory of the device is freed. |
543 | */ | 545 | */ |
544 | void pci_dev_put(struct pci_dev *dev) | 546 | void pci_dev_put(struct pci_dev *dev) |
545 | { | 547 | { |
546 | if (dev) | 548 | if (dev) |
547 | put_device(&dev->dev); | 549 | put_device(&dev->dev); |
548 | } | 550 | } |
549 | 551 | ||
550 | #ifndef CONFIG_HOTPLUG | 552 | #ifndef CONFIG_HOTPLUG |
551 | int pci_uevent(struct device *dev, char **envp, int num_envp, | 553 | int pci_uevent(struct device *dev, char **envp, int num_envp, |
552 | char *buffer, int buffer_size) | 554 | char *buffer, int buffer_size) |
553 | { | 555 | { |
554 | return -ENODEV; | 556 | return -ENODEV; |
555 | } | 557 | } |
556 | #endif | 558 | #endif |
557 | 559 | ||
558 | struct bus_type pci_bus_type = { | 560 | struct bus_type pci_bus_type = { |
559 | .name = "pci", | 561 | .name = "pci", |
560 | .match = pci_bus_match, | 562 | .match = pci_bus_match, |
561 | .uevent = pci_uevent, | 563 | .uevent = pci_uevent, |
562 | .probe = pci_device_probe, | 564 | .probe = pci_device_probe, |
563 | .remove = pci_device_remove, | 565 | .remove = pci_device_remove, |
564 | .suspend = pci_device_suspend, | 566 | .suspend = pci_device_suspend, |
565 | .suspend_late = pci_device_suspend_late, | 567 | .suspend_late = pci_device_suspend_late, |
566 | .resume_early = pci_device_resume_early, | 568 | .resume_early = pci_device_resume_early, |
567 | .resume = pci_device_resume, | 569 | .resume = pci_device_resume, |
568 | .shutdown = pci_device_shutdown, | 570 | .shutdown = pci_device_shutdown, |
569 | .dev_attrs = pci_dev_attrs, | 571 | .dev_attrs = pci_dev_attrs, |
570 | }; | 572 | }; |
571 | 573 | ||
572 | static int __init pci_driver_init(void) | 574 | static int __init pci_driver_init(void) |
573 | { | 575 | { |
574 | return bus_register(&pci_bus_type); | 576 | return bus_register(&pci_bus_type); |
575 | } | 577 | } |
576 | 578 | ||
577 | postcore_initcall(pci_driver_init); | 579 | postcore_initcall(pci_driver_init); |
578 | 580 | ||
579 | EXPORT_SYMBOL(pci_match_id); | 581 | EXPORT_SYMBOL(pci_match_id); |
580 | EXPORT_SYMBOL(pci_match_device); | 582 | EXPORT_SYMBOL(pci_match_device); |
581 | EXPORT_SYMBOL(__pci_register_driver); | 583 | EXPORT_SYMBOL(__pci_register_driver); |
582 | EXPORT_SYMBOL(pci_unregister_driver); | 584 | EXPORT_SYMBOL(pci_unregister_driver); |
583 | EXPORT_SYMBOL(pci_dev_driver); | 585 | EXPORT_SYMBOL(pci_dev_driver); |
584 | EXPORT_SYMBOL(pci_bus_type); | 586 | EXPORT_SYMBOL(pci_bus_type); |
585 | EXPORT_SYMBOL(pci_dev_get); | 587 | EXPORT_SYMBOL(pci_dev_get); |
586 | EXPORT_SYMBOL(pci_dev_put); | 588 | EXPORT_SYMBOL(pci_dev_put); |
587 | 589 |
drivers/pci/quirks.c
1 | /* | 1 | /* |
2 | * This file contains work-arounds for many known PCI hardware | 2 | * This file contains work-arounds for many known PCI hardware |
3 | * bugs. Devices present only on certain architectures (host | 3 | * bugs. Devices present only on certain architectures (host |
4 | * bridges et cetera) should be handled in arch-specific code. | 4 | * bridges et cetera) should be handled in arch-specific code. |
5 | * | 5 | * |
6 | * Note: any quirks for hotpluggable devices must _NOT_ be declared __init. | 6 | * Note: any quirks for hotpluggable devices must _NOT_ be declared __init. |
7 | * | 7 | * |
8 | * Copyright (c) 1999 Martin Mares <mj@ucw.cz> | 8 | * Copyright (c) 1999 Martin Mares <mj@ucw.cz> |
9 | * | 9 | * |
10 | * Init/reset quirks for USB host controllers should be in the | 10 | * Init/reset quirks for USB host controllers should be in the |
11 | * USB quirks file, where their drivers can access reuse it. | 11 | * USB quirks file, where their drivers can access reuse it. |
12 | * | 12 | * |
13 | * The bridge optimization stuff has been removed. If you really | 13 | * The bridge optimization stuff has been removed. If you really |
14 | * have a silly BIOS which is unable to set your host bridge right, | 14 | * have a silly BIOS which is unable to set your host bridge right, |
15 | * use the PowerTweak utility (see http://powertweak.sourceforge.net). | 15 | * use the PowerTweak utility (see http://powertweak.sourceforge.net). |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
23 | #include <linux/acpi.h> | 23 | #include <linux/acpi.h> |
24 | #include "pci.h" | 24 | #include "pci.h" |
25 | 25 | ||
26 | /* The Mellanox Tavor device gives false positive parity errors | 26 | /* The Mellanox Tavor device gives false positive parity errors |
27 | * Mark this device with a broken_parity_status, to allow | 27 | * Mark this device with a broken_parity_status, to allow |
28 | * PCI scanning code to "skip" this now blacklisted device. | 28 | * PCI scanning code to "skip" this now blacklisted device. |
29 | */ | 29 | */ |
30 | static void __devinit quirk_mellanox_tavor(struct pci_dev *dev) | 30 | static void __devinit quirk_mellanox_tavor(struct pci_dev *dev) |
31 | { | 31 | { |
32 | dev->broken_parity_status = 1; /* This device gives false positives */ | 32 | dev->broken_parity_status = 1; /* This device gives false positives */ |
33 | } | 33 | } |
34 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); | 34 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); |
35 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); | 35 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); |
36 | 36 | ||
37 | /* Deal with broken BIOS'es that neglect to enable passive release, | 37 | /* Deal with broken BIOS'es that neglect to enable passive release, |
38 | which can cause problems in combination with the 82441FX/PPro MTRRs */ | 38 | which can cause problems in combination with the 82441FX/PPro MTRRs */ |
39 | static void __devinit quirk_passive_release(struct pci_dev *dev) | 39 | static void quirk_passive_release(struct pci_dev *dev) |
40 | { | 40 | { |
41 | struct pci_dev *d = NULL; | 41 | struct pci_dev *d = NULL; |
42 | unsigned char dlc; | 42 | unsigned char dlc; |
43 | 43 | ||
44 | /* We have to make sure a particular bit is set in the PIIX3 | 44 | /* We have to make sure a particular bit is set in the PIIX3 |
45 | ISA bridge, so we have to go out and find it. */ | 45 | ISA bridge, so we have to go out and find it. */ |
46 | while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) { | 46 | while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) { |
47 | pci_read_config_byte(d, 0x82, &dlc); | 47 | pci_read_config_byte(d, 0x82, &dlc); |
48 | if (!(dlc & 1<<1)) { | 48 | if (!(dlc & 1<<1)) { |
49 | printk(KERN_ERR "PCI: PIIX3: Enabling Passive Release on %s\n", pci_name(d)); | 49 | printk(KERN_ERR "PCI: PIIX3: Enabling Passive Release on %s\n", pci_name(d)); |
50 | dlc |= 1<<1; | 50 | dlc |= 1<<1; |
51 | pci_write_config_byte(d, 0x82, dlc); | 51 | pci_write_config_byte(d, 0x82, dlc); |
52 | } | 52 | } |
53 | } | 53 | } |
54 | } | 54 | } |
55 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release ); | 55 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release ); |
56 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release ); | ||
56 | 57 | ||
57 | /* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround | 58 | /* The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a workaround |
58 | but VIA don't answer queries. If you happen to have good contacts at VIA | 59 | but VIA don't answer queries. If you happen to have good contacts at VIA |
59 | ask them for me please -- Alan | 60 | ask them for me please -- Alan |
60 | 61 | ||
61 | This appears to be BIOS not version dependent. So presumably there is a | 62 | This appears to be BIOS not version dependent. So presumably there is a |
62 | chipset level fix */ | 63 | chipset level fix */ |
63 | int isa_dma_bridge_buggy; /* Exported */ | 64 | int isa_dma_bridge_buggy; /* Exported */ |
64 | 65 | ||
65 | static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev) | 66 | static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev) |
66 | { | 67 | { |
67 | if (!isa_dma_bridge_buggy) { | 68 | if (!isa_dma_bridge_buggy) { |
68 | isa_dma_bridge_buggy=1; | 69 | isa_dma_bridge_buggy=1; |
69 | printk(KERN_INFO "Activating ISA DMA hang workarounds.\n"); | 70 | printk(KERN_INFO "Activating ISA DMA hang workarounds.\n"); |
70 | } | 71 | } |
71 | } | 72 | } |
72 | /* | 73 | /* |
73 | * Its not totally clear which chipsets are the problematic ones | 74 | * Its not totally clear which chipsets are the problematic ones |
74 | * We know 82C586 and 82C596 variants are affected. | 75 | * We know 82C586 and 82C596 variants are affected. |
75 | */ | 76 | */ |
76 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs ); | 77 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs ); |
77 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs ); | 78 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs ); |
78 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs ); | 79 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs ); |
79 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs ); | 80 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs ); |
80 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs ); | 81 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs ); |
81 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs ); | 82 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs ); |
82 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs ); | 83 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs ); |
83 | 84 | ||
84 | int pci_pci_problems; | 85 | int pci_pci_problems; |
85 | 86 | ||
86 | /* | 87 | /* |
87 | * Chipsets where PCI->PCI transfers vanish or hang | 88 | * Chipsets where PCI->PCI transfers vanish or hang |
88 | */ | 89 | */ |
89 | static void __devinit quirk_nopcipci(struct pci_dev *dev) | 90 | static void __devinit quirk_nopcipci(struct pci_dev *dev) |
90 | { | 91 | { |
91 | if ((pci_pci_problems & PCIPCI_FAIL)==0) { | 92 | if ((pci_pci_problems & PCIPCI_FAIL)==0) { |
92 | printk(KERN_INFO "Disabling direct PCI/PCI transfers.\n"); | 93 | printk(KERN_INFO "Disabling direct PCI/PCI transfers.\n"); |
93 | pci_pci_problems |= PCIPCI_FAIL; | 94 | pci_pci_problems |= PCIPCI_FAIL; |
94 | } | 95 | } |
95 | } | 96 | } |
96 | 97 | ||
97 | static void __devinit quirk_nopciamd(struct pci_dev *dev) | 98 | static void __devinit quirk_nopciamd(struct pci_dev *dev) |
98 | { | 99 | { |
99 | u8 rev; | 100 | u8 rev; |
100 | pci_read_config_byte(dev, 0x08, &rev); | 101 | pci_read_config_byte(dev, 0x08, &rev); |
101 | if (rev == 0x13) { | 102 | if (rev == 0x13) { |
102 | /* Erratum 24 */ | 103 | /* Erratum 24 */ |
103 | printk(KERN_INFO "Chipset erratum: Disabling direct PCI/AGP transfers.\n"); | 104 | printk(KERN_INFO "Chipset erratum: Disabling direct PCI/AGP transfers.\n"); |
104 | pci_pci_problems |= PCIAGP_FAIL; | 105 | pci_pci_problems |= PCIAGP_FAIL; |
105 | } | 106 | } |
106 | } | 107 | } |
107 | 108 | ||
108 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci ); | 109 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci ); |
109 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci ); | 110 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci ); |
110 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd ); | 111 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd ); |
111 | 112 | ||
112 | /* | 113 | /* |
113 | * Triton requires workarounds to be used by the drivers | 114 | * Triton requires workarounds to be used by the drivers |
114 | */ | 115 | */ |
115 | static void __devinit quirk_triton(struct pci_dev *dev) | 116 | static void __devinit quirk_triton(struct pci_dev *dev) |
116 | { | 117 | { |
117 | if ((pci_pci_problems&PCIPCI_TRITON)==0) { | 118 | if ((pci_pci_problems&PCIPCI_TRITON)==0) { |
118 | printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n"); | 119 | printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n"); |
119 | pci_pci_problems |= PCIPCI_TRITON; | 120 | pci_pci_problems |= PCIPCI_TRITON; |
120 | } | 121 | } |
121 | } | 122 | } |
122 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton ); | 123 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton ); |
123 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton ); | 124 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton ); |
124 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton ); | 125 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton ); |
125 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton ); | 126 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton ); |
126 | 127 | ||
127 | /* | 128 | /* |
128 | * VIA Apollo KT133 needs PCI latency patch | 129 | * VIA Apollo KT133 needs PCI latency patch |
129 | * Made according to a windows driver based patch by George E. Breese | 130 | * Made according to a windows driver based patch by George E. Breese |
130 | * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm | 131 | * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm |
131 | * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for | 132 | * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for |
132 | * the info on which Mr Breese based his work. | 133 | * the info on which Mr Breese based his work. |
133 | * | 134 | * |
134 | * Updated based on further information from the site and also on | 135 | * Updated based on further information from the site and also on |
135 | * information provided by VIA | 136 | * information provided by VIA |
136 | */ | 137 | */ |
137 | static void __devinit quirk_vialatency(struct pci_dev *dev) | 138 | static void quirk_vialatency(struct pci_dev *dev) |
138 | { | 139 | { |
139 | struct pci_dev *p; | 140 | struct pci_dev *p; |
140 | u8 rev; | 141 | u8 rev; |
141 | u8 busarb; | 142 | u8 busarb; |
142 | /* Ok we have a potential problem chipset here. Now see if we have | 143 | /* Ok we have a potential problem chipset here. Now see if we have |
143 | a buggy southbridge */ | 144 | a buggy southbridge */ |
144 | 145 | ||
145 | p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL); | 146 | p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL); |
146 | if (p!=NULL) { | 147 | if (p!=NULL) { |
147 | pci_read_config_byte(p, PCI_CLASS_REVISION, &rev); | 148 | pci_read_config_byte(p, PCI_CLASS_REVISION, &rev); |
148 | /* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */ | 149 | /* 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; thanks Dan Hollis */ |
149 | /* Check for buggy part revisions */ | 150 | /* Check for buggy part revisions */ |
150 | if (rev < 0x40 || rev > 0x42) | 151 | if (rev < 0x40 || rev > 0x42) |
151 | goto exit; | 152 | goto exit; |
152 | } else { | 153 | } else { |
153 | p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL); | 154 | p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL); |
154 | if (p==NULL) /* No problem parts */ | 155 | if (p==NULL) /* No problem parts */ |
155 | goto exit; | 156 | goto exit; |
156 | pci_read_config_byte(p, PCI_CLASS_REVISION, &rev); | 157 | pci_read_config_byte(p, PCI_CLASS_REVISION, &rev); |
157 | /* Check for buggy part revisions */ | 158 | /* Check for buggy part revisions */ |
158 | if (rev < 0x10 || rev > 0x12) | 159 | if (rev < 0x10 || rev > 0x12) |
159 | goto exit; | 160 | goto exit; |
160 | } | 161 | } |
161 | 162 | ||
162 | /* | 163 | /* |
163 | * Ok we have the problem. Now set the PCI master grant to | 164 | * Ok we have the problem. Now set the PCI master grant to |
164 | * occur every master grant. The apparent bug is that under high | 165 | * occur every master grant. The apparent bug is that under high |
165 | * PCI load (quite common in Linux of course) you can get data | 166 | * PCI load (quite common in Linux of course) you can get data |
166 | * loss when the CPU is held off the bus for 3 bus master requests | 167 | * loss when the CPU is held off the bus for 3 bus master requests |
167 | * This happens to include the IDE controllers.... | 168 | * This happens to include the IDE controllers.... |
168 | * | 169 | * |
169 | * VIA only apply this fix when an SB Live! is present but under | 170 | * VIA only apply this fix when an SB Live! is present but under |
170 | * both Linux and Windows this isnt enough, and we have seen | 171 | * both Linux and Windows this isnt enough, and we have seen |
171 | * corruption without SB Live! but with things like 3 UDMA IDE | 172 | * corruption without SB Live! but with things like 3 UDMA IDE |
172 | * controllers. So we ignore that bit of the VIA recommendation.. | 173 | * controllers. So we ignore that bit of the VIA recommendation.. |
173 | */ | 174 | */ |
174 | 175 | ||
175 | pci_read_config_byte(dev, 0x76, &busarb); | 176 | pci_read_config_byte(dev, 0x76, &busarb); |
176 | /* Set bit 4 and bi 5 of byte 76 to 0x01 | 177 | /* Set bit 4 and bi 5 of byte 76 to 0x01 |
177 | "Master priority rotation on every PCI master grant */ | 178 | "Master priority rotation on every PCI master grant */ |
178 | busarb &= ~(1<<5); | 179 | busarb &= ~(1<<5); |
179 | busarb |= (1<<4); | 180 | busarb |= (1<<4); |
180 | pci_write_config_byte(dev, 0x76, busarb); | 181 | pci_write_config_byte(dev, 0x76, busarb); |
181 | printk(KERN_INFO "Applying VIA southbridge workaround.\n"); | 182 | printk(KERN_INFO "Applying VIA southbridge workaround.\n"); |
182 | exit: | 183 | exit: |
183 | pci_dev_put(p); | 184 | pci_dev_put(p); |
184 | } | 185 | } |
185 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency ); | 186 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency ); |
186 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency ); | 187 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency ); |
187 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency ); | 188 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency ); |
189 | /* Must restore this on a resume from RAM */ | ||
190 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency ); | ||
191 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency ); | ||
192 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency ); | ||
188 | 193 | ||
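Because the BIOS reprograms these VIA host bridges on wakeup, the arbitration fix is re-registered here as a resume fixup, and quirk_vialatency correspondingly loses its __devinit marking so the function still exists after boot. The register arithmetic is easy to check by hand; assuming byte 0x76 reads 0x20 after a resume:

u8 busarb = 0x20;	/* assumed value: bit 5 set, bit 4 clear */
busarb &= ~(1 << 5);	/* 0x20 -> 0x00 */
busarb |= (1 << 4);	/* 0x00 -> 0x10: rotate master priority on every grant */
/* pci_write_config_byte(dev, 0x76, busarb) then writes 0x10 back */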
189 | /* | 194 | /* |
190 | * VIA Apollo VP3 needs ETBF on BT848/878 | 195 | * VIA Apollo VP3 needs ETBF on BT848/878 |
191 | */ | 196 | */ |
192 | static void __devinit quirk_viaetbf(struct pci_dev *dev) | 197 | static void __devinit quirk_viaetbf(struct pci_dev *dev) |
193 | { | 198 | { |
194 | if ((pci_pci_problems&PCIPCI_VIAETBF)==0) { | 199 | if ((pci_pci_problems&PCIPCI_VIAETBF)==0) { |
195 | printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n"); | 200 | printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n"); |
196 | pci_pci_problems |= PCIPCI_VIAETBF; | 201 | pci_pci_problems |= PCIPCI_VIAETBF; |
197 | } | 202 | } |
198 | } | 203 | } |
199 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf ); | 204 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf ); |
200 | 205 | ||
201 | static void __devinit quirk_vsfx(struct pci_dev *dev) | 206 | static void __devinit quirk_vsfx(struct pci_dev *dev) |
202 | { | 207 | { |
203 | if ((pci_pci_problems&PCIPCI_VSFX)==0) { | 208 | if ((pci_pci_problems&PCIPCI_VSFX)==0) { |
204 | printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n"); | 209 | printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n"); |
205 | pci_pci_problems |= PCIPCI_VSFX; | 210 | pci_pci_problems |= PCIPCI_VSFX; |
206 | } | 211 | } |
207 | } | 212 | } |
208 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx ); | 213 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx ); |
209 | 214 | ||
210 | /* | 215 | /* |
211 | * Ali Magik requires workarounds to be used by the drivers | 216 | * Ali Magik requires workarounds to be used by the drivers |
212 | * that DMA to AGP space. Latency must be set to 0xA and triton | 217 | * that DMA to AGP space. Latency must be set to 0xA and triton |
213 | * workaround applied too | 218 | * workaround applied too |
214 | * [Info kindly provided by ALi] | 219 | * [Info kindly provided by ALi] |
215 | */ | 220 | */ |
216 | static void __init quirk_alimagik(struct pci_dev *dev) | 221 | static void __init quirk_alimagik(struct pci_dev *dev) |
217 | { | 222 | { |
218 | if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) { | 223 | if ((pci_pci_problems&PCIPCI_ALIMAGIK)==0) { |
219 | printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n"); | 224 | printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n"); |
220 | pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON; | 225 | pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON; |
221 | } | 226 | } |
222 | } | 227 | } |
223 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik ); | 228 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik ); |
224 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik ); | 229 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik ); |
225 | 230 | ||
226 | /* | 231 | /* |
227 | * Natoma has some interesting boundary conditions with Zoran stuff | 232 | * Natoma has some interesting boundary conditions with Zoran stuff |
228 | * at least | 233 | * at least |
229 | */ | 234 | */ |
230 | static void __devinit quirk_natoma(struct pci_dev *dev) | 235 | static void __devinit quirk_natoma(struct pci_dev *dev) |
231 | { | 236 | { |
232 | if ((pci_pci_problems&PCIPCI_NATOMA)==0) { | 237 | if ((pci_pci_problems&PCIPCI_NATOMA)==0) { |
233 | printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n"); | 238 | printk(KERN_INFO "Limiting direct PCI/PCI transfers.\n"); |
234 | pci_pci_problems |= PCIPCI_NATOMA; | 239 | pci_pci_problems |= PCIPCI_NATOMA; |
235 | } | 240 | } |
236 | } | 241 | } |
237 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma ); | 242 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma ); |
238 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma ); | 243 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma ); |
239 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma ); | 244 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma ); |
240 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma ); | 245 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma ); |
241 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma ); | 246 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma ); |
242 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma ); | 247 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma ); |
243 | 248 | ||
244 | /* | 249 | /* |
245 | * This chip can cause PCI parity errors if config register 0xA0 is read | 250 | * This chip can cause PCI parity errors if config register 0xA0 is read |
246 | * while DMAs are occurring. | 251 | * while DMAs are occurring. |
247 | */ | 252 | */ |
248 | static void __devinit quirk_citrine(struct pci_dev *dev) | 253 | static void __devinit quirk_citrine(struct pci_dev *dev) |
249 | { | 254 | { |
250 | dev->cfg_size = 0xA0; | 255 | dev->cfg_size = 0xA0; |
251 | } | 256 | } |
252 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine ); | 257 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine ); |
253 | 258 | ||
254 | /* | 259 | /* |
255 | * S3 868 and 968 chips report region size equal to 32M, but they decode 64M. | 260 | * S3 868 and 968 chips report region size equal to 32M, but they decode 64M. |
256 | * If it's needed, re-allocate the region. | 261 | * If it's needed, re-allocate the region. |
257 | */ | 262 | */ |
258 | static void __devinit quirk_s3_64M(struct pci_dev *dev) | 263 | static void __devinit quirk_s3_64M(struct pci_dev *dev) |
259 | { | 264 | { |
260 | struct resource *r = &dev->resource[0]; | 265 | struct resource *r = &dev->resource[0]; |
261 | 266 | ||
262 | if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) { | 267 | if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) { |
263 | r->start = 0; | 268 | r->start = 0; |
264 | r->end = 0x3ffffff; | 269 | r->end = 0x3ffffff; |
265 | } | 270 | } |
266 | } | 271 | } |
267 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M ); | 272 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M ); |
268 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M ); | 273 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M ); |
269 | 274 | ||
270 | static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, | 275 | static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region, |
271 | unsigned size, int nr, const char *name) | 276 | unsigned size, int nr, const char *name) |
272 | { | 277 | { |
273 | region &= ~(size-1); | 278 | region &= ~(size-1); |
274 | if (region) { | 279 | if (region) { |
275 | struct pci_bus_region bus_region; | 280 | struct pci_bus_region bus_region; |
276 | struct resource *res = dev->resource + nr; | 281 | struct resource *res = dev->resource + nr; |
277 | 282 | ||
278 | res->name = pci_name(dev); | 283 | res->name = pci_name(dev); |
279 | res->start = region; | 284 | res->start = region; |
280 | res->end = region + size - 1; | 285 | res->end = region + size - 1; |
281 | res->flags = IORESOURCE_IO; | 286 | res->flags = IORESOURCE_IO; |
282 | 287 | ||
283 | /* Convert from PCI bus to resource space. */ | 288 | /* Convert from PCI bus to resource space. */ |
284 | bus_region.start = res->start; | 289 | bus_region.start = res->start; |
285 | bus_region.end = res->end; | 290 | bus_region.end = res->end; |
286 | pcibios_bus_to_resource(dev, res, &bus_region); | 291 | pcibios_bus_to_resource(dev, res, &bus_region); |
287 | 292 | ||
288 | pci_claim_resource(dev, nr); | 293 | pci_claim_resource(dev, nr); |
289 | printk("PCI quirk: region %04x-%04x claimed by %s\n", region, region + size - 1, name); | 294 | printk("PCI quirk: region %04x-%04x claimed by %s\n", region, region + size - 1, name); |
290 | } | 295 | } |
291 | } | 296 | } |
292 | 297 | ||
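quirk_io_region() is the helper used by the ACPI/SMBus quirks that follow: it rounds the base read from config space down to the region's natural alignment, then claims it as an I/O resource. A worked example with assumed values:

unsigned region = 0x0CA1;	/* hypothetical raw value from config space */
unsigned size   = 64;
region &= ~(size - 1);		/* mask ~0x3F: 0x0CA1 -> 0x0C80 */
/* res->start = 0x0C80, res->end = 0x0C80 + 64 - 1 = 0x0CBF */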
293 | /* | 298 | /* |
294 | * ATI Northbridge setups MCE the processor if you even | 299 | * ATI Northbridge setups MCE the processor if you even |
295 | * read somewhere between 0x3b0->0x3bb or read 0x3d3 | 300 | * read somewhere between 0x3b0->0x3bb or read 0x3d3 |
296 | */ | 301 | */ |
297 | static void __devinit quirk_ati_exploding_mce(struct pci_dev *dev) | 302 | static void __devinit quirk_ati_exploding_mce(struct pci_dev *dev) |
298 | { | 303 | { |
299 | printk(KERN_INFO "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb.\n"); | 304 | printk(KERN_INFO "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb.\n"); |
300 | /* Mae rhaid i ni beidio ag edrych ar y lleoliadiau I/O hyn */ | 305 | /* Mae rhaid i ni beidio ag edrych ar y lleoliadiau I/O hyn */ |
301 | request_region(0x3b0, 0x0C, "RadeonIGP"); | 306 | request_region(0x3b0, 0x0C, "RadeonIGP"); |
302 | request_region(0x3d3, 0x01, "RadeonIGP"); | 307 | request_region(0x3d3, 0x01, "RadeonIGP"); |
303 | } | 308 | } |
304 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce ); | 309 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce ); |
305 | 310 | ||
306 | /* | 311 | /* |
307 | * Let's make the southbridge information explicit instead | 312 | * Let's make the southbridge information explicit instead |
308 | * of having to worry about people probing the ACPI areas, | 313 | * of having to worry about people probing the ACPI areas, |
309 | * for example.. (Yes, it happens, and if you read the wrong | 314 | * for example.. (Yes, it happens, and if you read the wrong |
310 | * ACPI register it will put the machine to sleep with no | 315 | * ACPI register it will put the machine to sleep with no |
311 | * way of waking it up again. Bummer). | 316 | * way of waking it up again. Bummer). |
312 | * | 317 | * |
313 | * ALI M7101: Two IO regions pointed to by words at | 318 | * ALI M7101: Two IO regions pointed to by words at |
314 | * 0xE0 (64 bytes of ACPI registers) | 319 | * 0xE0 (64 bytes of ACPI registers) |
315 | * 0xE2 (32 bytes of SMB registers) | 320 | * 0xE2 (32 bytes of SMB registers) |
316 | */ | 321 | */ |
317 | static void __devinit quirk_ali7101_acpi(struct pci_dev *dev) | 322 | static void __devinit quirk_ali7101_acpi(struct pci_dev *dev) |
318 | { | 323 | { |
319 | u16 region; | 324 | u16 region; |
320 | 325 | ||
321 | pci_read_config_word(dev, 0xE0, ®ion); | 326 | pci_read_config_word(dev, 0xE0, ®ion); |
322 | quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI"); | 327 | quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI"); |
323 | pci_read_config_word(dev, 0xE2, ®ion); | 328 | pci_read_config_word(dev, 0xE2, ®ion); |
324 | quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB"); | 329 | quirk_io_region(dev, region, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB"); |
325 | } | 330 | } |
326 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi ); | 331 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi ); |
327 | 332 | ||
328 | static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable) | 333 | static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable) |
329 | { | 334 | { |
330 | u32 devres; | 335 | u32 devres; |
331 | u32 mask, size, base; | 336 | u32 mask, size, base; |
332 | 337 | ||
333 | pci_read_config_dword(dev, port, &devres); | 338 | pci_read_config_dword(dev, port, &devres); |
334 | if ((devres & enable) != enable) | 339 | if ((devres & enable) != enable) |
335 | return; | 340 | return; |
336 | mask = (devres >> 16) & 15; | 341 | mask = (devres >> 16) & 15; |
337 | base = devres & 0xffff; | 342 | base = devres & 0xffff; |
338 | size = 16; | 343 | size = 16; |
339 | for (;;) { | 344 | for (;;) { |
340 | unsigned bit = size >> 1; | 345 | unsigned bit = size >> 1; |
341 | if ((bit & mask) == bit) | 346 | if ((bit & mask) == bit) |
342 | break; | 347 | break; |
343 | size = bit; | 348 | size = bit; |
344 | } | 349 | } |
345 | /* | 350 | /* |
346 | * For now we only print it out. Eventually we'll want to | 351 | * For now we only print it out. Eventually we'll want to |
347 | * reserve it (at least if it's in the 0x1000+ range), but | 352 | * reserve it (at least if it's in the 0x1000+ range), but |
348 | * let's get enough confirmation reports first. | 353 | * let's get enough confirmation reports first. |
349 | */ | 354 | */ |
350 | base &= -size; | 355 | base &= -size; |
351 | printk("%s PIO at %04x-%04x\n", name, base, base + size - 1); | 356 | printk("%s PIO at %04x-%04x\n", name, base, base + size - 1); |
352 | } | 357 | } |
353 | 358 | ||
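The loop in piix4_io_quirk() derives the decoded window size from the PIIX4 mask field by halving size until the trial bit is wholly contained in the mask. Tracing it with an assumed mask of 0x3 (address bits 1:0 ignored):

/* size = 16: bit = 8, (8 & 0x3) != 8 -> size = 8
 * size =  8: bit = 4, (4 & 0x3) != 4 -> size = 4
 * size =  4: bit = 2, (2 & 0x3) == 2 -> break
 * result: a 4-byte window; base &= -size then aligns the base to it
 */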
354 | static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable) | 359 | static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable) |
355 | { | 360 | { |
356 | u32 devres; | 361 | u32 devres; |
357 | u32 mask, size, base; | 362 | u32 mask, size, base; |
358 | 363 | ||
359 | pci_read_config_dword(dev, port, &devres); | 364 | pci_read_config_dword(dev, port, &devres); |
360 | if ((devres & enable) != enable) | 365 | if ((devres & enable) != enable) |
361 | return; | 366 | return; |
362 | base = devres & 0xffff0000; | 367 | base = devres & 0xffff0000; |
363 | mask = (devres & 0x3f) << 16; | 368 | mask = (devres & 0x3f) << 16; |
364 | size = 128 << 16; | 369 | size = 128 << 16; |
365 | for (;;) { | 370 | for (;;) { |
366 | unsigned bit = size >> 1; | 371 | unsigned bit = size >> 1; |
367 | if ((bit & mask) == bit) | 372 | if ((bit & mask) == bit) |
368 | break; | 373 | break; |
369 | size = bit; | 374 | size = bit; |
370 | } | 375 | } |
371 | /* | 376 | /* |
372 | * For now we only print it out. Eventually we'll want to | 377 | * For now we only print it out. Eventually we'll want to |
373 | * reserve it, but let's get enough confirmation reports first. | 378 | * reserve it, but let's get enough confirmation reports first. |
374 | */ | 379 | */ |
375 | base &= -size; | 380 | base &= -size; |
376 | printk("%s MMIO at %04x-%04x\n", name, base, base + size - 1); | 381 | printk("%s MMIO at %04x-%04x\n", name, base, base + size - 1); |
377 | } | 382 | } |
378 | 383 | ||
379 | /* | 384 | /* |
380 | * PIIX4 ACPI: Two IO regions pointed to by longwords at | 385 | * PIIX4 ACPI: Two IO regions pointed to by longwords at |
381 | * 0x40 (64 bytes of ACPI registers) | 386 | * 0x40 (64 bytes of ACPI registers) |
382 | * 0x90 (16 bytes of SMB registers) | 387 | * 0x90 (16 bytes of SMB registers) |
383 | * and a few strange programmable PIIX4 device resources. | 388 | * and a few strange programmable PIIX4 device resources. |
384 | */ | 389 | */ |
385 | static void __devinit quirk_piix4_acpi(struct pci_dev *dev) | 390 | static void __devinit quirk_piix4_acpi(struct pci_dev *dev) |
386 | { | 391 | { |
387 | u32 region, res_a; | 392 | u32 region, res_a; |
388 | 393 | ||
389 | pci_read_config_dword(dev, 0x40, ®ion); | 394 | pci_read_config_dword(dev, 0x40, ®ion); |
390 | quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI"); | 395 | quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI"); |
391 | pci_read_config_dword(dev, 0x90, ®ion); | 396 | pci_read_config_dword(dev, 0x90, ®ion); |
392 | quirk_io_region(dev, region, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB"); | 397 | quirk_io_region(dev, region, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB"); |
393 | 398 | ||
394 | /* Device resource A has enables for some of the other ones */ | 399 | /* Device resource A has enables for some of the other ones */ |
395 | pci_read_config_dword(dev, 0x5c, &res_a); | 400 | pci_read_config_dword(dev, 0x5c, &res_a); |
396 | 401 | ||
397 | piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21); | 402 | piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21); |
398 | piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21); | 403 | piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21); |
399 | 404 | ||
400 | /* Device resource D is just bitfields for static resources */ | 405 | /* Device resource D is just bitfields for static resources */ |
401 | 406 | ||
402 | /* Device 12 enabled? */ | 407 | /* Device 12 enabled? */ |
403 | if (res_a & (1 << 29)) { | 408 | if (res_a & (1 << 29)) { |
404 | piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20); | 409 | piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20); |
405 | piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7); | 410 | piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7); |
406 | } | 411 | } |
407 | /* Device 13 enabled? */ | 412 | /* Device 13 enabled? */ |
408 | if (res_a & (1 << 30)) { | 413 | if (res_a & (1 << 30)) { |
409 | piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20); | 414 | piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20); |
410 | piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7); | 415 | piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7); |
411 | } | 416 | } |
412 | piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20); | 417 | piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20); |
413 | piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20); | 418 | piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20); |
414 | } | 419 | } |
415 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi ); | 420 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi ); |
416 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi ); | 421 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi ); |
417 | 422 | ||
418 | /* | 423 | /* |
419 | * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at | 424 | * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at |
420 | * 0x40 (128 bytes of ACPI, GPIO & TCO registers) | 425 | * 0x40 (128 bytes of ACPI, GPIO & TCO registers) |
421 | * 0x58 (64 bytes of GPIO I/O space) | 426 | * 0x58 (64 bytes of GPIO I/O space) |
422 | */ | 427 | */ |
423 | static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev) | 428 | static void __devinit quirk_ich4_lpc_acpi(struct pci_dev *dev) |
424 | { | 429 | { |
425 | u32 region; | 430 | u32 region; |
426 | 431 | ||
427 | pci_read_config_dword(dev, 0x40, ®ion); | 432 | pci_read_config_dword(dev, 0x40, ®ion); |
428 | quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO"); | 433 | quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO"); |
429 | 434 | ||
430 | pci_read_config_dword(dev, 0x58, ®ion); | 435 | pci_read_config_dword(dev, 0x58, ®ion); |
431 | quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO"); | 436 | quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO"); |
432 | } | 437 | } |
433 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi ); | 438 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi ); |
434 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi ); | 439 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi ); |
435 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi ); | 440 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi ); |
436 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi ); | 441 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi ); |
437 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi ); | 442 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi ); |
438 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi ); | 443 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi ); |
439 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi ); | 444 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi ); |
440 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi ); | 445 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi ); |
441 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi ); | 446 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi ); |
442 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi ); | 447 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi ); |
443 | 448 | ||
444 | static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev) | 449 | static void __devinit quirk_ich6_lpc_acpi(struct pci_dev *dev) |
445 | { | 450 | { |
446 | u32 region; | 451 | u32 region; |
447 | 452 | ||
448 | pci_read_config_dword(dev, 0x40, ®ion); | 453 | pci_read_config_dword(dev, 0x40, ®ion); |
449 | quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO"); | 454 | quirk_io_region(dev, region, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO"); |
450 | 455 | ||
451 | pci_read_config_dword(dev, 0x48, ®ion); | 456 | pci_read_config_dword(dev, 0x48, ®ion); |
452 | quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); | 457 | quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); |
453 | } | 458 | } |
454 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi ); | 459 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc_acpi ); |
455 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi ); | 460 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc_acpi ); |
456 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich6_lpc_acpi ); | 461 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich6_lpc_acpi ); |
457 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich6_lpc_acpi ); | 462 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich6_lpc_acpi ); |
458 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich6_lpc_acpi ); | 463 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich6_lpc_acpi ); |
459 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich6_lpc_acpi ); | 464 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich6_lpc_acpi ); |
460 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich6_lpc_acpi ); | 465 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich6_lpc_acpi ); |
461 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich6_lpc_acpi ); | 466 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich6_lpc_acpi ); |
462 | 467 | ||
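The ICH4/ICH6 LPC quirks above reserve the ACPI/TCO and GPIO I/O ranges announced in the bridge's config space so the resource allocator will not hand those ports to anything else. A minimal sketch of a quirk following the same pattern, assuming it sits in this same file so it can call the static quirk_io_region() helper used above; the 0x44 offset, the 0x1234 device ID and the region name are placeholders, not real hardware:

    #include <linux/pci.h>
    #include <linux/init.h>

    static void __devinit quirk_example_lpc_acpi(struct pci_dev *dev)
    {
            u32 region;

            /* placeholder: longword config register holding an I/O base */
            pci_read_config_dword(dev, 0x44, &region);
            quirk_io_region(dev, region, 64, PCI_BRIDGE_RESOURCES + 2,
                            "EXAMPLE ACPI");
    }
    DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1234, quirk_example_lpc_acpi);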
463 | /* | 468 | /* |
464 | * VIA ACPI: One IO region pointed to by longword at | 469 | * VIA ACPI: One IO region pointed to by longword at |
465 | * 0x48 or 0x20 (256 bytes of ACPI registers) | 470 | * 0x48 or 0x20 (256 bytes of ACPI registers) |
466 | */ | 471 | */ |
467 | static void __devinit quirk_vt82c586_acpi(struct pci_dev *dev) | 472 | static void __devinit quirk_vt82c586_acpi(struct pci_dev *dev) |
468 | { | 473 | { |
469 | u8 rev; | 474 | u8 rev; |
470 | u32 region; | 475 | u32 region; |
471 | 476 | ||
472 | pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); | 477 | pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev); |
473 | if (rev & 0x10) { | 478 | if (rev & 0x10) { |
474 | pci_read_config_dword(dev, 0x48, ®ion); | 479 | pci_read_config_dword(dev, 0x48, ®ion); |
475 | region &= PCI_BASE_ADDRESS_IO_MASK; | 480 | region &= PCI_BASE_ADDRESS_IO_MASK; |
476 | quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES, "vt82c586 ACPI"); | 481 | quirk_io_region(dev, region, 256, PCI_BRIDGE_RESOURCES, "vt82c586 ACPI"); |
477 | } | 482 | } |
478 | } | 483 | } |
479 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi ); | 484 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi ); |
480 | 485 | ||
481 | /* | 486 | /* |
482 | * VIA VT82C686 ACPI: Three IO regions pointed to by (long)words at | 487 | * VIA VT82C686 ACPI: Three IO regions pointed to by (long)words at |
483 | * 0x48 (256 bytes of ACPI registers) | 488 | * 0x48 (256 bytes of ACPI registers) |
484 | * 0x70 (128 bytes of hardware monitoring registers) | 489 | * 0x70 (128 bytes of hardware monitoring registers) |
485 | * 0x90 (16 bytes of SMB registers) | 490 | * 0x90 (16 bytes of SMB registers) |
486 | */ | 491 | */ |
487 | static void __devinit quirk_vt82c686_acpi(struct pci_dev *dev) | 492 | static void __devinit quirk_vt82c686_acpi(struct pci_dev *dev) |
488 | { | 493 | { |
489 | u16 hm; | 494 | u16 hm; |
490 | u32 smb; | 495 | u32 smb; |
491 | 496 | ||
492 | quirk_vt82c586_acpi(dev); | 497 | quirk_vt82c586_acpi(dev); |
493 | 498 | ||
494 | pci_read_config_word(dev, 0x70, &hm); | 499 | pci_read_config_word(dev, 0x70, &hm); |
495 | hm &= PCI_BASE_ADDRESS_IO_MASK; | 500 | hm &= PCI_BASE_ADDRESS_IO_MASK; |
496 | quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1, "vt82c686 HW-mon"); | 501 | quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1, "vt82c686 HW-mon"); |
497 | 502 | ||
498 | pci_read_config_dword(dev, 0x90, &smb); | 503 | pci_read_config_dword(dev, 0x90, &smb); |
499 | smb &= PCI_BASE_ADDRESS_IO_MASK; | 504 | smb &= PCI_BASE_ADDRESS_IO_MASK; |
500 | quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2, "vt82c686 SMB"); | 505 | quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2, "vt82c686 SMB"); |
501 | } | 506 | } |
502 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi ); | 507 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi ); |
503 | 508 | ||
504 | /* | 509 | /* |
505 | * VIA VT8235 ISA Bridge: Two IO regions pointed to by words at | 510 | * VIA VT8235 ISA Bridge: Two IO regions pointed to by words at |
506 | * 0x88 (128 bytes of power management registers) | 511 | * 0x88 (128 bytes of power management registers) |
507 | * 0xd0 (16 bytes of SMB registers) | 512 | * 0xd0 (16 bytes of SMB registers) |
508 | */ | 513 | */ |
509 | static void __devinit quirk_vt8235_acpi(struct pci_dev *dev) | 514 | static void __devinit quirk_vt8235_acpi(struct pci_dev *dev) |
510 | { | 515 | { |
511 | u16 pm, smb; | 516 | u16 pm, smb; |
512 | 517 | ||
513 | pci_read_config_word(dev, 0x88, &pm); | 518 | pci_read_config_word(dev, 0x88, &pm); |
514 | pm &= PCI_BASE_ADDRESS_IO_MASK; | 519 | pm &= PCI_BASE_ADDRESS_IO_MASK; |
515 | quirk_io_region(dev, pm, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM"); | 520 | quirk_io_region(dev, pm, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM"); |
516 | 521 | ||
517 | pci_read_config_word(dev, 0xd0, &smb); | 522 | pci_read_config_word(dev, 0xd0, &smb); |
518 | smb &= PCI_BASE_ADDRESS_IO_MASK; | 523 | smb &= PCI_BASE_ADDRESS_IO_MASK; |
519 | quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 1, "vt8235 SMB"); | 524 | quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 1, "vt8235 SMB"); |
520 | } | 525 | } |
521 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi); | 526 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi); |
522 | 527 | ||
523 | 528 | ||
524 | #ifdef CONFIG_X86_IO_APIC | 529 | #ifdef CONFIG_X86_IO_APIC |
525 | 530 | ||
526 | #include <asm/io_apic.h> | 531 | #include <asm/io_apic.h> |
527 | 532 | ||
528 | /* | 533 | /* |
529 | * VIA 686A/B: If an IO-APIC is active, we need to route all on-chip | 534 | * VIA 686A/B: If an IO-APIC is active, we need to route all on-chip |
530 | * devices to the external APIC. | 535 | * devices to the external APIC. |
531 | * | 536 | * |
532 | * TODO: When we have device-specific interrupt routers, | 537 | * TODO: When we have device-specific interrupt routers, |
533 | * this code will go away from quirks. | 538 | * this code will go away from quirks. |
534 | */ | 539 | */ |
535 | static void __devinit quirk_via_ioapic(struct pci_dev *dev) | 540 | static void quirk_via_ioapic(struct pci_dev *dev) |
536 | { | 541 | { |
537 | u8 tmp; | 542 | u8 tmp; |
538 | 543 | ||
539 | if (nr_ioapics < 1) | 544 | if (nr_ioapics < 1) |
540 | tmp = 0; /* nothing routed to external APIC */ | 545 | tmp = 0; /* nothing routed to external APIC */ |
541 | else | 546 | else |
542 | tmp = 0x1f; /* all known bits (4-0) routed to external APIC */ | 547 | tmp = 0x1f; /* all known bits (4-0) routed to external APIC */ |
543 | 548 | ||
544 | printk(KERN_INFO "PCI: %sbling Via external APIC routing\n", | 549 | printk(KERN_INFO "PCI: %sbling Via external APIC routing\n", |
545 | tmp == 0 ? "Disa" : "Ena"); | 550 | tmp == 0 ? "Disa" : "Ena"); |
546 | 551 | ||
547 | /* Offset 0x58: External APIC IRQ output control */ | 552 | /* Offset 0x58: External APIC IRQ output control */ |
548 | pci_write_config_byte (dev, 0x58, tmp); | 553 | pci_write_config_byte (dev, 0x58, tmp); |
549 | } | 554 | } |
550 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic ); | 555 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic ); |
556 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic ); | ||
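The DECLARE_PCI_FIXUP_RESUME() entries added in this and the following hunks register existing quirks for a second, resume-time pass so that settings lost over suspend-to-RAM are re-applied; note that the same hunks drop the __devinit/__init markers from those functions, since code run at resume must not be discarded after boot. A minimal sketch of a quirk wired up for both passes; the 0x60 register, the bit and the 0x1234 device ID are placeholders:

    #include <linux/pci.h>

    static void quirk_example_reapply(struct pci_dev *dev)
    {
            u8 val;

            /* placeholder register: re-assert a bit the firmware may
             * clear across a suspend/resume cycle */
            pci_read_config_byte(dev, 0x60, &val);
            if (!(val & 0x01))
                    pci_write_config_byte(dev, 0x60, val | 0x01);
    }
    /* run once when devices are set up ... */
    DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x1234, quirk_example_reapply);
    /* ... and again when they come back from suspend */
    DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, 0x1234, quirk_example_reapply);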
551 | 557 | ||
552 | /* | 558 | /* |
553 | * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit. | 559 | * VIA 8237: Some BIOSs don't set the 'Bypass APIC De-Assert Message' Bit. |
554 | * This leads to doubled level interrupt rates. | 560 | * This leads to doubled level interrupt rates. |
555 | * Set this bit to get rid of cycle wastage. | 561 | * Set this bit to get rid of cycle wastage. |
556 | * Otherwise uncritical. | 562 | * Otherwise uncritical. |
557 | */ | 563 | */ |
558 | static void __devinit quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev) | 564 | static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev) |
559 | { | 565 | { |
560 | u8 misc_control2; | 566 | u8 misc_control2; |
561 | #define BYPASS_APIC_DEASSERT 8 | 567 | #define BYPASS_APIC_DEASSERT 8 |
562 | 568 | ||
563 | pci_read_config_byte(dev, 0x5B, &misc_control2); | 569 | pci_read_config_byte(dev, 0x5B, &misc_control2); |
564 | if (!(misc_control2 & BYPASS_APIC_DEASSERT)) { | 570 | if (!(misc_control2 & BYPASS_APIC_DEASSERT)) { |
565 | printk(KERN_INFO "PCI: Bypassing VIA 8237 APIC De-Assert Message\n"); | 571 | printk(KERN_INFO "PCI: Bypassing VIA 8237 APIC De-Assert Message\n"); |
566 | pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT); | 572 | pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT); |
567 | } | 573 | } |
568 | } | 574 | } |
569 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); | 575 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); |
576 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); | ||
570 | 577 | ||
571 | /* | 578 | /* |
572 | * The AMD io apic can hang the box when an apic irq is masked. | 579 | * The AMD io apic can hang the box when an apic irq is masked. |
573 | * We check all revs >= B0 (but not pre-production parts) as the bug | 580 | * We check all revs >= B0 (but not pre-production parts) as the bug |
574 | * is currently marked NoFix | 581 | * is currently marked NoFix |
575 | * | 582 | * |
576 | * We have multiple reports of hangs with this chipset that went away with | 583 | * We have multiple reports of hangs with this chipset that went away with |
577 | * noapic specified. For the moment we assume it's the erratum. We may be wrong | 584 | * noapic specified. For the moment we assume it's the erratum. We may be wrong |
578 | * of course. However, the advice is demonstrably good even if so. | 585 | * of course. However, the advice is demonstrably good even if so. |
579 | */ | 586 | */ |
580 | static void __devinit quirk_amd_ioapic(struct pci_dev *dev) | 587 | static void __devinit quirk_amd_ioapic(struct pci_dev *dev) |
581 | { | 588 | { |
582 | u8 rev; | 589 | u8 rev; |
583 | 590 | ||
584 | pci_read_config_byte(dev, PCI_REVISION_ID, &rev); | 591 | pci_read_config_byte(dev, PCI_REVISION_ID, &rev); |
585 | if (rev >= 0x02) { | 592 | if (rev >= 0x02) { |
586 | printk(KERN_WARNING "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n"); | 593 | printk(KERN_WARNING "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n"); |
587 | printk(KERN_WARNING " : booting with the \"noapic\" option.\n"); | 594 | printk(KERN_WARNING " : booting with the \"noapic\" option.\n"); |
588 | } | 595 | } |
589 | } | 596 | } |
590 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic ); | 597 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic ); |
591 | 598 | ||
592 | static void __init quirk_ioapic_rmw(struct pci_dev *dev) | 599 | static void __init quirk_ioapic_rmw(struct pci_dev *dev) |
593 | { | 600 | { |
594 | if (dev->devfn == 0 && dev->bus->number == 0) | 601 | if (dev->devfn == 0 && dev->bus->number == 0) |
595 | sis_apic_bug = 1; | 602 | sis_apic_bug = 1; |
596 | } | 603 | } |
597 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw ); | 604 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw ); |
598 | 605 | ||
599 | #define AMD8131_revA0 0x01 | 606 | #define AMD8131_revA0 0x01 |
600 | #define AMD8131_revB0 0x11 | 607 | #define AMD8131_revB0 0x11 |
601 | #define AMD8131_MISC 0x40 | 608 | #define AMD8131_MISC 0x40 |
602 | #define AMD8131_NIOAMODE_BIT 0 | 609 | #define AMD8131_NIOAMODE_BIT 0 |
603 | static void __init quirk_amd_8131_ioapic(struct pci_dev *dev) | 610 | static void quirk_amd_8131_ioapic(struct pci_dev *dev) |
604 | { | 611 | { |
605 | unsigned char revid, tmp; | 612 | unsigned char revid, tmp; |
606 | 613 | ||
607 | if (nr_ioapics == 0) | 614 | if (nr_ioapics == 0) |
608 | return; | 615 | return; |
609 | 616 | ||
610 | pci_read_config_byte(dev, PCI_REVISION_ID, &revid); | 617 | pci_read_config_byte(dev, PCI_REVISION_ID, &revid); |
611 | if (revid == AMD8131_revA0 || revid == AMD8131_revB0) { | 618 | if (revid == AMD8131_revA0 || revid == AMD8131_revB0) { |
612 | printk(KERN_INFO "Fixing up AMD8131 IOAPIC mode\n"); | 619 | printk(KERN_INFO "Fixing up AMD8131 IOAPIC mode\n"); |
613 | pci_read_config_byte( dev, AMD8131_MISC, &tmp); | 620 | pci_read_config_byte( dev, AMD8131_MISC, &tmp); |
614 | tmp &= ~(1 << AMD8131_NIOAMODE_BIT); | 621 | tmp &= ~(1 << AMD8131_NIOAMODE_BIT); |
615 | pci_write_config_byte( dev, AMD8131_MISC, tmp); | 622 | pci_write_config_byte( dev, AMD8131_MISC, tmp); |
616 | } | 623 | } |
617 | } | 624 | } |
618 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); | 625 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); |
626 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_ioapic); | ||
619 | #endif /* CONFIG_X86_IO_APIC */ | 627 | #endif /* CONFIG_X86_IO_APIC */ |
620 | 628 | ||
621 | 629 | ||
622 | /* | 630 | /* |
623 | * FIXME: it is questionable that quirk_via_acpi | 631 | * FIXME: it is questionable that quirk_via_acpi |
624 | * is needed. It shows up as an ISA bridge, and does not | 632 | * is needed. It shows up as an ISA bridge, and does not |
625 | * support the PCI_INTERRUPT_LINE register at all. Therefore | 633 | * support the PCI_INTERRUPT_LINE register at all. Therefore |
626 | * it seems like setting the pci_dev's 'irq' to the | 634 | * it seems like setting the pci_dev's 'irq' to the |
627 | * value of the ACPI SCI interrupt is only done for convenience. | 635 | * value of the ACPI SCI interrupt is only done for convenience. |
628 | * -jgarzik | 636 | * -jgarzik |
629 | */ | 637 | */ |
630 | static void __devinit quirk_via_acpi(struct pci_dev *d) | 638 | static void __devinit quirk_via_acpi(struct pci_dev *d) |
631 | { | 639 | { |
632 | /* | 640 | /* |
633 | * VIA ACPI device: SCI IRQ line in PCI config byte 0x42 | 641 | * VIA ACPI device: SCI IRQ line in PCI config byte 0x42 |
634 | */ | 642 | */ |
635 | u8 irq; | 643 | u8 irq; |
636 | pci_read_config_byte(d, 0x42, &irq); | 644 | pci_read_config_byte(d, 0x42, &irq); |
637 | irq &= 0xf; | 645 | irq &= 0xf; |
638 | if (irq && (irq != 2)) | 646 | if (irq && (irq != 2)) |
639 | d->irq = irq; | 647 | d->irq = irq; |
640 | } | 648 | } |
641 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi ); | 649 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi ); |
642 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi ); | 650 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi ); |
643 | 651 | ||
644 | /* | ||
645 | * Via 686A/B: The PCI_INTERRUPT_LINE register for the on-chip | ||
646 | * devices, USB0/1, AC97, MC97, and ACPI, has an unusual feature: | ||
647 | * when written, it makes an internal connection to the PIC. | ||
648 | * For these devices, this register is defined to be 4 bits wide. | ||
649 | * Normally this is fine. However for IO-APIC motherboards, or | ||
650 | * non-x86 architectures (yes Via exists on PPC among other places), | ||
651 | * we must mask the PCI_INTERRUPT_LINE value versus 0xf to get | ||
652 | * interrupts delivered properly. | ||
653 | * | ||
654 | * Some of the on-chip devices are actually '586 devices' so they are | ||
655 | * listed here. | ||
656 | */ | ||
657 | 652 | ||
658 | static int via_irq_fixup_needed = -1; | ||
659 | |||
660 | /* | 653 | /* |
661 | * As some VIA hardware is available in PCI-card form, we need to restrict | 654 | * VIA bridges which have VLink |
662 | * this quirk to VIA PCI hardware built onto VIA-based motherboards only. | ||
663 | * We try to locate a VIA southbridge before deciding whether the quirk | ||
664 | * should be applied. | ||
665 | */ | 655 | */ |
666 | static const struct pci_device_id via_irq_fixup_tbl[] = { | 656 | |
667 | { | 657 | static const struct pci_device_id via_vlink_fixup_tbl[] = { |
668 | .vendor = PCI_VENDOR_ID_VIA, | 658 | /* Internal devices need IRQ line routing, pre VLink */ |
669 | .device = PCI_ANY_ID, | 659 | { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C686), 0 }, |
670 | .subvendor = PCI_ANY_ID, | 660 | { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8231), 17 }, |
671 | .subdevice = PCI_ANY_ID, | 661 | /* Devices with VLink */ |
672 | .class = PCI_CLASS_BRIDGE_ISA << 8, | 662 | { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8233_0), 17}, |
673 | .class_mask = 0xffff00, | 663 | { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8233A), 17 }, |
674 | }, | 664 | { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8233C_0), 17 }, |
665 | { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8235), 16 }, | ||
666 | { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8237), 15 }, | ||
667 | { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8237A), 15 }, | ||
675 | { 0, }, | 668 | { 0, }, |
676 | }; | 669 | }; |
677 | 670 | ||
678 | static void quirk_via_irq(struct pci_dev *dev) | 671 | /** |
672 | * quirk_via_vlink - VIA VLink IRQ number update | ||
673 | * @dev: PCI device | ||
674 | * | ||
675 | * If the device we are dealing with is on a PIC IRQ we need to | ||
676 | * ensure that the IRQ line register, which usually is not relevant | ||
677 | * for PCI cards, is actually written so that interrupts get sent | ||
678 | * to the right place. | ||
679 | */ | ||
680 | |||
681 | static void quirk_via_vlink(struct pci_dev *dev) | ||
679 | { | 682 | { |
683 | const struct pci_device_id *via_vlink_fixup; | ||
684 | static int dev_lo = -1, dev_hi = 18; | ||
680 | u8 irq, new_irq; | 685 | u8 irq, new_irq; |
681 | 686 | ||
682 | if (via_irq_fixup_needed == -1) | 687 | /* Check if we have VLink and cache the result */ |
683 | via_irq_fixup_needed = pci_dev_present(via_irq_fixup_tbl); | ||
684 | 688 | ||
685 | if (!via_irq_fixup_needed) | 689 | /* Checked already - no */ |
690 | if (dev_lo == -2) | ||
686 | return; | 691 | return; |
687 | 692 | ||
693 | /* Not checked - see what bridge we have and find the device | ||
694 | ranges */ | ||
695 | |||
696 | if (dev_lo == -1) { | ||
697 | via_vlink_fixup = pci_find_present(via_vlink_fixup_tbl); | ||
698 | if (via_vlink_fixup == NULL) { | ||
699 | dev_lo = -2; | ||
700 | return; | ||
701 | } | ||
702 | dev_lo = via_vlink_fixup->driver_data; | ||
703 | /* 82C686 is special - 0/0 */ | ||
704 | if (dev_lo == 0) | ||
705 | dev_hi = 0; | ||
706 | } | ||
688 | new_irq = dev->irq; | 707 | new_irq = dev->irq; |
689 | 708 | ||
690 | /* Don't quirk interrupts outside the legacy IRQ range */ | 709 | /* Don't quirk interrupts outside the legacy IRQ range */ |
691 | if (!new_irq || new_irq > 15) | 710 | if (!new_irq || new_irq > 15) |
692 | return; | 711 | return; |
693 | 712 | ||
713 | /* Internal device ? */ | ||
714 | if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > dev_hi || | ||
715 | PCI_SLOT(dev->devfn) < dev_lo) | ||
716 | return; | ||
717 | |||
718 | /* This is an internal VLink device on a PIC interrupt. The BIOS | ||
719 | ought to have set this but may not have, so we redo it */ | ||
720 | |||
694 | pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); | 721 | pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); |
695 | if (new_irq != irq) { | 722 | if (new_irq != irq) { |
696 | printk(KERN_INFO "PCI: VIA IRQ fixup for %s, from %d to %d\n", | 723 | printk(KERN_INFO "PCI: VIA VLink IRQ fixup for %s, from %d to %d\n", |
697 | pci_name(dev), irq, new_irq); | 724 | pci_name(dev), irq, new_irq); |
698 | udelay(15); /* unknown if delay really needed */ | 725 | udelay(15); /* unknown if delay really needed */ |
699 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); | 726 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); |
700 | } | 727 | } |
701 | } | 728 | } |
702 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_irq); | 729 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink); |
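quirk_via_vlink() above rewrites PCI_INTERRUPT_LINE only for legacy IRQs 1-15 and only for devices on bus 0 whose slot number falls inside the [dev_lo, dev_hi] window derived from via_vlink_fixup_tbl (for example 15-18 once a VT8237 southbridge has been found). A standalone sketch of just that slot test; the devfn value is made up:

    #include <stdio.h>

    #define PCI_SLOT(devfn)  (((devfn) >> 3) & 0x1f)
    #define PCI_FUNC(devfn)  ((devfn) & 0x07)

    int main(void)
    {
            /* e.g. a VT8237 southbridge: internal devices in slots 15-18 */
            int dev_lo = 15, dev_hi = 18;
            unsigned int devfn = (17 << 3) | 2;     /* slot 17, function 2 */

            if (PCI_SLOT(devfn) >= dev_lo && PCI_SLOT(devfn) <= dev_hi)
                    printf("%u.%u: internal device, rewrite PCI_INTERRUPT_LINE\n",
                           PCI_SLOT(devfn), PCI_FUNC(devfn));
            else
                    printf("%u.%u: plug-in card, leave it alone\n",
                           PCI_SLOT(devfn), PCI_FUNC(devfn));
            return 0;
    }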
703 | 730 | ||
704 | /* | 731 | /* |
705 | * VIA VT82C598 has its device ID settable and many BIOSes | 732 | * VIA VT82C598 has its device ID settable and many BIOSes |
706 | * set it to the ID of VT82C597 for backward compatibility. | 733 | * set it to the ID of VT82C597 for backward compatibility. |
707 | * We need to switch it off to be able to recognize the real | 734 | * We need to switch it off to be able to recognize the real |
708 | * type of the chip. | 735 | * type of the chip. |
709 | */ | 736 | */ |
710 | static void __devinit quirk_vt82c598_id(struct pci_dev *dev) | 737 | static void __devinit quirk_vt82c598_id(struct pci_dev *dev) |
711 | { | 738 | { |
712 | pci_write_config_byte(dev, 0xfc, 0); | 739 | pci_write_config_byte(dev, 0xfc, 0); |
713 | pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device); | 740 | pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device); |
714 | } | 741 | } |
715 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id ); | 742 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id ); |
716 | 743 | ||
717 | /* | 744 | /* |
718 | * CardBus controllers have a legacy base address that enables them | 745 | * CardBus controllers have a legacy base address that enables them |
719 | * to respond as i82365 pcmcia controllers. We don't want them to | 746 | * to respond as i82365 pcmcia controllers. We don't want them to |
720 | * do this even if the Linux CardBus driver is not loaded, because | 747 | * do this even if the Linux CardBus driver is not loaded, because |
721 | * the Linux i82365 driver does not (and should not) handle CardBus. | 748 | * the Linux i82365 driver does not (and should not) handle CardBus. |
722 | */ | 749 | */ |
723 | static void __devinit quirk_cardbus_legacy(struct pci_dev *dev) | 750 | static void quirk_cardbus_legacy(struct pci_dev *dev) |
724 | { | 751 | { |
725 | if ((PCI_CLASS_BRIDGE_CARDBUS << 8) ^ dev->class) | 752 | if ((PCI_CLASS_BRIDGE_CARDBUS << 8) ^ dev->class) |
726 | return; | 753 | return; |
727 | pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0); | 754 | pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0); |
728 | } | 755 | } |
729 | DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); | 756 | DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); |
757 | DECLARE_PCI_FIXUP_RESUME(PCI_ANY_ID, PCI_ANY_ID, quirk_cardbus_legacy); | ||
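quirk_cardbus_legacy() above is registered for every device (PCI_ANY_ID/PCI_ANY_ID) and bails out unless the full 24-bit class value (class code plus prog-if) is exactly that of a CardBus bridge; the XOR is simply an inequality test. A standalone sketch of that check, with the class value built by hand (PCI_CLASS_BRIDGE_CARDBUS is 0x0607 in pci_ids.h):

    #include <stdio.h>

    #define PCI_CLASS_BRIDGE_CARDBUS 0x0607

    int main(void)
    {
            unsigned int class = (PCI_CLASS_BRIDGE_CARDBUS << 8) | 0x00; /* prog-if 0 */

            /* XOR of equal values is 0, so this is just "class != CardBus" */
            if ((PCI_CLASS_BRIDGE_CARDBUS << 8) ^ class)
                    printf("not a CardBus bridge, skip\n");
            else
                    printf("CardBus bridge: write 0 to PCI_CB_LEGACY_MODE_BASE\n");
            return 0;
    }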
730 | 758 | ||
731 | /* | 759 | /* |
732 | * Following the PCI ordering rules is optional on the AMD762. I'm not | 760 | * Following the PCI ordering rules is optional on the AMD762. I'm not |
733 | * sure what the designers were smoking but let's not inhale... | 761 | * sure what the designers were smoking but let's not inhale... |
734 | * | 762 | * |
735 | * To be fair to AMD, it follows the spec by default; it's the BIOS people | 763 | * To be fair to AMD, it follows the spec by default; it's the BIOS people |
736 | * who turn it off! | 764 | * who turn it off! |
737 | */ | 765 | */ |
738 | static void __devinit quirk_amd_ordering(struct pci_dev *dev) | 766 | static void quirk_amd_ordering(struct pci_dev *dev) |
739 | { | 767 | { |
740 | u32 pcic; | 768 | u32 pcic; |
741 | pci_read_config_dword(dev, 0x4C, &pcic); | 769 | pci_read_config_dword(dev, 0x4C, &pcic); |
742 | if ((pcic&6)!=6) { | 770 | if ((pcic&6)!=6) { |
743 | pcic |= 6; | 771 | pcic |= 6; |
744 | printk(KERN_WARNING "BIOS failed to enable PCI standards compliance, fixing this error.\n"); | 772 | printk(KERN_WARNING "BIOS failed to enable PCI standards compliance, fixing this error.\n"); |
745 | pci_write_config_dword(dev, 0x4C, pcic); | 773 | pci_write_config_dword(dev, 0x4C, pcic); |
746 | pci_read_config_dword(dev, 0x84, &pcic); | 774 | pci_read_config_dword(dev, 0x84, &pcic); |
747 | pcic |= (1<<23); /* Required in this mode */ | 775 | pcic |= (1<<23); /* Required in this mode */ |
748 | pci_write_config_dword(dev, 0x84, pcic); | 776 | pci_write_config_dword(dev, 0x84, pcic); |
749 | } | 777 | } |
750 | } | 778 | } |
751 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering ); | 779 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering ); |
780 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering ); | ||
752 | 781 | ||
753 | /* | 782 | /* |
754 | * DreamWorks provided workaround for Dunord I-3000 problem | 783 | * DreamWorks provided workaround for Dunord I-3000 problem |
755 | * | 784 | * |
756 | * This card decodes and responds to addresses not apparently | 785 | * This card decodes and responds to addresses not apparently |
757 | * assigned to it. We force a larger allocation to ensure that | 786 | * assigned to it. We force a larger allocation to ensure that |
758 | * nothing gets put too close to it. | 787 | * nothing gets put too close to it. |
759 | */ | 788 | */ |
760 | static void __devinit quirk_dunord ( struct pci_dev * dev ) | 789 | static void __devinit quirk_dunord ( struct pci_dev * dev ) |
761 | { | 790 | { |
762 | struct resource *r = &dev->resource [1]; | 791 | struct resource *r = &dev->resource [1]; |
763 | r->start = 0; | 792 | r->start = 0; |
764 | r->end = 0xffffff; | 793 | r->end = 0xffffff; |
765 | } | 794 | } |
766 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord ); | 795 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord ); |
767 | 796 | ||
768 | /* | 797 | /* |
769 | * i82380FB mobile docking controller: its PCI-to-PCI bridge | 798 | * i82380FB mobile docking controller: its PCI-to-PCI bridge |
770 | * is subtractive decoding (transparent), and does indicate this | 799 | * is subtractive decoding (transparent), and does indicate this |
771 | * in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80 | 800 | * in the ProgIf. Unfortunately, the ProgIf value is wrong - 0x80 |
772 | * instead of 0x01. | 801 | * instead of 0x01. |
773 | */ | 802 | */ |
774 | static void __devinit quirk_transparent_bridge(struct pci_dev *dev) | 803 | static void __devinit quirk_transparent_bridge(struct pci_dev *dev) |
775 | { | 804 | { |
776 | dev->transparent = 1; | 805 | dev->transparent = 1; |
777 | } | 806 | } |
778 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge ); | 807 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge ); |
779 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge ); | 808 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge ); |
780 | 809 | ||
781 | /* | 810 | /* |
782 | * Common misconfiguration of the MediaGX/Geode PCI master that will | 811 | * Common misconfiguration of the MediaGX/Geode PCI master that will |
783 | * reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1 | 812 | * reduce PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1 |
784 | * datasheets found at http://www.national.com/ds/GX for info on what | 813 | * datasheets found at http://www.national.com/ds/GX for info on what |
785 | * these bits do. <christer@weinigel.se> | 814 | * these bits do. <christer@weinigel.se> |
786 | */ | 815 | */ |
787 | static void __init quirk_mediagx_master(struct pci_dev *dev) | 816 | static void quirk_mediagx_master(struct pci_dev *dev) |
788 | { | 817 | { |
789 | u8 reg; | 818 | u8 reg; |
790 | pci_read_config_byte(dev, 0x41, ®); | 819 | pci_read_config_byte(dev, 0x41, ®); |
791 | if (reg & 2) { | 820 | if (reg & 2) { |
792 | reg &= ~2; | 821 | reg &= ~2; |
793 | printk(KERN_INFO "PCI: Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n", reg); | 822 | printk(KERN_INFO "PCI: Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n", reg); |
794 | pci_write_config_byte(dev, 0x41, reg); | 823 | pci_write_config_byte(dev, 0x41, reg); |
795 | } | 824 | } |
796 | } | 825 | } |
797 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master ); | 826 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master ); |
827 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master ); | ||
798 | 828 | ||
799 | /* | 829 | /* |
800 | * Ensure C0 rev restreaming is off. This is normally done by | 830 | * Ensure C0 rev restreaming is off. This is normally done by |
801 | * the BIOS, but in the odd case it is not, the result is corruption, | 831 | * the BIOS, but in the odd case it is not, the result is corruption, |
802 | * hence the presence of a Linux check | 832 | * hence the presence of a Linux check |
803 | */ | 833 | */ |
804 | static void __init quirk_disable_pxb(struct pci_dev *pdev) | 834 | static void quirk_disable_pxb(struct pci_dev *pdev) |
805 | { | 835 | { |
806 | u16 config; | 836 | u16 config; |
807 | u8 rev; | 837 | u8 rev; |
808 | 838 | ||
809 | pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); | 839 | pci_read_config_byte(pdev, PCI_REVISION_ID, &rev); |
810 | if (rev != 0x04) /* Only C0 requires this */ | 840 | if (rev != 0x04) /* Only C0 requires this */ |
811 | return; | 841 | return; |
812 | pci_read_config_word(pdev, 0x40, &config); | 842 | pci_read_config_word(pdev, 0x40, &config); |
813 | if (config & (1<<6)) { | 843 | if (config & (1<<6)) { |
814 | config &= ~(1<<6); | 844 | config &= ~(1<<6); |
815 | pci_write_config_word(pdev, 0x40, config); | 845 | pci_write_config_word(pdev, 0x40, config); |
816 | printk(KERN_INFO "PCI: C0 revision 450NX. Disabling PCI restreaming.\n"); | 846 | printk(KERN_INFO "PCI: C0 revision 450NX. Disabling PCI restreaming.\n"); |
817 | } | 847 | } |
818 | } | 848 | } |
819 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb ); | 849 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb ); |
850 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb ); | ||
820 | 851 | ||
821 | 852 | ||
822 | /* | 853 | /* |
823 | * Serverworks CSB5 IDE does not fully support native mode | 854 | * Serverworks CSB5 IDE does not fully support native mode |
824 | */ | 855 | */ |
825 | static void __devinit quirk_svwks_csb5ide(struct pci_dev *pdev) | 856 | static void __devinit quirk_svwks_csb5ide(struct pci_dev *pdev) |
826 | { | 857 | { |
827 | u8 prog; | 858 | u8 prog; |
828 | pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); | 859 | pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); |
829 | if (prog & 5) { | 860 | if (prog & 5) { |
830 | prog &= ~5; | 861 | prog &= ~5; |
831 | pdev->class &= ~5; | 862 | pdev->class &= ~5; |
832 | pci_write_config_byte(pdev, PCI_CLASS_PROG, prog); | 863 | pci_write_config_byte(pdev, PCI_CLASS_PROG, prog); |
833 | /* PCI layer will sort out resources */ | 864 | /* PCI layer will sort out resources */ |
834 | } | 865 | } |
835 | } | 866 | } |
836 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide ); | 867 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide ); |
837 | 868 | ||
838 | /* | 869 | /* |
839 | * Intel 82801CAM ICH3-M datasheet says IDE modes must be the same | 870 | * Intel 82801CAM ICH3-M datasheet says IDE modes must be the same |
840 | */ | 871 | */ |
841 | static void __init quirk_ide_samemode(struct pci_dev *pdev) | 872 | static void __init quirk_ide_samemode(struct pci_dev *pdev) |
842 | { | 873 | { |
843 | u8 prog; | 874 | u8 prog; |
844 | 875 | ||
845 | pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); | 876 | pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); |
846 | 877 | ||
847 | if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) { | 878 | if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) { |
848 | printk(KERN_INFO "PCI: IDE mode mismatch; forcing legacy mode\n"); | 879 | printk(KERN_INFO "PCI: IDE mode mismatch; forcing legacy mode\n"); |
849 | prog &= ~5; | 880 | prog &= ~5; |
850 | pdev->class &= ~5; | 881 | pdev->class &= ~5; |
851 | pci_write_config_byte(pdev, PCI_CLASS_PROG, prog); | 882 | pci_write_config_byte(pdev, PCI_CLASS_PROG, prog); |
852 | } | 883 | } |
853 | } | 884 | } |
854 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode); | 885 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode); |
855 | 886 | ||
856 | /* This was originally an Alpha specific thing, but it really fits here. | 887 | /* This was originally an Alpha specific thing, but it really fits here. |
857 | * The i82375 PCI/EISA bridge appears as non-classified. Fix that. | 888 | * The i82375 PCI/EISA bridge appears as non-classified. Fix that. |
858 | */ | 889 | */ |
859 | static void __init quirk_eisa_bridge(struct pci_dev *dev) | 890 | static void __init quirk_eisa_bridge(struct pci_dev *dev) |
860 | { | 891 | { |
861 | dev->class = PCI_CLASS_BRIDGE_EISA << 8; | 892 | dev->class = PCI_CLASS_BRIDGE_EISA << 8; |
862 | } | 893 | } |
863 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge ); | 894 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge ); |
864 | 895 | ||
865 | /* | 896 | /* |
866 | * On the MSI-K8T-Neo2Fir board, the onboard sound card is disabled | 897 | * On the MSI-K8T-Neo2Fir board, the onboard sound card is disabled |
867 | * when a PCI sound card is added. The BIOS only offers the options | 898 | * when a PCI sound card is added. The BIOS only offers the options |
868 | * "Disabled" and "AUTO". This quirk sets the corresponding | 899 | * "Disabled" and "AUTO". This quirk sets the corresponding |
869 | * register value to re-enable the onboard sound card. | 900 | * register value to re-enable the onboard sound card. |
870 | * | 901 | * |
871 | * FIXME: Presently this quirk will run on anything that has an 8237 | 902 | * FIXME: Presently this quirk will run on anything that has an 8237 |
872 | * which isn't correct; we need to check DMI tables or something in | 903 | * which isn't correct; we need to check DMI tables or something in |
873 | * order to make sure it only runs on the MSI-K8T-Neo2Fir. Because it | 904 | * order to make sure it only runs on the MSI-K8T-Neo2Fir. Because it |
874 | * runs everywhere at present we suppress the printk output in most | 905 | * runs everywhere at present we suppress the printk output in most |
875 | * irrelevant cases. | 906 | * irrelevant cases. |
876 | */ | 907 | */ |
877 | static void __init k8t_sound_hostbridge(struct pci_dev *dev) | 908 | static void k8t_sound_hostbridge(struct pci_dev *dev) |
878 | { | 909 | { |
879 | unsigned char val; | 910 | unsigned char val; |
880 | 911 | ||
881 | pci_read_config_byte(dev, 0x50, &val); | 912 | pci_read_config_byte(dev, 0x50, &val); |
882 | if (val == 0x88 || val == 0xc8) { | 913 | if (val == 0x88 || val == 0xc8) { |
883 | /* Assume it's probably a MSI-K8T-Neo2Fir */ | 914 | /* Assume it's probably a MSI-K8T-Neo2Fir */ |
884 | printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, attempting to turn soundcard ON\n"); | 915 | printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, attempting to turn soundcard ON\n"); |
885 | pci_write_config_byte(dev, 0x50, val & (~0x40)); | 916 | pci_write_config_byte(dev, 0x50, val & (~0x40)); |
886 | 917 | ||
887 | /* Verify the Change for Status output */ | 918 | /* Verify the Change for Status output */ |
888 | pci_read_config_byte(dev, 0x50, &val); | 919 | pci_read_config_byte(dev, 0x50, &val); |
889 | if (val & 0x40) | 920 | if (val & 0x40) |
890 | printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, soundcard still off\n"); | 921 | printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, soundcard still off\n"); |
891 | else | 922 | else |
892 | printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, soundcard on\n"); | 923 | printk(KERN_INFO "PCI: MSI-K8T-Neo2Fir, soundcard on\n"); |
893 | } | 924 | } |
894 | } | 925 | } |
895 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge); | 926 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge); |
927 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge); | ||
896 | 928 | ||
897 | #ifndef CONFIG_ACPI_SLEEP | ||
898 | /* | 929 | /* |
899 | * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge | 930 | * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge |
900 | * is not activated. The myth is that Asus said that they do not want the | 931 | * is not activated. The myth is that Asus said that they do not want the |
901 | * users to be irritated by just another PCI Device in the Win98 device | 932 | * users to be irritated by just another PCI Device in the Win98 device |
902 | * manager. (see the file prog/hotplug/README.p4b in the lm_sensors | 933 | * manager. (see the file prog/hotplug/README.p4b in the lm_sensors |
903 | * package 2.7.0 for details) | 934 | * package 2.7.0 for details) |
904 | * | 935 | * |
905 | * The SMBus PCI Device can be activated by setting a bit in the ICH LPC | 936 | * The SMBus PCI Device can be activated by setting a bit in the ICH LPC |
906 | * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it | 937 | * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it |
907 | * becomes necessary to do this tweak in two steps -- I've chosen the Host | 938 | * becomes necessary to do this tweak in two steps -- I've chosen the Host |
908 | * bridge as trigger. | 939 | * bridge as trigger. |
909 | * | ||
910 | * Actually, leaving it unhidden and not redoing the quirk over suspend2ram | ||
911 | * will cause thermal management to break down, causing the machine to | ||
912 | * overheat. | ||
913 | */ | 940 | */ |
914 | static int __initdata asus_hides_smbus; | 941 | static int __initdata asus_hides_smbus; |
915 | 942 | ||
916 | static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) | 943 | static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) |
917 | { | 944 | { |
918 | if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) { | 945 | if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) { |
919 | if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB) | 946 | if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB) |
920 | switch(dev->subsystem_device) { | 947 | switch(dev->subsystem_device) { |
921 | case 0x8025: /* P4B-LX */ | 948 | case 0x8025: /* P4B-LX */ |
922 | case 0x8070: /* P4B */ | 949 | case 0x8070: /* P4B */ |
923 | case 0x8088: /* P4B533 */ | 950 | case 0x8088: /* P4B533 */ |
924 | case 0x1626: /* L3C notebook */ | 951 | case 0x1626: /* L3C notebook */ |
925 | asus_hides_smbus = 1; | 952 | asus_hides_smbus = 1; |
926 | } | 953 | } |
927 | if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) | 954 | if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) |
928 | switch(dev->subsystem_device) { | 955 | switch(dev->subsystem_device) { |
929 | case 0x80b1: /* P4GE-V */ | 956 | case 0x80b1: /* P4GE-V */ |
930 | case 0x80b2: /* P4PE */ | 957 | case 0x80b2: /* P4PE */ |
931 | case 0x8093: /* P4B533-V */ | 958 | case 0x8093: /* P4B533-V */ |
932 | asus_hides_smbus = 1; | 959 | asus_hides_smbus = 1; |
933 | } | 960 | } |
934 | if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB) | 961 | if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB) |
935 | switch(dev->subsystem_device) { | 962 | switch(dev->subsystem_device) { |
936 | case 0x8030: /* P4T533 */ | 963 | case 0x8030: /* P4T533 */ |
937 | asus_hides_smbus = 1; | 964 | asus_hides_smbus = 1; |
938 | } | 965 | } |
939 | if (dev->device == PCI_DEVICE_ID_INTEL_7205_0) | 966 | if (dev->device == PCI_DEVICE_ID_INTEL_7205_0) |
940 | switch (dev->subsystem_device) { | 967 | switch (dev->subsystem_device) { |
941 | case 0x8070: /* P4G8X Deluxe */ | 968 | case 0x8070: /* P4G8X Deluxe */ |
942 | asus_hides_smbus = 1; | 969 | asus_hides_smbus = 1; |
943 | } | 970 | } |
944 | if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH) | 971 | if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH) |
945 | switch (dev->subsystem_device) { | 972 | switch (dev->subsystem_device) { |
946 | case 0x80c9: /* PU-DLS */ | 973 | case 0x80c9: /* PU-DLS */ |
947 | asus_hides_smbus = 1; | 974 | asus_hides_smbus = 1; |
948 | } | 975 | } |
949 | if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) | 976 | if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) |
950 | switch (dev->subsystem_device) { | 977 | switch (dev->subsystem_device) { |
951 | case 0x1751: /* M2N notebook */ | 978 | case 0x1751: /* M2N notebook */ |
952 | case 0x1821: /* M5N notebook */ | 979 | case 0x1821: /* M5N notebook */ |
953 | asus_hides_smbus = 1; | 980 | asus_hides_smbus = 1; |
954 | } | 981 | } |
955 | if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) | 982 | if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) |
956 | switch (dev->subsystem_device) { | 983 | switch (dev->subsystem_device) { |
957 | case 0x184b: /* W1N notebook */ | 984 | case 0x184b: /* W1N notebook */ |
958 | case 0x186a: /* M6Ne notebook */ | 985 | case 0x186a: /* M6Ne notebook */ |
959 | asus_hides_smbus = 1; | 986 | asus_hides_smbus = 1; |
960 | } | 987 | } |
961 | if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) { | 988 | if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) { |
962 | switch (dev->subsystem_device) { | 989 | switch (dev->subsystem_device) { |
963 | case 0x1882: /* M6V notebook */ | 990 | case 0x1882: /* M6V notebook */ |
964 | case 0x1977: /* A6VA notebook */ | 991 | case 0x1977: /* A6VA notebook */ |
965 | asus_hides_smbus = 1; | 992 | asus_hides_smbus = 1; |
966 | } | 993 | } |
967 | } | 994 | } |
968 | } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) { | 995 | } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) { |
969 | if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) | 996 | if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) |
970 | switch(dev->subsystem_device) { | 997 | switch(dev->subsystem_device) { |
971 | case 0x088C: /* HP Compaq nc8000 */ | 998 | case 0x088C: /* HP Compaq nc8000 */ |
972 | case 0x0890: /* HP Compaq nc6000 */ | 999 | case 0x0890: /* HP Compaq nc6000 */ |
973 | asus_hides_smbus = 1; | 1000 | asus_hides_smbus = 1; |
974 | } | 1001 | } |
975 | if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB) | 1002 | if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB) |
976 | switch (dev->subsystem_device) { | 1003 | switch (dev->subsystem_device) { |
977 | case 0x12bc: /* HP D330L */ | 1004 | case 0x12bc: /* HP D330L */ |
978 | case 0x12bd: /* HP D530 */ | 1005 | case 0x12bd: /* HP D530 */ |
979 | asus_hides_smbus = 1; | 1006 | asus_hides_smbus = 1; |
980 | } | 1007 | } |
981 | if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) { | 1008 | if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) { |
982 | switch (dev->subsystem_device) { | 1009 | switch (dev->subsystem_device) { |
983 | case 0x099c: /* HP Compaq nx6110 */ | 1010 | case 0x099c: /* HP Compaq nx6110 */ |
984 | asus_hides_smbus = 1; | 1011 | asus_hides_smbus = 1; |
985 | } | 1012 | } |
986 | } | 1013 | } |
987 | } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_TOSHIBA)) { | 1014 | } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_TOSHIBA)) { |
988 | if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) | 1015 | if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) |
989 | switch(dev->subsystem_device) { | 1016 | switch(dev->subsystem_device) { |
990 | case 0x0001: /* Toshiba Satellite A40 */ | 1017 | case 0x0001: /* Toshiba Satellite A40 */ |
991 | asus_hides_smbus = 1; | 1018 | asus_hides_smbus = 1; |
992 | } | 1019 | } |
993 | if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) | 1020 | if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) |
994 | switch(dev->subsystem_device) { | 1021 | switch(dev->subsystem_device) { |
995 | case 0x0001: /* Toshiba Tecra M2 */ | 1022 | case 0x0001: /* Toshiba Tecra M2 */ |
996 | asus_hides_smbus = 1; | 1023 | asus_hides_smbus = 1; |
997 | } | 1024 | } |
998 | } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) { | 1025 | } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) { |
999 | if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) | 1026 | if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) |
1000 | switch(dev->subsystem_device) { | 1027 | switch(dev->subsystem_device) { |
1001 | case 0xC00C: /* Samsung P35 notebook */ | 1028 | case 0xC00C: /* Samsung P35 notebook */ |
1002 | asus_hides_smbus = 1; | 1029 | asus_hides_smbus = 1; |
1003 | } | 1030 | } |
1004 | } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) { | 1031 | } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) { |
1005 | if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) | 1032 | if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) |
1006 | switch(dev->subsystem_device) { | 1033 | switch(dev->subsystem_device) { |
1007 | case 0x0058: /* Compaq Evo N620c */ | 1034 | case 0x0058: /* Compaq Evo N620c */ |
1008 | asus_hides_smbus = 1; | 1035 | asus_hides_smbus = 1; |
1009 | } | 1036 | } |
1010 | } | 1037 | } |
1011 | } | 1038 | } |
1012 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge ); | 1039 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge ); |
1013 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge ); | 1040 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge ); |
1014 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge ); | 1041 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge ); |
1015 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge ); | 1042 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge ); |
1016 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge ); | 1043 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge ); |
1017 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge ); | 1044 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge ); |
1018 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge ); | 1045 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge ); |
1019 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge ); | 1046 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge ); |
1020 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge ); | 1047 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge ); |
1021 | 1048 | ||
1022 | static void __init asus_hides_smbus_lpc(struct pci_dev *dev) | 1049 | static void asus_hides_smbus_lpc(struct pci_dev *dev) |
1023 | { | 1050 | { |
1024 | u16 val; | 1051 | u16 val; |
1025 | 1052 | ||
1026 | if (likely(!asus_hides_smbus)) | 1053 | if (likely(!asus_hides_smbus)) |
1027 | return; | 1054 | return; |
1028 | 1055 | ||
1029 | pci_read_config_word(dev, 0xF2, &val); | 1056 | pci_read_config_word(dev, 0xF2, &val); |
1030 | if (val & 0x8) { | 1057 | if (val & 0x8) { |
1031 | pci_write_config_word(dev, 0xF2, val & (~0x8)); | 1058 | pci_write_config_word(dev, 0xF2, val & (~0x8)); |
1032 | pci_read_config_word(dev, 0xF2, &val); | 1059 | pci_read_config_word(dev, 0xF2, &val); |
1033 | if (val & 0x8) | 1060 | if (val & 0x8) |
1034 | printk(KERN_INFO "PCI: i801 SMBus device continues to play 'hide and seek'! 0x%x\n", val); | 1061 | printk(KERN_INFO "PCI: i801 SMBus device continues to play 'hide and seek'! 0x%x\n", val); |
1035 | else | 1062 | else |
1036 | printk(KERN_INFO "PCI: Enabled i801 SMBus device\n"); | 1063 | printk(KERN_INFO "PCI: Enabled i801 SMBus device\n"); |
1037 | } | 1064 | } |
1038 | } | 1065 | } |
1039 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc ); | 1066 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc ); |
1040 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc ); | 1067 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc ); |
1041 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc ); | 1068 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc ); |
1042 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc ); | 1069 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc ); |
1043 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc ); | 1070 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc ); |
1044 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc ); | 1071 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc ); |
1072 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc ); | ||
1073 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc ); | ||
1074 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc ); | ||
1075 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc ); | ||
1076 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc ); | ||
1077 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc ); | ||
1045 | 1078 | ||
1046 | static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev) | 1079 | static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev) |
1047 | { | 1080 | { |
1048 | u32 val, rcba; | 1081 | u32 val, rcba; |
1049 | void __iomem *base; | 1082 | void __iomem *base; |
1050 | 1083 | ||
1051 | if (likely(!asus_hides_smbus)) | 1084 | if (likely(!asus_hides_smbus)) |
1052 | return; | 1085 | return; |
1053 | pci_read_config_dword(dev, 0xF0, &rcba); | 1086 | pci_read_config_dword(dev, 0xF0, &rcba); |
1054 | base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000); /* use bits 31:14, 16 kB aligned */ | 1087 | base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000); /* use bits 31:14, 16 kB aligned */ |
1055 | if (base == NULL) return; | 1088 | if (base == NULL) return; |
1056 | val=readl(base + 0x3418); /* read the Function Disable register, dword mode only */ | 1089 | val=readl(base + 0x3418); /* read the Function Disable register, dword mode only */ |
1057 | writel(val & 0xFFFFFFF7, base + 0x3418); /* enable the SMBus device */ | 1090 | writel(val & 0xFFFFFFF7, base + 0x3418); /* enable the SMBus device */ |
1058 | iounmap(base); | 1091 | iounmap(base); |
1059 | printk(KERN_INFO "PCI: Enabled ICH6/i801 SMBus device\n"); | 1092 | printk(KERN_INFO "PCI: Enabled ICH6/i801 SMBus device\n"); |
1060 | } | 1093 | } |
1061 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6 ); | 1094 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6 ); |
1095 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6 ); | ||
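asus_hides_smbus_lpc_ich6() above maps the chipset's Root Complex Base Address (config dword 0xF0, bits 31:14, 16 kB aligned) and clears bit 3 of the Function Disable register at offset 0x3418 to unhide the SMBus controller. A standalone sketch of just that bit arithmetic, with made-up register values:

    #include <stdio.h>

    int main(void)
    {
            unsigned int rcba = 0xFED1C001;          /* example config dword 0xF0 */
            unsigned int base = rcba & 0xFFFFC000;   /* keep bits 31:14 -> 16 kB aligned */
            unsigned int fd   = 0x00000008;          /* example Function Disable: SMBus hidden */

            printf("RCBA base      : %#010x\n", base);
            printf("FD before      : %#010x\n", fd);
            printf("FD after enable: %#010x\n", fd & 0xFFFFFFF7);  /* clear bit 3 */
            return 0;
    }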
1062 | 1096 | ||
1063 | #endif | ||
1064 | |||
1065 | /* | 1097 | /* |
1066 | * SiS 96x south bridge: BIOS typically hides SMBus device... | 1098 | * SiS 96x south bridge: BIOS typically hides SMBus device... |
1067 | */ | 1099 | */ |
1068 | static void __init quirk_sis_96x_smbus(struct pci_dev *dev) | 1100 | static void quirk_sis_96x_smbus(struct pci_dev *dev) |
1069 | { | 1101 | { |
1070 | u8 val = 0; | 1102 | u8 val = 0; |
1071 | printk(KERN_INFO "Enabling SiS 96x SMBus.\n"); | 1103 | printk(KERN_INFO "Enabling SiS 96x SMBus.\n"); |
1072 | pci_read_config_byte(dev, 0x77, &val); | 1104 | pci_read_config_byte(dev, 0x77, &val); |
1073 | pci_write_config_byte(dev, 0x77, val & ~0x10); | 1105 | pci_write_config_byte(dev, 0x77, val & ~0x10); |
1074 | pci_read_config_byte(dev, 0x77, &val); | 1106 | pci_read_config_byte(dev, 0x77, &val); |
1075 | } | 1107 | } |
1076 | 1108 | ||
1077 | /* | 1109 | /* |
1078 | * ... This is further complicated by the fact that some SiS96x south | 1110 | * ... This is further complicated by the fact that some SiS96x south |
1079 | * bridges pretend to be 85C503/5513 instead. In that case see if we | 1111 | * bridges pretend to be 85C503/5513 instead. In that case see if we |
1080 | * spotted a compatible north bridge to make sure. | 1112 | * spotted a compatible north bridge to make sure. |
1081 | * (pci_find_device doesn't work yet) | 1113 | * (pci_find_device doesn't work yet) |
1082 | * | 1114 | * |
1083 | * We can also enable the sis96x bit in the discovery register.. | 1115 | * We can also enable the sis96x bit in the discovery register.. |
1084 | */ | 1116 | */ |
1085 | static int __devinitdata sis_96x_compatible = 0; | 1117 | static int __devinitdata sis_96x_compatible = 0; |
1086 | 1118 | ||
1087 | #define SIS_DETECT_REGISTER 0x40 | 1119 | #define SIS_DETECT_REGISTER 0x40 |
1088 | 1120 | ||
1089 | static void __init quirk_sis_503(struct pci_dev *dev) | 1121 | static void quirk_sis_503(struct pci_dev *dev) |
1090 | { | 1122 | { |
1091 | u8 reg; | 1123 | u8 reg; |
1092 | u16 devid; | 1124 | u16 devid; |
1093 | 1125 | ||
1094 | pci_read_config_byte(dev, SIS_DETECT_REGISTER, ®); | 1126 | pci_read_config_byte(dev, SIS_DETECT_REGISTER, ®); |
1095 | pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6)); | 1127 | pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6)); |
1096 | pci_read_config_word(dev, PCI_DEVICE_ID, &devid); | 1128 | pci_read_config_word(dev, PCI_DEVICE_ID, &devid); |
1097 | if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) { | 1129 | if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) { |
1098 | pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg); | 1130 | pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg); |
1099 | return; | 1131 | return; |
1100 | } | 1132 | } |
1101 | 1133 | ||
1102 | /* Make people aware that we changed the config.. */ | 1134 | /* Make people aware that we changed the config.. */ |
1103 | printk(KERN_WARNING "Uncovering SIS%x that hid as a SIS503 (compatible=%d)\n", devid, sis_96x_compatible); | 1135 | printk(KERN_WARNING "Uncovering SIS%x that hid as a SIS503 (compatible=%d)\n", devid, sis_96x_compatible); |
1104 | 1136 | ||
1105 | /* | 1137 | /* |
1106 | * Ok, it now shows up as a 96x.. The 96x quirks are after | 1138 | * Ok, it now shows up as a 96x.. The 96x quirks are after |
1107 | * the 503 quirk in the quirk table, so they'll automatically | 1139 | * the 503 quirk in the quirk table, so they'll automatically |
1108 | * run and enable things like the SMBus device | 1140 | * run and enable things like the SMBus device |
1109 | */ | 1141 | */ |
1110 | dev->device = devid; | 1142 | dev->device = devid; |
1111 | } | 1143 | } |
1112 | 1144 | ||
1113 | static void __init quirk_sis_96x_compatible(struct pci_dev *dev) | 1145 | static void __init quirk_sis_96x_compatible(struct pci_dev *dev) |
1114 | { | 1146 | { |
1115 | sis_96x_compatible = 1; | 1147 | sis_96x_compatible = 1; |
1116 | } | 1148 | } |
1117 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_645, quirk_sis_96x_compatible ); | 1149 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_645, quirk_sis_96x_compatible ); |
1118 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_646, quirk_sis_96x_compatible ); | 1150 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_646, quirk_sis_96x_compatible ); |
1119 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_648, quirk_sis_96x_compatible ); | 1151 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_648, quirk_sis_96x_compatible ); |
1120 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_650, quirk_sis_96x_compatible ); | 1152 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_650, quirk_sis_96x_compatible ); |
1121 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_651, quirk_sis_96x_compatible ); | 1153 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_651, quirk_sis_96x_compatible ); |
1122 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_735, quirk_sis_96x_compatible ); | 1154 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_735, quirk_sis_96x_compatible ); |
1123 | 1155 | ||
1124 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503 ); | 1156 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503 ); |
1157 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503 ); | ||
1125 | /* | 1158 | /* |
1126 | * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller | 1159 | * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller |
1127 | * and MC97 modem controller are disabled when a second PCI soundcard is | 1160 | * and MC97 modem controller are disabled when a second PCI soundcard is |
1128 | * present. This patch, tweaking the VT8237 ISA bridge, enables them. | 1161 | * present. This patch, tweaking the VT8237 ISA bridge, enables them. |
1129 | * -- bjd | 1162 | * -- bjd |
1130 | */ | 1163 | */ |
1131 | static void __init asus_hides_ac97_lpc(struct pci_dev *dev) | 1164 | static void asus_hides_ac97_lpc(struct pci_dev *dev) |
1132 | { | 1165 | { |
1133 | u8 val; | 1166 | u8 val; |
1134 | int asus_hides_ac97 = 0; | 1167 | int asus_hides_ac97 = 0; |
1135 | 1168 | ||
1136 | if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) { | 1169 | if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) { |
1137 | if (dev->device == PCI_DEVICE_ID_VIA_8237) | 1170 | if (dev->device == PCI_DEVICE_ID_VIA_8237) |
1138 | asus_hides_ac97 = 1; | 1171 | asus_hides_ac97 = 1; |
1139 | } | 1172 | } |
1140 | 1173 | ||
1141 | if (!asus_hides_ac97) | 1174 | if (!asus_hides_ac97) |
1142 | return; | 1175 | return; |
1143 | 1176 | ||
1144 | pci_read_config_byte(dev, 0x50, &val); | 1177 | pci_read_config_byte(dev, 0x50, &val); |
1145 | if (val & 0xc0) { | 1178 | if (val & 0xc0) { |
1146 | pci_write_config_byte(dev, 0x50, val & (~0xc0)); | 1179 | pci_write_config_byte(dev, 0x50, val & (~0xc0)); |
1147 | pci_read_config_byte(dev, 0x50, &val); | 1180 | pci_read_config_byte(dev, 0x50, &val); |
1148 | if (val & 0xc0) | 1181 | if (val & 0xc0) |
1149 | printk(KERN_INFO "PCI: onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n", val); | 1182 | printk(KERN_INFO "PCI: onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n", val); |
1150 | else | 1183 | else |
1151 | printk(KERN_INFO "PCI: enabled onboard AC97/MC97 devices\n"); | 1184 | printk(KERN_INFO "PCI: enabled onboard AC97/MC97 devices\n"); |
1152 | } | 1185 | } |
1153 | } | 1186 | } |
1154 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc ); | 1187 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc ); |
1155 | 1188 | ||
1156 | 1189 | ||
1157 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus ); | 1190 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus ); |
1158 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus ); | 1191 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus ); |
1159 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus ); | 1192 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus ); |
1160 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus ); | 1193 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus ); |
1161 | 1194 | ||
1195 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc ); | ||
1196 | |||
1197 | |||
1198 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus ); | ||
1199 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus ); | ||
1200 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus ); | ||
1201 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus ); | ||
1202 | |||
1162 | #if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE) | 1203 | #if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE) |
1163 | 1204 | ||
1164 | /* | 1205 | /* |
1165 | * If we are using libata we can drive this chip properly but must | 1206 | * If we are using libata we can drive this chip properly but must |
1166 | * do this early on to make the additional device appear during | 1207 | * do this early on to make the additional device appear during |
1167 | * the PCI scanning. | 1208 | * the PCI scanning. |
1168 | */ | 1209 | */ |
1169 | 1210 | ||
1170 | static void __devinit quirk_jmicron_dualfn(struct pci_dev *pdev) | 1211 | static void quirk_jmicron_dualfn(struct pci_dev *pdev) |
1171 | { | 1212 | { |
1172 | u32 conf; | 1213 | u32 conf; |
1173 | u8 hdr; | 1214 | u8 hdr; |
1174 | 1215 | ||
1175 | /* Only poke fn 0 */ | 1216 | /* Only poke fn 0 */ |
1176 | if (PCI_FUNC(pdev->devfn)) | 1217 | if (PCI_FUNC(pdev->devfn)) |
1177 | return; | 1218 | return; |
1178 | 1219 | ||
1179 | switch(pdev->device) { | 1220 | switch(pdev->device) { |
1180 | case PCI_DEVICE_ID_JMICRON_JMB365: | 1221 | case PCI_DEVICE_ID_JMICRON_JMB365: |
1181 | case PCI_DEVICE_ID_JMICRON_JMB366: | 1222 | case PCI_DEVICE_ID_JMICRON_JMB366: |
1182 | /* Redirect IDE second PATA port to the right spot */ | 1223 | /* Redirect IDE second PATA port to the right spot */ |
1183 | pci_read_config_dword(pdev, 0x80, &conf); | 1224 | pci_read_config_dword(pdev, 0x80, &conf); |
1184 | conf |= (1 << 24); | 1225 | conf |= (1 << 24); |
1185 | pci_write_config_dword(pdev, 0x80, conf); | 1226 | pci_write_config_dword(pdev, 0x80, conf); |
1186 | /* Fall through */ | 1227 | /* Fall through */ |
1187 | case PCI_DEVICE_ID_JMICRON_JMB361: | 1228 | case PCI_DEVICE_ID_JMICRON_JMB361: |
1188 | case PCI_DEVICE_ID_JMICRON_JMB363: | 1229 | case PCI_DEVICE_ID_JMICRON_JMB363: |
1189 | pci_read_config_dword(pdev, 0x40, &conf); | 1230 | pci_read_config_dword(pdev, 0x40, &conf); |
1190 | /* Enable dual function mode, AHCI on fn 0, IDE fn1 */ | 1231 | /* Enable dual function mode, AHCI on fn 0, IDE fn1 */ |
1191 | /* Set the class codes correctly and then direct IDE 0 */ | 1232 | /* Set the class codes correctly and then direct IDE 0 */ |
1192 | conf &= ~0x000F0200; /* Clear bits 9 and 16-19 */ | 1233 | conf &= ~0x000F0200; /* Clear bits 9 and 16-19 */ |
1193 | conf |= 0x00C20002; /* Set bits 1, 17, 22 and 23 */ | 1234 | conf |= 0x00C20002; /* Set bits 1, 17, 22 and 23 */ |
1194 | pci_write_config_dword(pdev, 0x40, conf); | 1235 | pci_write_config_dword(pdev, 0x40, conf); |
1195 | 1236 | ||
1196 | /* Reconfigure so that the PCI scanner discovers the | 1237 | /* Reconfigure so that the PCI scanner discovers the |
1197 | device is now multifunction */ | 1238 | device is now multifunction */ |
1198 | 1239 | ||
1199 | pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr); | 1240 | pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr); |
1200 | pdev->hdr_type = hdr & 0x7f; | 1241 | pdev->hdr_type = hdr & 0x7f; |
1201 | pdev->multifunction = !!(hdr & 0x80); | 1242 | pdev->multifunction = !!(hdr & 0x80); |
1202 | 1243 | ||
1203 | break; | 1244 | break; |
1204 | } | 1245 | } |
1205 | } | 1246 | } |
1206 | 1247 | ||
1207 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn); | 1248 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn); |
1249 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, quirk_jmicron_dualfn); | ||
1208 | 1250 | ||
1209 | #endif | 1251 | #endif |
1210 | 1252 | ||
1211 | #ifdef CONFIG_X86_IO_APIC | 1253 | #ifdef CONFIG_X86_IO_APIC |
1212 | static void __init quirk_alder_ioapic(struct pci_dev *pdev) | 1254 | static void __init quirk_alder_ioapic(struct pci_dev *pdev) |
1213 | { | 1255 | { |
1214 | int i; | 1256 | int i; |
1215 | 1257 | ||
1216 | if ((pdev->class >> 8) != 0xff00) | 1258 | if ((pdev->class >> 8) != 0xff00) |
1217 | return; | 1259 | return; |
1218 | 1260 | ||
1219 | /* the first BAR is the location of the IO APIC...we must | 1261 | /* the first BAR is the location of the IO APIC...we must |
1220 | * not touch this (and it's already covered by the fixmap), so | 1262 | * not touch this (and it's already covered by the fixmap), so |
1221 | * forcibly insert it into the resource tree */ | 1263 | * forcibly insert it into the resource tree */ |
1222 | if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0)) | 1264 | if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0)) |
1223 | insert_resource(&iomem_resource, &pdev->resource[0]); | 1265 | insert_resource(&iomem_resource, &pdev->resource[0]); |
1224 | 1266 | ||
1225 | /* The next five BARs all seem to be rubbish, so just clean | 1267 | /* The next five BARs all seem to be rubbish, so just clean |
1226 | * them out */ | 1268 | * them out */ |
1227 | for (i=1; i < 6; i++) { | 1269 | for (i=1; i < 6; i++) { |
1228 | memset(&pdev->resource[i], 0, sizeof(pdev->resource[i])); | 1270 | memset(&pdev->resource[i], 0, sizeof(pdev->resource[i])); |
1229 | } | 1271 | } |
1230 | 1272 | ||
1231 | } | 1273 | } |
1232 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic ); | 1274 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic ); |
1233 | #endif | 1275 | #endif |
1234 | 1276 | ||
1235 | enum ide_combined_type { COMBINED = 0, IDE = 1, LIBATA = 2 }; | 1277 | enum ide_combined_type { COMBINED = 0, IDE = 1, LIBATA = 2 }; |
1236 | /* Defaults to combined */ | 1278 | /* Defaults to combined */ |
1237 | static enum ide_combined_type combined_mode; | 1279 | static enum ide_combined_type combined_mode; |
1238 | 1280 | ||
1239 | static int __init combined_setup(char *str) | 1281 | static int __init combined_setup(char *str) |
1240 | { | 1282 | { |
1241 | if (!strncmp(str, "ide", 3)) | 1283 | if (!strncmp(str, "ide", 3)) |
1242 | combined_mode = IDE; | 1284 | combined_mode = IDE; |
1243 | else if (!strncmp(str, "libata", 6)) | 1285 | else if (!strncmp(str, "libata", 6)) |
1244 | combined_mode = LIBATA; | 1286 | combined_mode = LIBATA; |
1245 | else /* "combined" or anything else defaults to old behavior */ | 1287 | else /* "combined" or anything else defaults to old behavior */ |
1246 | combined_mode = COMBINED; | 1288 | combined_mode = COMBINED; |
1247 | 1289 | ||
1248 | return 1; | 1290 | return 1; |
1249 | } | 1291 | } |
1250 | __setup("combined_mode=", combined_setup); | 1292 | __setup("combined_mode=", combined_setup); |
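Since combined_setup() is registered with __setup(), the mode is selected on the kernel command line. Illustrative boot arguments (names follow the strncmp() checks above):

	combined_mode=libata	reserve both legacy port ranges (0x1f0 and 0x170) for libata
	combined_mode=ide	reserve nothing, so the old IDE driver can claim the ports

Anything else, including "combined", keeps the default behaviour of reserving only the legacy range that the SATA port is mapped to.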
1251 | 1293 | ||
1252 | #ifdef CONFIG_SATA_INTEL_COMBINED | 1294 | #ifdef CONFIG_SATA_INTEL_COMBINED |
1253 | static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev) | 1295 | static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev) |
1254 | { | 1296 | { |
1255 | u8 prog, comb, tmp; | 1297 | u8 prog, comb, tmp; |
1256 | int ich = 0; | 1298 | int ich = 0; |
1257 | 1299 | ||
1258 | /* | 1300 | /* |
1259 | * Narrow down to Intel SATA PCI devices. | 1301 | * Narrow down to Intel SATA PCI devices. |
1260 | */ | 1302 | */ |
1261 | switch (pdev->device) { | 1303 | switch (pdev->device) { |
1262 | /* PCI ids taken from drivers/scsi/ata_piix.c */ | 1304 | /* PCI ids taken from drivers/scsi/ata_piix.c */ |
1263 | case 0x24d1: | 1305 | case 0x24d1: |
1264 | case 0x24df: | 1306 | case 0x24df: |
1265 | case 0x25a3: | 1307 | case 0x25a3: |
1266 | case 0x25b0: | 1308 | case 0x25b0: |
1267 | ich = 5; | 1309 | ich = 5; |
1268 | break; | 1310 | break; |
1269 | case 0x2651: | 1311 | case 0x2651: |
1270 | case 0x2652: | 1312 | case 0x2652: |
1271 | case 0x2653: | 1313 | case 0x2653: |
1272 | case 0x2680: /* ESB2 */ | 1314 | case 0x2680: /* ESB2 */ |
1273 | ich = 6; | 1315 | ich = 6; |
1274 | break; | 1316 | break; |
1275 | case 0x27c0: | 1317 | case 0x27c0: |
1276 | case 0x27c4: | 1318 | case 0x27c4: |
1277 | ich = 7; | 1319 | ich = 7; |
1278 | break; | 1320 | break; |
1279 | case 0x2828: /* ICH8M */ | 1321 | case 0x2828: /* ICH8M */ |
1280 | ich = 8; | 1322 | ich = 8; |
1281 | break; | 1323 | break; |
1282 | default: | 1324 | default: |
1283 | /* we do not handle this PCI device */ | 1325 | /* we do not handle this PCI device */ |
1284 | return; | 1326 | return; |
1285 | } | 1327 | } |
1286 | 1328 | ||
1287 | /* | 1329 | /* |
1288 | * Read combined mode register. | 1330 | * Read combined mode register. |
1289 | */ | 1331 | */ |
1290 | pci_read_config_byte(pdev, 0x90, &tmp); /* combined mode reg */ | 1332 | pci_read_config_byte(pdev, 0x90, &tmp); /* combined mode reg */ |
1291 | 1333 | ||
1292 | if (ich == 5) { | 1334 | if (ich == 5) { |
1293 | tmp &= 0x6; /* interesting bits 2:1, PATA primary/secondary */ | 1335 | tmp &= 0x6; /* interesting bits 2:1, PATA primary/secondary */ |
1294 | if (tmp == 0x4) /* bits 10x */ | 1336 | if (tmp == 0x4) /* bits 10x */ |
1295 | comb = (1 << 0); /* SATA port 0, PATA port 1 */ | 1337 | comb = (1 << 0); /* SATA port 0, PATA port 1 */ |
1296 | else if (tmp == 0x6) /* bits 11x */ | 1338 | else if (tmp == 0x6) /* bits 11x */ |
1297 | comb = (1 << 2); /* PATA port 0, SATA port 1 */ | 1339 | comb = (1 << 2); /* PATA port 0, SATA port 1 */ |
1298 | else | 1340 | else |
1299 | return; /* not in combined mode */ | 1341 | return; /* not in combined mode */ |
1300 | } else { | 1342 | } else { |
1301 | WARN_ON((ich != 6) && (ich != 7) && (ich != 8)); | 1343 | WARN_ON((ich != 6) && (ich != 7) && (ich != 8)); |
1302 | tmp &= 0x3; /* interesting bits 1:0 */ | 1344 | tmp &= 0x3; /* interesting bits 1:0 */ |
1303 | if (tmp & (1 << 0)) | 1345 | if (tmp & (1 << 0)) |
1304 | comb = (1 << 2); /* PATA port 0, SATA port 1 */ | 1346 | comb = (1 << 2); /* PATA port 0, SATA port 1 */ |
1305 | else if (tmp & (1 << 1)) | 1347 | else if (tmp & (1 << 1)) |
1306 | comb = (1 << 0); /* SATA port 0, PATA port 1 */ | 1348 | comb = (1 << 0); /* SATA port 0, PATA port 1 */ |
1307 | else | 1349 | else |
1308 | return; /* not in combined mode */ | 1350 | return; /* not in combined mode */ |
1309 | } | 1351 | } |
1310 | 1352 | ||
1311 | /* | 1353 | /* |
1312 | * Read programming interface register. | 1354 | * Read programming interface register. |
1313 | * (Tells us if it's legacy or native mode) | 1355 | * (Tells us if it's legacy or native mode) |
1314 | */ | 1356 | */ |
1315 | pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); | 1357 | pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); |
1316 | 1358 | ||
1317 | /* if SATA port is in native mode, we're ok. */ | 1359 | /* if SATA port is in native mode, we're ok. */ |
1318 | if (prog & comb) | 1360 | if (prog & comb) |
1319 | return; | 1361 | return; |
1320 | 1362 | ||
1324 | /* Don't reserve any legacy ports, so the IDE driver can get them (but only if | 1363 | /* Don't reserve any legacy ports, so the IDE driver can get them (but only if |
1322 | * combined_mode=ide). | 1364 | * combined_mode=ide). |
1323 | */ | 1365 | */ |
1324 | if (combined_mode == IDE) | 1366 | if (combined_mode == IDE) |
1325 | return; | 1367 | return; |
1326 | 1368 | ||
1327 | /* Grab them both for libata if combined_mode=libata. */ | 1369 | /* Grab them both for libata if combined_mode=libata. */ |
1328 | if (combined_mode == LIBATA) { | 1370 | if (combined_mode == LIBATA) { |
1329 | request_region(0x1f0, 8, "libata"); /* port 0 */ | 1371 | request_region(0x1f0, 8, "libata"); /* port 0 */ |
1330 | request_region(0x170, 8, "libata"); /* port 1 */ | 1372 | request_region(0x170, 8, "libata"); /* port 1 */ |
1331 | return; | 1373 | return; |
1332 | } | 1374 | } |
1333 | 1375 | ||
1334 | /* SATA port is in legacy mode. Reserve port so that | 1376 | /* SATA port is in legacy mode. Reserve port so that |
1335 | * IDE driver does not attempt to use it. If request_region | 1377 | * IDE driver does not attempt to use it. If request_region |
1336 | * fails, it will be obvious at boot time, so we don't bother | 1378 | * fails, it will be obvious at boot time, so we don't bother |
1337 | * checking return values. | 1379 | * checking return values. |
1338 | */ | 1380 | */ |
1339 | if (comb == (1 << 0)) | 1381 | if (comb == (1 << 0)) |
1340 | request_region(0x1f0, 8, "libata"); /* port 0 */ | 1382 | request_region(0x1f0, 8, "libata"); /* port 0 */ |
1341 | else | 1383 | else |
1342 | request_region(0x170, 8, "libata"); /* port 1 */ | 1384 | request_region(0x170, 8, "libata"); /* port 1 */ |
1343 | } | 1385 | } |
1344 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_intel_ide_combined ); | 1386 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_intel_ide_combined ); |
1345 | #endif /* CONFIG_SATA_INTEL_COMBINED */ | 1387 | #endif /* CONFIG_SATA_INTEL_COMBINED */ |
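To make the register decode in quirk_intel_ide_combined() concrete, here is a short walk-through with hypothetical values for an ICH6 part (the numbers are illustrative only, not taken from real hardware):

	/* Hypothetical ICH6 example:
	 *   reg 0x90 reads 0x01 -> tmp & 0x3 has bit 0 set -> comb = 1 << 2
	 *                          (PATA on port 0, SATA on port 1)
	 *   PCI_CLASS_PROG reads 0x8a -> bit 2 clear, so prog & comb == 0 and
	 *                          the SATA channel is still in legacy mode
	 *   combined_mode left at COMBINED -> request_region(0x170, 8, "libata")
	 *                          reserves the secondary legacy ports
	 */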
1346 | 1388 | ||
1347 | 1389 | ||
1348 | int pcie_mch_quirk; | 1390 | int pcie_mch_quirk; |
1349 | 1391 | ||
1350 | static void __devinit quirk_pcie_mch(struct pci_dev *pdev) | 1392 | static void __devinit quirk_pcie_mch(struct pci_dev *pdev) |
1351 | { | 1393 | { |
1352 | pcie_mch_quirk = 1; | 1394 | pcie_mch_quirk = 1; |
1353 | } | 1395 | } |
1354 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch ); | 1396 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch ); |
1355 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch ); | 1397 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch ); |
1356 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch ); | 1398 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch ); |
1357 | 1399 | ||
1358 | 1400 | ||
1359 | /* | 1401 | /* |
1360 | * It's possible for the MSI to get corrupted if shpc and acpi | 1402 | * It's possible for the MSI to get corrupted if shpc and acpi |
1361 | * are used together on certain PXH-based systems. | 1403 | * are used together on certain PXH-based systems. |
1362 | */ | 1404 | */ |
1363 | static void __devinit quirk_pcie_pxh(struct pci_dev *dev) | 1405 | static void __devinit quirk_pcie_pxh(struct pci_dev *dev) |
1364 | { | 1406 | { |
1365 | disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), | 1407 | disable_msi_mode(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), |
1366 | PCI_CAP_ID_MSI); | 1408 | PCI_CAP_ID_MSI); |
1367 | dev->no_msi = 1; | 1409 | dev->no_msi = 1; |
1368 | 1410 | ||
1369 | printk(KERN_WARNING "PCI: PXH quirk detected, " | 1411 | printk(KERN_WARNING "PCI: PXH quirk detected, " |
1370 | "disabling MSI for SHPC device\n"); | 1412 | "disabling MSI for SHPC device\n"); |
1371 | } | 1413 | } |
1372 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh); | 1414 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh); |
1373 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh); | 1415 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh); |
1374 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh); | 1416 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh); |
1375 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh); | 1417 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh); |
1376 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh); | 1418 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh); |
1377 | 1419 | ||
1378 | /* | 1420 | /* |
1379 | * Some Intel PCI Express chipsets have trouble with downstream | 1421 | * Some Intel PCI Express chipsets have trouble with downstream |
1380 | * device power management. | 1422 | * device power management. |
1381 | */ | 1423 | */ |
1382 | static void quirk_intel_pcie_pm(struct pci_dev * dev) | 1424 | static void quirk_intel_pcie_pm(struct pci_dev * dev) |
1383 | { | 1425 | { |
1384 | pci_pm_d3_delay = 120; | 1426 | pci_pm_d3_delay = 120; |
1385 | dev->no_d1d2 = 1; | 1427 | dev->no_d1d2 = 1; |
1386 | } | 1428 | } |
1387 | 1429 | ||
1388 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm); | 1430 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm); |
1389 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm); | 1431 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm); |
1390 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm); | 1432 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm); |
1391 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm); | 1433 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm); |
1392 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm); | 1434 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm); |
1393 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm); | 1435 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm); |
1394 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm); | 1436 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm); |
1395 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm); | 1437 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm); |
1396 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm); | 1438 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm); |
1397 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm); | 1439 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm); |
1398 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm); | 1440 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm); |
1399 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm); | 1441 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm); |
1400 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm); | 1442 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm); |
1401 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm); | 1443 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm); |
1402 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm); | 1444 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm); |
1403 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm); | 1445 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm); |
1404 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm); | 1446 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm); |
1405 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm); | 1447 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm); |
1406 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm); | 1448 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm); |
1407 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); | 1449 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); |
1408 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); | 1450 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); |
1409 | 1451 | ||
1410 | static void __devinit quirk_netmos(struct pci_dev *dev) | 1452 | static void __devinit quirk_netmos(struct pci_dev *dev) |
1411 | { | 1453 | { |
1412 | unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4; | 1454 | unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4; |
1413 | unsigned int num_serial = dev->subsystem_device & 0xf; | 1455 | unsigned int num_serial = dev->subsystem_device & 0xf; |
1414 | 1456 | ||
1415 | /* | 1457 | /* |
1416 | * These Netmos parts are multiport serial devices with optional | 1458 | * These Netmos parts are multiport serial devices with optional |
1417 | * parallel ports. Even when parallel ports are present, they | 1459 | * parallel ports. Even when parallel ports are present, they |
1418 | * are identified as class SERIAL, which means the serial driver | 1460 | * are identified as class SERIAL, which means the serial driver |
1419 | * will claim them. To prevent this, mark them as class OTHER. | 1461 | * will claim them. To prevent this, mark them as class OTHER. |
1420 | * These combo devices should be claimed by parport_serial. | 1462 | * These combo devices should be claimed by parport_serial. |
1421 | * | 1463 | * |
1422 | * The subdevice ID is of the form 0x00PS, where <P> is the number | 1464 | * The subdevice ID is of the form 0x00PS, where <P> is the number |
1423 | * of parallel ports and <S> is the number of serial ports. | 1465 | * of parallel ports and <S> is the number of serial ports. |
1424 | */ | 1466 | */ |
1425 | switch (dev->device) { | 1467 | switch (dev->device) { |
1426 | case PCI_DEVICE_ID_NETMOS_9735: | 1468 | case PCI_DEVICE_ID_NETMOS_9735: |
1427 | case PCI_DEVICE_ID_NETMOS_9745: | 1469 | case PCI_DEVICE_ID_NETMOS_9745: |
1428 | case PCI_DEVICE_ID_NETMOS_9835: | 1470 | case PCI_DEVICE_ID_NETMOS_9835: |
1429 | case PCI_DEVICE_ID_NETMOS_9845: | 1471 | case PCI_DEVICE_ID_NETMOS_9845: |
1430 | case PCI_DEVICE_ID_NETMOS_9855: | 1472 | case PCI_DEVICE_ID_NETMOS_9855: |
1431 | if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL && | 1473 | if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_SERIAL && |
1432 | num_parallel) { | 1474 | num_parallel) { |
1433 | printk(KERN_INFO "PCI: Netmos %04x (%u parallel, " | 1475 | printk(KERN_INFO "PCI: Netmos %04x (%u parallel, " |
1434 | "%u serial); changing class SERIAL to OTHER " | 1476 | "%u serial); changing class SERIAL to OTHER " |
1435 | "(use parport_serial)\n", | 1477 | "(use parport_serial)\n", |
1436 | dev->device, num_parallel, num_serial); | 1478 | dev->device, num_parallel, num_serial); |
1437 | dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) | | 1479 | dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) | |
1438 | (dev->class & 0xff); | 1480 | (dev->class & 0xff); |
1439 | } | 1481 | } |
1440 | } | 1482 | } |
1441 | } | 1483 | } |
1442 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos); | 1484 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos); |
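As a quick sanity check of the 0x00PS decode above, with a hypothetical subsystem device ID:

	/* dev->subsystem_device == 0x0012 (hypothetical)
	 *   num_parallel = (0x0012 & 0xf0) >> 4 = 1
	 *   num_serial   =  0x0012 & 0x0f       = 2
	 * -> a parallel port is present, so the class is flipped from SERIAL
	 *    to OTHER and parport_serial claims the device instead
	 */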
1443 | 1485 | ||
1444 | static void __devinit quirk_e100_interrupt(struct pci_dev *dev) | 1486 | static void __devinit quirk_e100_interrupt(struct pci_dev *dev) |
1445 | { | 1487 | { |
1446 | u16 command; | 1488 | u16 command; |
1447 | u32 bar; | 1489 | u32 bar; |
1448 | u8 __iomem *csr; | 1490 | u8 __iomem *csr; |
1449 | u8 cmd_hi; | 1491 | u8 cmd_hi; |
1450 | 1492 | ||
1451 | switch (dev->device) { | 1493 | switch (dev->device) { |
1452 | /* PCI IDs taken from drivers/net/e100.c */ | 1494 | /* PCI IDs taken from drivers/net/e100.c */ |
1453 | case 0x1029: | 1495 | case 0x1029: |
1454 | case 0x1030 ... 0x1034: | 1496 | case 0x1030 ... 0x1034: |
1455 | case 0x1038 ... 0x103E: | 1497 | case 0x1038 ... 0x103E: |
1456 | case 0x1050 ... 0x1057: | 1498 | case 0x1050 ... 0x1057: |
1457 | case 0x1059: | 1499 | case 0x1059: |
1458 | case 0x1064 ... 0x106B: | 1500 | case 0x1064 ... 0x106B: |
1459 | case 0x1091 ... 0x1095: | 1501 | case 0x1091 ... 0x1095: |
1460 | case 0x1209: | 1502 | case 0x1209: |
1461 | case 0x1229: | 1503 | case 0x1229: |
1462 | case 0x2449: | 1504 | case 0x2449: |
1463 | case 0x2459: | 1505 | case 0x2459: |
1464 | case 0x245D: | 1506 | case 0x245D: |
1465 | case 0x27DC: | 1507 | case 0x27DC: |
1466 | break; | 1508 | break; |
1467 | default: | 1509 | default: |
1468 | return; | 1510 | return; |
1469 | } | 1511 | } |
1470 | 1512 | ||
1471 | /* | 1513 | /* |
1472 | * Some firmware hands off the e100 with interrupts enabled, | 1514 | * Some firmware hands off the e100 with interrupts enabled, |
1473 | * which can cause a flood of interrupts if packets are | 1515 | * which can cause a flood of interrupts if packets are |
1474 | * received before the driver attaches to the device. So | 1516 | * received before the driver attaches to the device. So |
1475 | * disable all e100 interrupts here. The driver will | 1517 | * disable all e100 interrupts here. The driver will |
1476 | * re-enable them when it's ready. | 1518 | * re-enable them when it's ready. |
1477 | */ | 1519 | */ |
1478 | pci_read_config_word(dev, PCI_COMMAND, &command); | 1520 | pci_read_config_word(dev, PCI_COMMAND, &command); |
1479 | pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar); | 1521 | pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &bar); |
1480 | 1522 | ||
1481 | if (!(command & PCI_COMMAND_MEMORY) || !bar) | 1523 | if (!(command & PCI_COMMAND_MEMORY) || !bar) |
1482 | return; | 1524 | return; |
1483 | 1525 | ||
1484 | csr = ioremap(bar, 8); | 1526 | csr = ioremap(bar, 8); |
1485 | if (!csr) { | 1527 | if (!csr) { |
1486 | printk(KERN_WARNING "PCI: Can't map %s e100 registers\n", | 1528 | printk(KERN_WARNING "PCI: Can't map %s e100 registers\n", |
1487 | pci_name(dev)); | 1529 | pci_name(dev)); |
1488 | return; | 1530 | return; |
1489 | } | 1531 | } |
1490 | 1532 | ||
1491 | cmd_hi = readb(csr + 3); | 1533 | cmd_hi = readb(csr + 3); |
1492 | if (cmd_hi == 0) { | 1534 | if (cmd_hi == 0) { |
1493 | printk(KERN_WARNING "PCI: Firmware left %s e100 interrupts " | 1535 | printk(KERN_WARNING "PCI: Firmware left %s e100 interrupts " |
1494 | "enabled, disabling\n", pci_name(dev)); | 1536 | "enabled, disabling\n", pci_name(dev)); |
1495 | writeb(1, csr + 3); | 1537 | writeb(1, csr + 3); |
1496 | } | 1538 | } |
1497 | 1539 | ||
1498 | iounmap(csr); | 1540 | iounmap(csr); |
1499 | } | 1541 | } |
1500 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt); | 1542 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt); |
1501 | 1543 | ||
1502 | static void __devinit fixup_rev1_53c810(struct pci_dev* dev) | 1544 | static void __devinit fixup_rev1_53c810(struct pci_dev* dev) |
1503 | { | 1545 | { |
1504 | /* rev 1 ncr53c810 chips don't set the class at all which means | 1546 | /* rev 1 ncr53c810 chips don't set the class at all which means |
1505 | * they don't get their resources remapped. Fix that here. | 1547 | * they don't get their resources remapped. Fix that here. |
1506 | */ | 1548 | */ |
1507 | 1549 | ||
1508 | if (dev->class == PCI_CLASS_NOT_DEFINED) { | 1550 | if (dev->class == PCI_CLASS_NOT_DEFINED) { |
1509 | printk(KERN_INFO "NCR 53c810 rev 1 detected, setting PCI class.\n"); | 1551 | printk(KERN_INFO "NCR 53c810 rev 1 detected, setting PCI class.\n"); |
1510 | dev->class = PCI_CLASS_STORAGE_SCSI; | 1552 | dev->class = PCI_CLASS_STORAGE_SCSI; |
1511 | } | 1553 | } |
1512 | } | 1554 | } |
1513 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810); | 1555 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810); |
1514 | 1556 | ||
1515 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) | 1557 | static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) |
1516 | { | 1558 | { |
1517 | while (f < end) { | 1559 | while (f < end) { |
1518 | if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && | 1560 | if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && |
1519 | (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { | 1561 | (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { |
1520 | pr_debug("PCI: Calling quirk %p for %s\n", f->hook, pci_name(dev)); | 1562 | pr_debug("PCI: Calling quirk %p for %s\n", f->hook, pci_name(dev)); |
1521 | f->hook(dev); | 1563 | f->hook(dev); |
1522 | } | 1564 | } |
1523 | f++; | 1565 | f++; |
1524 | } | 1566 | } |
1525 | } | 1567 | } |
1526 | 1568 | ||
1527 | extern struct pci_fixup __start_pci_fixups_early[]; | 1569 | extern struct pci_fixup __start_pci_fixups_early[]; |
1528 | extern struct pci_fixup __end_pci_fixups_early[]; | 1570 | extern struct pci_fixup __end_pci_fixups_early[]; |
1529 | extern struct pci_fixup __start_pci_fixups_header[]; | 1571 | extern struct pci_fixup __start_pci_fixups_header[]; |
1530 | extern struct pci_fixup __end_pci_fixups_header[]; | 1572 | extern struct pci_fixup __end_pci_fixups_header[]; |
1531 | extern struct pci_fixup __start_pci_fixups_final[]; | 1573 | extern struct pci_fixup __start_pci_fixups_final[]; |
1532 | extern struct pci_fixup __end_pci_fixups_final[]; | 1574 | extern struct pci_fixup __end_pci_fixups_final[]; |
1533 | extern struct pci_fixup __start_pci_fixups_enable[]; | 1575 | extern struct pci_fixup __start_pci_fixups_enable[]; |
1534 | extern struct pci_fixup __end_pci_fixups_enable[]; | 1576 | extern struct pci_fixup __end_pci_fixups_enable[]; |
1577 | extern struct pci_fixup __start_pci_fixups_resume[]; | ||
1578 | extern struct pci_fixup __end_pci_fixups_resume[]; | ||
1535 | 1579 | ||
1536 | 1580 | ||
1537 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) | 1581 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) |
1538 | { | 1582 | { |
1539 | struct pci_fixup *start, *end; | 1583 | struct pci_fixup *start, *end; |
1540 | 1584 | ||
1541 | switch(pass) { | 1585 | switch(pass) { |
1542 | case pci_fixup_early: | 1586 | case pci_fixup_early: |
1543 | start = __start_pci_fixups_early; | 1587 | start = __start_pci_fixups_early; |
1544 | end = __end_pci_fixups_early; | 1588 | end = __end_pci_fixups_early; |
1545 | break; | 1589 | break; |
1546 | 1590 | ||
1547 | case pci_fixup_header: | 1591 | case pci_fixup_header: |
1548 | start = __start_pci_fixups_header; | 1592 | start = __start_pci_fixups_header; |
1549 | end = __end_pci_fixups_header; | 1593 | end = __end_pci_fixups_header; |
1550 | break; | 1594 | break; |
1551 | 1595 | ||
1552 | case pci_fixup_final: | 1596 | case pci_fixup_final: |
1553 | start = __start_pci_fixups_final; | 1597 | start = __start_pci_fixups_final; |
1554 | end = __end_pci_fixups_final; | 1598 | end = __end_pci_fixups_final; |
1555 | break; | 1599 | break; |
1556 | 1600 | ||
1557 | case pci_fixup_enable: | 1601 | case pci_fixup_enable: |
1558 | start = __start_pci_fixups_enable; | 1602 | start = __start_pci_fixups_enable; |
1559 | end = __end_pci_fixups_enable; | 1603 | end = __end_pci_fixups_enable; |
1560 | break; | 1604 | break; |
1561 | 1605 | ||
1606 | case pci_fixup_resume: | ||
1607 | start = __start_pci_fixups_resume; | ||
1608 | end = __end_pci_fixups_resume; | ||
1609 | break; | ||
1610 | |||
1562 | default: | 1611 | default: |
1563 | /* stupid compiler warning, you would think with an enum... */ | 1612 | /* stupid compiler warning, you would think with an enum... */ |
1564 | return; | 1613 | return; |
1565 | } | 1614 | } |
1566 | pci_do_fixups(dev, start, end); | 1615 | pci_do_fixups(dev, start, end); |
1567 | } | 1616 | } |
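For reference, a quirk opts into the new resume pass with the same declaration pattern used throughout this file. A minimal sketch (the quirk name below is hypothetical, and the pci_fixup_device() call shows the kind of hook-up the resume path needs; the actual call site is not part of this hunk):

	static void my_resume_quirk(struct pci_dev *dev)
	{
		/* re-apply whatever the firmware forgets across suspend/resume */
	}
	DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_ANY_ID, my_resume_quirk);

	/* somewhere in the PCI resume path: */
	pci_fixup_device(pci_fixup_resume, dev);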
1568 | 1617 | ||
1569 | /* Enable 1k I/O space granularity on the Intel P64H2 */ | 1618 | /* Enable 1k I/O space granularity on the Intel P64H2 */ |
1570 | static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev) | 1619 | static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev) |
1571 | { | 1620 | { |
1572 | u16 en1k; | 1621 | u16 en1k; |
1573 | u8 io_base_lo, io_limit_lo; | 1622 | u8 io_base_lo, io_limit_lo; |
1574 | unsigned long base, limit; | 1623 | unsigned long base, limit; |
1575 | struct resource *res = dev->resource + PCI_BRIDGE_RESOURCES; | 1624 | struct resource *res = dev->resource + PCI_BRIDGE_RESOURCES; |
1576 | 1625 | ||
1577 | pci_read_config_word(dev, 0x40, &en1k); | 1626 | pci_read_config_word(dev, 0x40, &en1k); |
1578 | 1627 | ||
1579 | if (en1k & 0x200) { | 1628 | if (en1k & 0x200) { |
1580 | printk(KERN_INFO "PCI: Enabling 1 KB I/O space granularity\n"); | 1629 | printk(KERN_INFO "PCI: Enabling 1 KB I/O space granularity\n"); |
1581 | 1630 | ||
1582 | pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); | 1631 | pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); |
1583 | pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo); | 1632 | pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo); |
1584 | base = (io_base_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8; | 1633 | base = (io_base_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8; |
1585 | limit = (io_limit_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8; | 1634 | limit = (io_limit_lo & (PCI_IO_RANGE_MASK | 0x0c)) << 8; |
1586 | 1635 | ||
1587 | if (base <= limit) { | 1636 | if (base <= limit) { |
1588 | res->start = base; | 1637 | res->start = base; |
1589 | res->end = limit + 0x3ff; | 1638 | res->end = limit + 0x3ff; |
1590 | } | 1639 | } |
1591 | } | 1640 | } |
1592 | } | 1641 | } |
1593 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io); | 1642 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io); |
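The shifted base/limit arithmetic in quirk_p64h2_1k_io() is easier to follow with numbers plugged in (register values below are hypothetical):

	/* Hypothetical example with the 1 KB granularity bit (en1k & 0x200) set:
	 *   io_base_lo  = 0x44 -> base  = (0x44 & 0xfc) << 8 = 0x4400
	 *   io_limit_lo = 0x48 -> limit = (0x48 & 0xfc) << 8 = 0x4800
	 * bridge window becomes [0x4400, 0x4800 + 0x3ff] = [0x4400, 0x4bff]
	 */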
1594 | 1643 | ||
1595 | /* Under some circumstances, AER is not linked with extended capabilities. | 1644 | /* Under some circumstances, AER is not linked with extended capabilities. |
1596 | * Force it to be linked by setting the corresponding control bit in the | 1645 | * Force it to be linked by setting the corresponding control bit in the |
1597 | * config space. | 1646 | * config space. |
1598 | */ | 1647 | */ |
1599 | static void __devinit quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev) | 1648 | static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev) |
1600 | { | 1649 | { |
1601 | uint8_t b; | 1650 | uint8_t b; |
1602 | if (pci_read_config_byte(dev, 0xf41, &b) == 0) { | 1651 | if (pci_read_config_byte(dev, 0xf41, &b) == 0) { |
1603 | if (!(b & 0x20)) { | 1652 | if (!(b & 0x20)) { |
1604 | pci_write_config_byte(dev, 0xf41, b | 0x20); | 1653 | pci_write_config_byte(dev, 0xf41, b | 0x20); |
1605 | printk(KERN_INFO | 1654 | printk(KERN_INFO |
1606 | "PCI: Linking AER extended capability on %s\n", | 1655 | "PCI: Linking AER extended capability on %s\n", |
1607 | pci_name(dev)); | 1656 | pci_name(dev)); |
1608 | } | 1657 | } |
1609 | } | 1658 | } |
1610 | } | 1659 | } |
1611 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, | 1660 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, |
1661 | quirk_nvidia_ck804_pcie_aer_ext_cap); | ||
1662 | DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, | ||
1612 | quirk_nvidia_ck804_pcie_aer_ext_cap); | 1663 | quirk_nvidia_ck804_pcie_aer_ext_cap); |
1613 | 1664 | ||
1614 | #ifdef CONFIG_PCI_MSI | 1665 | #ifdef CONFIG_PCI_MSI |
1615 | /* To disable MSI globally */ | 1666 | /* To disable MSI globally */ |
1616 | int pci_msi_quirk; | 1667 | int pci_msi_quirk; |
1617 | 1668 | ||
1618 | /* The Serverworks PCI-X chipset does not support MSI. We cannot easily rely | 1669 | /* The Serverworks PCI-X chipset does not support MSI. We cannot easily rely |
1619 | * on setting PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually | 1670 | * on setting PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually |
1620 | * some other busses controlled by the chipset even if Linux is not aware of it. | 1671 | * some other busses controlled by the chipset even if Linux is not aware of it. |
1621 | * Instead of setting the flag on all busses in the machine, simply disable MSI | 1672 | * Instead of setting the flag on all busses in the machine, simply disable MSI |
1622 | * globally. | 1673 | * globally. |
1623 | */ | 1674 | */ |
1624 | static void __init quirk_svw_msi(struct pci_dev *dev) | 1675 | static void __init quirk_svw_msi(struct pci_dev *dev) |
1625 | { | 1676 | { |
1626 | pci_msi_quirk = 1; | 1677 | pci_msi_quirk = 1; |
1627 | printk(KERN_WARNING "PCI: MSI quirk detected. pci_msi_quirk set.\n"); | 1678 | printk(KERN_WARNING "PCI: MSI quirk detected. pci_msi_quirk set.\n"); |
1628 | } | 1679 | } |
1629 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_svw_msi); | 1680 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_svw_msi); |
1630 | 1681 | ||
1631 | /* Disable MSI on chipsets that are known to not support it */ | 1682 | /* Disable MSI on chipsets that are known to not support it */ |
1632 | static void __devinit quirk_disable_msi(struct pci_dev *dev) | 1683 | static void __devinit quirk_disable_msi(struct pci_dev *dev) |
1633 | { | 1684 | { |
1634 | if (dev->subordinate) { | 1685 | if (dev->subordinate) { |
1635 | printk(KERN_WARNING "PCI: MSI quirk detected. " | 1686 | printk(KERN_WARNING "PCI: MSI quirk detected. " |
1636 | "PCI_BUS_FLAGS_NO_MSI set for %s subordinate bus.\n", | 1687 | "PCI_BUS_FLAGS_NO_MSI set for %s subordinate bus.\n", |
1637 | pci_name(dev)); | 1688 | pci_name(dev)); |
1638 | dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; | 1689 | dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; |
1639 | } | 1690 | } |
1640 | } | 1691 | } |
1641 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi); | 1692 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi); |
1642 | 1693 | ||
1643 | /* Go through the list of Hypertransport capabilities and | 1694 | /* Go through the list of Hypertransport capabilities and |
1644 | * return 1 if a HT MSI capability is found and enabled */ | 1695 | * return 1 if a HT MSI capability is found and enabled */ |
1645 | static int __devinit msi_ht_cap_enabled(struct pci_dev *dev) | 1696 | static int __devinit msi_ht_cap_enabled(struct pci_dev *dev) |
1646 | { | 1697 | { |
1647 | int pos, ttl = 48; | 1698 | int pos, ttl = 48; |
1648 | 1699 | ||
1649 | pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); | 1700 | pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); |
1650 | while (pos && ttl--) { | 1701 | while (pos && ttl--) { |
1651 | u8 flags; | 1702 | u8 flags; |
1652 | 1703 | ||
1653 | if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, | 1704 | if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, |
1654 | &flags) == 0) | 1705 | &flags) == 0) |
1655 | { | 1706 | { |
1656 | printk(KERN_INFO "PCI: Found %s HT MSI Mapping on %s\n", | 1707 | printk(KERN_INFO "PCI: Found %s HT MSI Mapping on %s\n", |
1657 | flags & HT_MSI_FLAGS_ENABLE ? | 1708 | flags & HT_MSI_FLAGS_ENABLE ? |
1658 | "enabled" : "disabled", pci_name(dev)); | 1709 | "enabled" : "disabled", pci_name(dev)); |
1659 | return (flags & HT_MSI_FLAGS_ENABLE) != 0; | 1710 | return (flags & HT_MSI_FLAGS_ENABLE) != 0; |
1660 | } | 1711 | } |
1661 | 1712 | ||
1662 | pos = pci_find_next_ht_capability(dev, pos, | 1713 | pos = pci_find_next_ht_capability(dev, pos, |
1663 | HT_CAPTYPE_MSI_MAPPING); | 1714 | HT_CAPTYPE_MSI_MAPPING); |
1664 | } | 1715 | } |
1665 | return 0; | 1716 | return 0; |
1666 | } | 1717 | } |
1667 | 1718 | ||
1668 | /* Check the hypertransport MSI mapping to know whether MSI is enabled or not */ | 1719 | /* Check the hypertransport MSI mapping to know whether MSI is enabled or not */ |
1669 | static void __devinit quirk_msi_ht_cap(struct pci_dev *dev) | 1720 | static void __devinit quirk_msi_ht_cap(struct pci_dev *dev) |
1670 | { | 1721 | { |
1671 | if (dev->subordinate && !msi_ht_cap_enabled(dev)) { | 1722 | if (dev->subordinate && !msi_ht_cap_enabled(dev)) { |
1672 | printk(KERN_WARNING "PCI: MSI quirk detected. " | 1723 | printk(KERN_WARNING "PCI: MSI quirk detected. " |
1673 | "MSI disabled on chipset %s.\n", | 1724 | "MSI disabled on chipset %s.\n", |
1674 | pci_name(dev)); | 1725 | pci_name(dev)); |
1675 | dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; | 1726 | dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; |
1676 | } | 1727 | } |
1677 | } | 1728 | } |
1678 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE, | 1729 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE, |
1679 | quirk_msi_ht_cap); | 1730 | quirk_msi_ht_cap); |
1680 | 1731 | ||
1681 | /* The nVidia CK804 chipset may have 2 HT MSI mappings. | 1732 | /* The nVidia CK804 chipset may have 2 HT MSI mappings. |
1682 | * MSI is supported if the MSI capability is set in any of these mappings. | 1733 | * MSI is supported if the MSI capability is set in any of these mappings. |
1683 | */ | 1734 | */ |
1684 | static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev) | 1735 | static void __devinit quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev) |
1685 | { | 1736 | { |
drivers/pci/search.c
1 | /* | 1 | /* |
2 | * PCI searching functions. | 2 | * PCI searching functions. |
3 | * | 3 | * |
4 | * Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter, | 4 | * Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter, |
5 | * David Mosberger-Tang | 5 | * David Mosberger-Tang |
6 | * Copyright (C) 1997 -- 2000 Martin Mares <mj@ucw.cz> | 6 | * Copyright (C) 1997 -- 2000 Martin Mares <mj@ucw.cz> |
7 | * Copyright (C) 2003 -- 2004 Greg Kroah-Hartman <greg@kroah.com> | 7 | * Copyright (C) 2003 -- 2004 Greg Kroah-Hartman <greg@kroah.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include "pci.h" | 14 | #include "pci.h" |
15 | 15 | ||
16 | DECLARE_RWSEM(pci_bus_sem); | 16 | DECLARE_RWSEM(pci_bus_sem); |
17 | 17 | ||
18 | static struct pci_bus * __devinit | 18 | static struct pci_bus * __devinit |
19 | pci_do_find_bus(struct pci_bus* bus, unsigned char busnr) | 19 | pci_do_find_bus(struct pci_bus* bus, unsigned char busnr) |
20 | { | 20 | { |
21 | struct pci_bus* child; | 21 | struct pci_bus* child; |
22 | struct list_head *tmp; | 22 | struct list_head *tmp; |
23 | 23 | ||
24 | if(bus->number == busnr) | 24 | if(bus->number == busnr) |
25 | return bus; | 25 | return bus; |
26 | 26 | ||
27 | list_for_each(tmp, &bus->children) { | 27 | list_for_each(tmp, &bus->children) { |
28 | child = pci_do_find_bus(pci_bus_b(tmp), busnr); | 28 | child = pci_do_find_bus(pci_bus_b(tmp), busnr); |
29 | if(child) | 29 | if(child) |
30 | return child; | 30 | return child; |
31 | } | 31 | } |
32 | return NULL; | 32 | return NULL; |
33 | } | 33 | } |
34 | 34 | ||
35 | /** | 35 | /** |
36 | * pci_find_bus - locate PCI bus from a given domain and bus number | 36 | * pci_find_bus - locate PCI bus from a given domain and bus number |
37 | * @domain: number of PCI domain to search | 37 | * @domain: number of PCI domain to search |
38 | * @busnr: number of desired PCI bus | 38 | * @busnr: number of desired PCI bus |
39 | * | 39 | * |
40 | * Given a PCI bus number and domain number, the desired PCI bus is located | 40 | * Given a PCI bus number and domain number, the desired PCI bus is located |
41 | * in the global list of PCI buses. If the bus is found, a pointer to its | 41 | * in the global list of PCI buses. If the bus is found, a pointer to its |
42 | * data structure is returned. If no bus is found, %NULL is returned. | 42 | * data structure is returned. If no bus is found, %NULL is returned. |
43 | */ | 43 | */ |
44 | struct pci_bus * pci_find_bus(int domain, int busnr) | 44 | struct pci_bus * pci_find_bus(int domain, int busnr) |
45 | { | 45 | { |
46 | struct pci_bus *bus = NULL; | 46 | struct pci_bus *bus = NULL; |
47 | struct pci_bus *tmp_bus; | 47 | struct pci_bus *tmp_bus; |
48 | 48 | ||
49 | while ((bus = pci_find_next_bus(bus)) != NULL) { | 49 | while ((bus = pci_find_next_bus(bus)) != NULL) { |
50 | if (pci_domain_nr(bus) != domain) | 50 | if (pci_domain_nr(bus) != domain) |
51 | continue; | 51 | continue; |
52 | tmp_bus = pci_do_find_bus(bus, busnr); | 52 | tmp_bus = pci_do_find_bus(bus, busnr); |
53 | if (tmp_bus) | 53 | if (tmp_bus) |
54 | return tmp_bus; | 54 | return tmp_bus; |
55 | } | 55 | } |
56 | return NULL; | 56 | return NULL; |
57 | } | 57 | } |
58 | 58 | ||
59 | /** | 59 | /** |
60 | * pci_find_next_bus - begin or continue searching for a PCI bus | 60 | * pci_find_next_bus - begin or continue searching for a PCI bus |
61 | * @from: Previous PCI bus found, or %NULL for new search. | 61 | * @from: Previous PCI bus found, or %NULL for new search. |
62 | * | 62 | * |
63 | * Iterates through the list of known PCI busses. A new search is | 63 | * Iterates through the list of known PCI busses. A new search is |
64 | * initiated by passing %NULL as the @from argument. Otherwise if | 64 | * initiated by passing %NULL as the @from argument. Otherwise if |
65 | * @from is not %NULL, searches continue from next device on the | 65 | * @from is not %NULL, searches continue from next device on the |
66 | * global list. | 66 | * global list. |
67 | */ | 67 | */ |
68 | struct pci_bus * | 68 | struct pci_bus * |
69 | pci_find_next_bus(const struct pci_bus *from) | 69 | pci_find_next_bus(const struct pci_bus *from) |
70 | { | 70 | { |
71 | struct list_head *n; | 71 | struct list_head *n; |
72 | struct pci_bus *b = NULL; | 72 | struct pci_bus *b = NULL; |
73 | 73 | ||
74 | WARN_ON(in_interrupt()); | 74 | WARN_ON(in_interrupt()); |
75 | down_read(&pci_bus_sem); | 75 | down_read(&pci_bus_sem); |
76 | n = from ? from->node.next : pci_root_buses.next; | 76 | n = from ? from->node.next : pci_root_buses.next; |
77 | if (n != &pci_root_buses) | 77 | if (n != &pci_root_buses) |
78 | b = pci_bus_b(n); | 78 | b = pci_bus_b(n); |
79 | up_read(&pci_bus_sem); | 79 | up_read(&pci_bus_sem); |
80 | return b; | 80 | return b; |
81 | } | 81 | } |
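A caller typically drives pci_find_next_bus() in a loop until it returns NULL; a minimal sketch:

	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		printk(KERN_DEBUG "PCI bus %04x:%02x\n",
		       pci_domain_nr(bus), bus->number);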
82 | 82 | ||
83 | /** | 83 | /** |
84 | * pci_find_slot - locate PCI device from a given PCI slot | 84 | * pci_find_slot - locate PCI device from a given PCI slot |
85 | * @bus: number of PCI bus on which desired PCI device resides | 85 | * @bus: number of PCI bus on which desired PCI device resides |
86 | * @devfn: encodes number of PCI slot in which the desired PCI | 86 | * @devfn: encodes number of PCI slot in which the desired PCI |
87 | * device resides and the logical device number within that slot | 87 | * device resides and the logical device number within that slot |
88 | * in case of multi-function devices. | 88 | * in case of multi-function devices. |
89 | * | 89 | * |
90 | * Given a PCI bus and slot/function number, the desired PCI device | 90 | * Given a PCI bus and slot/function number, the desired PCI device |
91 | * is located in the system's global list of PCI devices. If the device | 91 | * is located in the system's global list of PCI devices. If the device |
92 | * is found, a pointer to its data structure is returned. If no | 92 | * is found, a pointer to its data structure is returned. If no |
93 | * device is found, %NULL is returned. | 93 | * device is found, %NULL is returned. |
94 | */ | 94 | */ |
95 | struct pci_dev * | 95 | struct pci_dev * |
96 | pci_find_slot(unsigned int bus, unsigned int devfn) | 96 | pci_find_slot(unsigned int bus, unsigned int devfn) |
97 | { | 97 | { |
98 | struct pci_dev *dev = NULL; | 98 | struct pci_dev *dev = NULL; |
99 | 99 | ||
100 | while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 100 | while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { |
101 | if (dev->bus->number == bus && dev->devfn == devfn) | 101 | if (dev->bus->number == bus && dev->devfn == devfn) |
102 | return dev; | 102 | return dev; |
103 | } | 103 | } |
104 | return NULL; | 104 | return NULL; |
105 | } | 105 | } |
106 | 106 | ||
107 | /** | 107 | /** |
108 | * pci_get_slot - locate PCI device for a given PCI slot | 108 | * pci_get_slot - locate PCI device for a given PCI slot |
109 | * @bus: PCI bus on which desired PCI device resides | 109 | * @bus: PCI bus on which desired PCI device resides |
110 | * @devfn: encodes number of PCI slot in which the desired PCI | 110 | * @devfn: encodes number of PCI slot in which the desired PCI |
111 | * device resides and the logical device number within that slot | 111 | * device resides and the logical device number within that slot |
112 | * in case of multi-function devices. | 112 | * in case of multi-function devices. |
113 | * | 113 | * |
114 | * Given a PCI bus and slot/function number, the desired PCI device | 114 | * Given a PCI bus and slot/function number, the desired PCI device |
115 | * is located in the list of PCI devices. | 115 | * is located in the list of PCI devices. |
116 | * If the device is found, its reference count is increased and this | 116 | * If the device is found, its reference count is increased and this |
117 | * function returns a pointer to its data structure. The caller must | 117 | * function returns a pointer to its data structure. The caller must |
118 | * decrement the reference count by calling pci_dev_put(). | 118 | * decrement the reference count by calling pci_dev_put(). |
119 | * If no device is found, %NULL is returned. | 119 | * If no device is found, %NULL is returned. |
120 | */ | 120 | */ |
121 | struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn) | 121 | struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn) |
122 | { | 122 | { |
123 | struct list_head *tmp; | 123 | struct list_head *tmp; |
124 | struct pci_dev *dev; | 124 | struct pci_dev *dev; |
125 | 125 | ||
126 | WARN_ON(in_interrupt()); | 126 | WARN_ON(in_interrupt()); |
127 | down_read(&pci_bus_sem); | 127 | down_read(&pci_bus_sem); |
128 | 128 | ||
129 | list_for_each(tmp, &bus->devices) { | 129 | list_for_each(tmp, &bus->devices) { |
130 | dev = pci_dev_b(tmp); | 130 | dev = pci_dev_b(tmp); |
131 | if (dev->devfn == devfn) | 131 | if (dev->devfn == devfn) |
132 | goto out; | 132 | goto out; |
133 | } | 133 | } |
134 | 134 | ||
135 | dev = NULL; | 135 | dev = NULL; |
136 | out: | 136 | out: |
137 | pci_dev_get(dev); | 137 | pci_dev_get(dev); |
138 | up_read(&pci_bus_sem); | 138 | up_read(&pci_bus_sem); |
139 | return dev; | 139 | return dev; |
140 | } | 140 | } |
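Because pci_get_slot() returns a referenced device, the matching pci_dev_put() is the caller's responsibility. A minimal usage sketch (bus, slot and fn are assumed to be supplied by the caller):

	struct pci_dev *dev = pci_get_slot(bus, PCI_DEVFN(slot, fn));
	if (dev) {
		/* ... use dev ... */
		pci_dev_put(dev);
	}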
141 | 141 | ||
142 | /** | 142 | /** |
143 | * pci_get_bus_and_slot - locate PCI device from a given PCI slot | 143 | * pci_get_bus_and_slot - locate PCI device from a given PCI slot |
144 | * @bus: number of PCI bus on which desired PCI device resides | 144 | * @bus: number of PCI bus on which desired PCI device resides |
145 | * @devfn: encodes number of PCI slot in which the desired PCI | 145 | * @devfn: encodes number of PCI slot in which the desired PCI |
146 | * device resides and the logical device number within that slot | 146 | * device resides and the logical device number within that slot |
147 | * in case of multi-function devices. | 147 | * in case of multi-function devices. |
148 | * | 148 | * |
149 | * Given a PCI bus and slot/function number, the desired PCI device | 149 | * Given a PCI bus and slot/function number, the desired PCI device |
150 | * is located in the system's global list of PCI devices. If the device | 150 | * is located in the system's global list of PCI devices. If the device |
151 | * is found, a pointer to its data structure is returned. If no | 151 | * is found, a pointer to its data structure is returned. If no |
152 | * device is found, %NULL is returned. The returned device has its | 152 | * device is found, %NULL is returned. The returned device has its |
153 | * reference count bumped by one. | 153 | * reference count bumped by one. |
154 | */ | 154 | */ |
155 | 155 | ||
156 | struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) | 156 | struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) |
157 | { | 157 | { |
158 | struct pci_dev *dev = NULL; | 158 | struct pci_dev *dev = NULL; |
159 | 159 | ||
160 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 160 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { |
161 | if (dev->bus->number == bus && dev->devfn == devfn) | 161 | if (dev->bus->number == bus && dev->devfn == devfn) |
162 | return dev; | 162 | return dev; |
163 | } | 163 | } |
164 | return NULL; | 164 | return NULL; |
165 | } | 165 | } |
166 | 166 | ||
167 | /** | 167 | /** |
168 | * pci_find_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id | 168 | * pci_find_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id |
169 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids | 169 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids |
170 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids | 170 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids |
171 | * @ss_vendor: PCI subsystem vendor id to match, or %PCI_ANY_ID to match all vendor ids | 171 | * @ss_vendor: PCI subsystem vendor id to match, or %PCI_ANY_ID to match all vendor ids |
172 | * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids | 172 | * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids |
173 | * @from: Previous PCI device found in search, or %NULL for new search. | 173 | * @from: Previous PCI device found in search, or %NULL for new search. |
174 | * | 174 | * |
175 | * Iterates through the list of known PCI devices. If a PCI device is | 175 | * Iterates through the list of known PCI devices. If a PCI device is |
176 | * found with a matching @vendor, @device, @ss_vendor and @ss_device, a | 176 | * found with a matching @vendor, @device, @ss_vendor and @ss_device, a |
177 | * pointer to its device structure is returned. Otherwise, %NULL is returned. | 177 | * pointer to its device structure is returned. Otherwise, %NULL is returned. |
178 | * A new search is initiated by passing %NULL as the @from argument. | 178 | * A new search is initiated by passing %NULL as the @from argument. |
179 | * Otherwise if @from is not %NULL, searches continue from next device | 179 | * Otherwise if @from is not %NULL, searches continue from next device |
180 | * on the global list. | 180 | * on the global list. |
181 | * | 181 | * |
182 | * NOTE: Do not use this function any more; use pci_get_subsys() instead, as | 182 | * NOTE: Do not use this function any more; use pci_get_subsys() instead, as |
183 | * the PCI device returned by this function can disappear at any moment in | 183 | * the PCI device returned by this function can disappear at any moment in |
184 | * time. | 184 | * time. |
185 | */ | 185 | */ |
186 | static struct pci_dev * pci_find_subsys(unsigned int vendor, | 186 | static struct pci_dev * pci_find_subsys(unsigned int vendor, |
187 | unsigned int device, | 187 | unsigned int device, |
188 | unsigned int ss_vendor, | 188 | unsigned int ss_vendor, |
189 | unsigned int ss_device, | 189 | unsigned int ss_device, |
190 | const struct pci_dev *from) | 190 | const struct pci_dev *from) |
191 | { | 191 | { |
192 | struct list_head *n; | 192 | struct list_head *n; |
193 | struct pci_dev *dev; | 193 | struct pci_dev *dev; |
194 | 194 | ||
195 | WARN_ON(in_interrupt()); | 195 | WARN_ON(in_interrupt()); |
196 | down_read(&pci_bus_sem); | 196 | down_read(&pci_bus_sem); |
197 | n = from ? from->global_list.next : pci_devices.next; | 197 | n = from ? from->global_list.next : pci_devices.next; |
198 | 198 | ||
199 | while (n && (n != &pci_devices)) { | 199 | while (n && (n != &pci_devices)) { |
200 | dev = pci_dev_g(n); | 200 | dev = pci_dev_g(n); |
201 | if ((vendor == PCI_ANY_ID || dev->vendor == vendor) && | 201 | if ((vendor == PCI_ANY_ID || dev->vendor == vendor) && |
202 | (device == PCI_ANY_ID || dev->device == device) && | 202 | (device == PCI_ANY_ID || dev->device == device) && |
203 | (ss_vendor == PCI_ANY_ID || dev->subsystem_vendor == ss_vendor) && | 203 | (ss_vendor == PCI_ANY_ID || dev->subsystem_vendor == ss_vendor) && |
204 | (ss_device == PCI_ANY_ID || dev->subsystem_device == ss_device)) | 204 | (ss_device == PCI_ANY_ID || dev->subsystem_device == ss_device)) |
205 | goto exit; | 205 | goto exit; |
206 | n = n->next; | 206 | n = n->next; |
207 | } | 207 | } |
208 | dev = NULL; | 208 | dev = NULL; |
209 | exit: | 209 | exit: |
210 | up_read(&pci_bus_sem); | 210 | up_read(&pci_bus_sem); |
211 | return dev; | 211 | return dev; |
212 | } | 212 | } |
213 | 213 | ||
214 | /** | 214 | /** |
215 | * pci_find_device - begin or continue searching for a PCI device by vendor/device id | 215 | * pci_find_device - begin or continue searching for a PCI device by vendor/device id |
216 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids | 216 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids |
217 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids | 217 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids |
218 | * @from: Previous PCI device found in search, or %NULL for new search. | 218 | * @from: Previous PCI device found in search, or %NULL for new search. |
219 | * | 219 | * |
220 | * Iterates through the list of known PCI devices. If a PCI device is found | 220 | * Iterates through the list of known PCI devices. If a PCI device is found |
221 | * with a matching @vendor and @device, a pointer to its device structure is | 221 | * with a matching @vendor and @device, a pointer to its device structure is |
222 | * returned. Otherwise, %NULL is returned. | 222 | * returned. Otherwise, %NULL is returned. |
223 | * A new search is initiated by passing %NULL as the @from argument. | 223 | * A new search is initiated by passing %NULL as the @from argument. |
224 | * Otherwise if @from is not %NULL, searches continue from next device | 224 | * Otherwise if @from is not %NULL, searches continue from next device |
225 | * on the global list. | 225 | * on the global list. |
226 | * | 226 | * |
227 | * NOTE: Do not use this function any more; use pci_get_device() instead, as | 227 | * NOTE: Do not use this function any more; use pci_get_device() instead, as |
228 | * the PCI device returned by this function can disappear at any moment in | 228 | * the PCI device returned by this function can disappear at any moment in |
229 | * time. | 229 | * time. |
230 | */ | 230 | */ |
231 | struct pci_dev * | 231 | struct pci_dev * |
232 | pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev *from) | 232 | pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev *from) |
233 | { | 233 | { |
234 | return pci_find_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); | 234 | return pci_find_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); |
235 | } | 235 | } |
236 | 236 | ||
237 | /** | 237 | /** |
238 | * pci_get_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id | 238 | * pci_get_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id |
239 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids | 239 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids |
240 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids | 240 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids |
241 | * @ss_vendor: PCI subsystem vendor id to match, or %PCI_ANY_ID to match all vendor ids | 241 | * @ss_vendor: PCI subsystem vendor id to match, or %PCI_ANY_ID to match all vendor ids |
242 | * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids | 242 | * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids |
243 | * @from: Previous PCI device found in search, or %NULL for new search. | 243 | * @from: Previous PCI device found in search, or %NULL for new search. |
244 | * | 244 | * |
245 | * Iterates through the list of known PCI devices. If a PCI device is found | 245 | * Iterates through the list of known PCI devices. If a PCI device is found |
246 | * with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its | 246 | * with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its |
247 | * device structure is returned, and the reference count to the device is | 247 | * device structure is returned, and the reference count to the device is |
248 | * incremented. Otherwise, %NULL is returned. A new search is initiated by | 248 | * incremented. Otherwise, %NULL is returned. A new search is initiated by |
249 | * passing %NULL as the @from argument. Otherwise if @from is not %NULL, | 249 | * passing %NULL as the @from argument. Otherwise if @from is not %NULL, |
250 | * searches continue from next device on the global list. | 250 | * searches continue from next device on the global list. |
251 | * The reference count for @from is always decremented if it is not %NULL. | 251 | * The reference count for @from is always decremented if it is not %NULL. |
252 | */ | 252 | */ |
253 | struct pci_dev * | 253 | struct pci_dev * |
254 | pci_get_subsys(unsigned int vendor, unsigned int device, | 254 | pci_get_subsys(unsigned int vendor, unsigned int device, |
255 | unsigned int ss_vendor, unsigned int ss_device, | 255 | unsigned int ss_vendor, unsigned int ss_device, |
256 | struct pci_dev *from) | 256 | struct pci_dev *from) |
257 | { | 257 | { |
258 | struct list_head *n; | 258 | struct list_head *n; |
259 | struct pci_dev *dev; | 259 | struct pci_dev *dev; |
260 | 260 | ||
261 | WARN_ON(in_interrupt()); | 261 | WARN_ON(in_interrupt()); |
262 | down_read(&pci_bus_sem); | 262 | down_read(&pci_bus_sem); |
263 | n = from ? from->global_list.next : pci_devices.next; | 263 | n = from ? from->global_list.next : pci_devices.next; |
264 | 264 | ||
265 | while (n && (n != &pci_devices)) { | 265 | while (n && (n != &pci_devices)) { |
266 | dev = pci_dev_g(n); | 266 | dev = pci_dev_g(n); |
267 | if ((vendor == PCI_ANY_ID || dev->vendor == vendor) && | 267 | if ((vendor == PCI_ANY_ID || dev->vendor == vendor) && |
268 | (device == PCI_ANY_ID || dev->device == device) && | 268 | (device == PCI_ANY_ID || dev->device == device) && |
269 | (ss_vendor == PCI_ANY_ID || dev->subsystem_vendor == ss_vendor) && | 269 | (ss_vendor == PCI_ANY_ID || dev->subsystem_vendor == ss_vendor) && |
270 | (ss_device == PCI_ANY_ID || dev->subsystem_device == ss_device)) | 270 | (ss_device == PCI_ANY_ID || dev->subsystem_device == ss_device)) |
271 | goto exit; | 271 | goto exit; |
272 | n = n->next; | 272 | n = n->next; |
273 | } | 273 | } |
274 | dev = NULL; | 274 | dev = NULL; |
275 | exit: | 275 | exit: |
276 | dev = pci_dev_get(dev); | 276 | dev = pci_dev_get(dev); |
277 | up_read(&pci_bus_sem); | 277 | up_read(&pci_bus_sem); |
278 | pci_dev_put(from); | 278 | pci_dev_put(from); |
279 | return dev; | 279 | return dev; |
280 | } | 280 | } |
281 | 281 | ||
282 | /** | 282 | /** |
283 | * pci_get_device - begin or continue searching for a PCI device by vendor/device id | 283 | * pci_get_device - begin or continue searching for a PCI device by vendor/device id |
284 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids | 284 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids |
285 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids | 285 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids |
286 | * @from: Previous PCI device found in search, or %NULL for new search. | 286 | * @from: Previous PCI device found in search, or %NULL for new search. |
287 | * | 287 | * |
288 | * Iterates through the list of known PCI devices. If a PCI device is | 288 | * Iterates through the list of known PCI devices. If a PCI device is |
289 | * found with a matching @vendor and @device, the reference count to the | 289 | * found with a matching @vendor and @device, the reference count to the |
290 | * device is incremented and a pointer to its device structure is returned. | 290 | * device is incremented and a pointer to its device structure is returned. |
291 | * Otherwise, %NULL is returned. A new search is initiated by passing %NULL | 291 | * Otherwise, %NULL is returned. A new search is initiated by passing %NULL |
292 | * as the @from argument. Otherwise if @from is not %NULL, searches continue | 292 | * as the @from argument. Otherwise if @from is not %NULL, searches continue |
293 | * from next device on the global list. The reference count for @from is | 293 | * from next device on the global list. The reference count for @from is |
294 | * always decremented if it is not %NULL. | 294 | * always decremented if it is not %NULL. |
295 | */ | 295 | */ |
296 | struct pci_dev * | 296 | struct pci_dev * |
297 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) | 297 | pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) |
298 | { | 298 | { |
299 | return pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); | 299 | return pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); |
300 | } | 300 | } |
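
A hedged sketch of the intended search loop: the previous hit is passed back in as @from, which hands its reference over to pci_get_device(), so only the device held at the break needs an explicit pci_dev_put() later. The vendor ID and the wanted_devfn variable are purely illustrative.

	struct pci_dev *dev = NULL;

	while ((dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_ANY_ID, dev)) != NULL) {
		if (dev->devfn == wanted_devfn)	/* wanted_devfn: hypothetical */
			break;			/* keep this reference */
	}
	/* On break, dev is referenced and needs pci_dev_put(dev) when done.
	 * If the loop ran to completion, dev is NULL and nothing is held. */
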
301 | 301 | ||
302 | /** | 302 | /** |
303 | * pci_get_device_reverse - begin or continue searching for a PCI device by vendor/device id | 303 | * pci_get_device_reverse - begin or continue searching for a PCI device by vendor/device id |
304 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids | 304 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids |
305 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids | 305 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids |
306 | * @from: Previous PCI device found in search, or %NULL for new search. | 306 | * @from: Previous PCI device found in search, or %NULL for new search. |
307 | * | 307 | * |
308 | * Iterates through the list of known PCI devices in the reverse order of | 308 | * Iterates through the list of known PCI devices in the reverse order of |
309 | * pci_get_device. | 309 | * pci_get_device. |
310 | * If a PCI device is found with a matching @vendor and @device, the reference | 310 | * If a PCI device is found with a matching @vendor and @device, the reference |
311 | * count to the device is incremented and a pointer to its device structure | 311 | * count to the device is incremented and a pointer to its device structure |
312 | * is returned. Otherwise, %NULL is returned. A new search is initiated by | 312 | * is returned. Otherwise, %NULL is returned. A new search is initiated by |
313 | * passing %NULL as the @from argument. Otherwise if @from is not %NULL, | 313 | * passing %NULL as the @from argument. Otherwise if @from is not %NULL, |
314 | * searches continue from next device on the global list. The reference | 314 | * searches continue from next device on the global list. The reference |
315 | * count for @from is always decremented if it is not %NULL. | 315 | * count for @from is always decremented if it is not %NULL. |
316 | */ | 316 | */ |
317 | struct pci_dev * | 317 | struct pci_dev * |
318 | pci_get_device_reverse(unsigned int vendor, unsigned int device, struct pci_dev *from) | 318 | pci_get_device_reverse(unsigned int vendor, unsigned int device, struct pci_dev *from) |
319 | { | 319 | { |
320 | struct list_head *n; | 320 | struct list_head *n; |
321 | struct pci_dev *dev; | 321 | struct pci_dev *dev; |
322 | 322 | ||
323 | WARN_ON(in_interrupt()); | 323 | WARN_ON(in_interrupt()); |
324 | down_read(&pci_bus_sem); | 324 | down_read(&pci_bus_sem); |
325 | n = from ? from->global_list.prev : pci_devices.prev; | 325 | n = from ? from->global_list.prev : pci_devices.prev; |
326 | 326 | ||
327 | while (n && (n != &pci_devices)) { | 327 | while (n && (n != &pci_devices)) { |
328 | dev = pci_dev_g(n); | 328 | dev = pci_dev_g(n); |
329 | if ((vendor == PCI_ANY_ID || dev->vendor == vendor) && | 329 | if ((vendor == PCI_ANY_ID || dev->vendor == vendor) && |
330 | (device == PCI_ANY_ID || dev->device == device)) | 330 | (device == PCI_ANY_ID || dev->device == device)) |
331 | goto exit; | 331 | goto exit; |
332 | n = n->prev; | 332 | n = n->prev; |
333 | } | 333 | } |
334 | dev = NULL; | 334 | dev = NULL; |
335 | exit: | 335 | exit: |
336 | dev = pci_dev_get(dev); | 336 | dev = pci_dev_get(dev); |
337 | up_read(&pci_bus_sem); | 337 | up_read(&pci_bus_sem); |
338 | pci_dev_put(from); | 338 | pci_dev_put(from); |
339 | return dev; | 339 | return dev; |
340 | } | 340 | } |
341 | 341 | ||
342 | /** | 342 | /** |
343 | * pci_find_device_reverse - begin or continue searching for a PCI device by vendor/device id | 343 | * pci_find_device_reverse - begin or continue searching for a PCI device by vendor/device id |
344 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids | 344 | * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids |
345 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids | 345 | * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids |
346 | * @from: Previous PCI device found in search, or %NULL for new search. | 346 | * @from: Previous PCI device found in search, or %NULL for new search. |
347 | * | 347 | * |
348 | * Iterates through the list of known PCI devices in the reverse order of | 348 | * Iterates through the list of known PCI devices in the reverse order of |
349 | * pci_find_device(). | 349 | * pci_find_device(). |
350 | * If a PCI device is found with a matching @vendor and @device, a pointer to | 350 | * If a PCI device is found with a matching @vendor and @device, a pointer to |
351 | * its device structure is returned. Otherwise, %NULL is returned. | 351 | * its device structure is returned. Otherwise, %NULL is returned. |
352 | * A new search is initiated by passing %NULL as the @from argument. | 352 | * A new search is initiated by passing %NULL as the @from argument. |
353 | * Otherwise if @from is not %NULL, searches continue from previous device | 353 | * Otherwise if @from is not %NULL, searches continue from previous device |
354 | * on the global list. | 354 | * on the global list. |
355 | */ | 355 | */ |
356 | struct pci_dev * | 356 | struct pci_dev * |
357 | pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct pci_dev *from) | 357 | pci_find_device_reverse(unsigned int vendor, unsigned int device, const struct pci_dev *from) |
358 | { | 358 | { |
359 | struct list_head *n; | 359 | struct list_head *n; |
360 | struct pci_dev *dev; | 360 | struct pci_dev *dev; |
361 | 361 | ||
362 | WARN_ON(in_interrupt()); | 362 | WARN_ON(in_interrupt()); |
363 | down_read(&pci_bus_sem); | 363 | down_read(&pci_bus_sem); |
364 | n = from ? from->global_list.prev : pci_devices.prev; | 364 | n = from ? from->global_list.prev : pci_devices.prev; |
365 | 365 | ||
366 | while (n && (n != &pci_devices)) { | 366 | while (n && (n != &pci_devices)) { |
367 | dev = pci_dev_g(n); | 367 | dev = pci_dev_g(n); |
368 | if ((vendor == PCI_ANY_ID || dev->vendor == vendor) && | 368 | if ((vendor == PCI_ANY_ID || dev->vendor == vendor) && |
369 | (device == PCI_ANY_ID || dev->device == device)) | 369 | (device == PCI_ANY_ID || dev->device == device)) |
370 | goto exit; | 370 | goto exit; |
371 | n = n->prev; | 371 | n = n->prev; |
372 | } | 372 | } |
373 | dev = NULL; | 373 | dev = NULL; |
374 | exit: | 374 | exit: |
375 | up_read(&pci_bus_sem); | 375 | up_read(&pci_bus_sem); |
376 | return dev; | 376 | return dev; |
377 | } | 377 | } |
378 | 378 | ||
379 | /** | 379 | /** |
380 | * pci_get_class - begin or continue searching for a PCI device by class | 380 | * pci_get_class - begin or continue searching for a PCI device by class |
381 | * @class: search for a PCI device with this class designation | 381 | * @class: search for a PCI device with this class designation |
382 | * @from: Previous PCI device found in search, or %NULL for new search. | 382 | * @from: Previous PCI device found in search, or %NULL for new search. |
383 | * | 383 | * |
384 | * Iterates through the list of known PCI devices. If a PCI device is | 384 | * Iterates through the list of known PCI devices. If a PCI device is |
385 | * found with a matching @class, the reference count to the device is | 385 | * found with a matching @class, the reference count to the device is |
386 | * incremented and a pointer to its device structure is returned. | 386 | * incremented and a pointer to its device structure is returned. |
387 | * Otherwise, %NULL is returned. | 387 | * Otherwise, %NULL is returned. |
388 | * A new search is initiated by passing %NULL as the @from argument. | 388 | * A new search is initiated by passing %NULL as the @from argument. |
389 | * Otherwise if @from is not %NULL, searches continue from next device | 389 | * Otherwise if @from is not %NULL, searches continue from next device |
390 | * on the global list. The reference count for @from is always decremented | 390 | * on the global list. The reference count for @from is always decremented |
391 | * if it is not %NULL. | 391 | * if it is not %NULL. |
392 | */ | 392 | */ |
393 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) | 393 | struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
394 | { | 394 | { |
395 | struct list_head *n; | 395 | struct list_head *n; |
396 | struct pci_dev *dev; | 396 | struct pci_dev *dev; |
397 | 397 | ||
398 | WARN_ON(in_interrupt()); | 398 | WARN_ON(in_interrupt()); |
399 | down_read(&pci_bus_sem); | 399 | down_read(&pci_bus_sem); |
400 | n = from ? from->global_list.next : pci_devices.next; | 400 | n = from ? from->global_list.next : pci_devices.next; |
401 | 401 | ||
402 | while (n && (n != &pci_devices)) { | 402 | while (n && (n != &pci_devices)) { |
403 | dev = pci_dev_g(n); | 403 | dev = pci_dev_g(n); |
404 | if (dev->class == class) | 404 | if (dev->class == class) |
405 | goto exit; | 405 | goto exit; |
406 | n = n->next; | 406 | n = n->next; |
407 | } | 407 | } |
408 | dev = NULL; | 408 | dev = NULL; |
409 | exit: | 409 | exit: |
410 | dev = pci_dev_get(dev); | 410 | dev = pci_dev_get(dev); |
411 | up_read(&pci_bus_sem); | 411 | up_read(&pci_bus_sem); |
412 | pci_dev_put(from); | 412 | pci_dev_put(from); |
413 | return dev; | 413 | return dev; |
414 | } | 414 | } |
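
A sketch of a class-based scan, assuming the usual convention that the 16-bit class constant is shifted left by 8 to form the full 24-bit (base, sub, prog-if) value held in dev->class:

	struct pci_dev *dev = NULL;

	/* walk every ISA bridge in the system; prog-if is 0 for this class */
	while ((dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, dev)) != NULL)
		printk(KERN_INFO "ISA bridge found: %s\n", pci_name(dev));
	/* each pass drops the reference on the device passed back in as @from,
	 * so a loop that runs to completion leaves no reference held */
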
415 | 415 | ||
416 | const struct pci_device_id *pci_find_present(const struct pci_device_id *ids) | 416 | const struct pci_device_id *pci_find_present(const struct pci_device_id *ids) |
417 | { | 417 | { |
418 | struct pci_dev *dev; | 418 | struct pci_dev *dev; |
419 | struct pci_device_id * found = NULL; | 419 | const struct pci_device_id *found = NULL; |
420 | 420 | ||
421 | WARN_ON(in_interrupt()); | 421 | WARN_ON(in_interrupt()); |
422 | down_read(&pci_bus_sem); | 422 | down_read(&pci_bus_sem); |
423 | while (ids->vendor || ids->subvendor || ids->class_mask) { | 423 | while (ids->vendor || ids->subvendor || ids->class_mask) { |
424 | list_for_each_entry(dev, &pci_devices, global_list) { | 424 | list_for_each_entry(dev, &pci_devices, global_list) { |
425 | if ((found = pci_match_one_device(ids, dev)) != NULL) | 425 | if ((found = pci_match_one_device(ids, dev)) != NULL) |
426 | break; | 426 | break; |
427 | } | 427 | } |
428 | ids++; | 428 | ids++; |
429 | } | 429 | } |
430 | up_read(&pci_bus_sem); | 430 | up_read(&pci_bus_sem); |
431 | return found; | 431 | return found; |
432 | } | 432 | } |
433 | 433 | ||
434 | /** | 434 | /** |
435 | * pci_dev_present - Returns 1 if a device matching the device list is present, 0 if not. | 435 | * pci_dev_present - Returns 1 if a device matching the device list is present, 0 if not. |
436 | * @ids: A pointer to a null terminated list of struct pci_device_id structures | 436 | * @ids: A pointer to a null terminated list of struct pci_device_id structures |
437 | * that describe the type of PCI device the caller is trying to find. | 437 | * that describe the type of PCI device the caller is trying to find. |
438 | * | 438 | * |
439 | * Obvious fact: You do not have a reference to any device that might be found | 439 | * Obvious fact: You do not have a reference to any device that might be found |
440 | * by this function, so if that device is removed from the system right after | 440 | * by this function, so if that device is removed from the system right after |
441 | * this function is finished, the value will be stale. Use this function to | 441 | * this function is finished, the value will be stale. Use this function to |
442 | * find devices that are usually built into a system, or for a general hint as | 442 | * find devices that are usually built into a system, or for a general hint as |
443 | * to whether another device happens to be present at this specific moment in time. | 443 | * to whether another device happens to be present at this specific moment in time. |
444 | */ | 444 | */ |
445 | |||
446 | int pci_dev_present(const struct pci_device_id *ids) | 445 | int pci_dev_present(const struct pci_device_id *ids) |
447 | { | 446 | { |
448 | return pci_find_present(ids) == NULL ? 0 : 1; | 447 | return pci_find_present(ids) == NULL ? 0 : 1; |
449 | } | 448 | } |
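
A hedged example of the intended use: a zero-terminated ID table checked once at init time, with no device reference held afterwards. The specific VIA device IDs are only for illustration.

	static const struct pci_device_id via_south_ids[] = {
		{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0), },
		{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596), },
		{ 0, }	/* all-zero sentinel terminates the list */
	};

	if (pci_dev_present(via_south_ids))
		printk(KERN_INFO "VIA south bridge present\n");
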
450 | 449 | ||
451 | EXPORT_SYMBOL(pci_dev_present); | 450 | EXPORT_SYMBOL(pci_dev_present); |
452 | EXPORT_SYMBOL(pci_find_present); | 451 | EXPORT_SYMBOL(pci_find_present); |
453 | 452 | ||
454 | EXPORT_SYMBOL(pci_find_device); | 453 | EXPORT_SYMBOL(pci_find_device); |
455 | EXPORT_SYMBOL(pci_find_device_reverse); | 454 | EXPORT_SYMBOL(pci_find_device_reverse); |
456 | EXPORT_SYMBOL(pci_find_slot); | 455 | EXPORT_SYMBOL(pci_find_slot); |
457 | /* For boot time work */ | 456 | /* For boot time work */ |
458 | EXPORT_SYMBOL(pci_find_bus); | 457 | EXPORT_SYMBOL(pci_find_bus); |

459 | EXPORT_SYMBOL(pci_find_next_bus); | 458 | EXPORT_SYMBOL(pci_find_next_bus); |
460 | /* For everyone */ | 459 | /* For everyone */ |
461 | EXPORT_SYMBOL(pci_get_device); | 460 | EXPORT_SYMBOL(pci_get_device); |
462 | EXPORT_SYMBOL(pci_get_device_reverse); | 461 | EXPORT_SYMBOL(pci_get_device_reverse); |
463 | EXPORT_SYMBOL(pci_get_subsys); | 462 | EXPORT_SYMBOL(pci_get_subsys); |
464 | EXPORT_SYMBOL(pci_get_slot); | 463 | EXPORT_SYMBOL(pci_get_slot); |
465 | EXPORT_SYMBOL(pci_get_bus_and_slot); | 464 | EXPORT_SYMBOL(pci_get_bus_and_slot); |
466 | EXPORT_SYMBOL(pci_get_class); | 465 | EXPORT_SYMBOL(pci_get_class); |
467 | 466 |
include/asm-generic/vmlinux.lds.h
1 | #ifndef LOAD_OFFSET | 1 | #ifndef LOAD_OFFSET |
2 | #define LOAD_OFFSET 0 | 2 | #define LOAD_OFFSET 0 |
3 | #endif | 3 | #endif |
4 | 4 | ||
5 | #ifndef VMLINUX_SYMBOL | 5 | #ifndef VMLINUX_SYMBOL |
6 | #define VMLINUX_SYMBOL(_sym_) _sym_ | 6 | #define VMLINUX_SYMBOL(_sym_) _sym_ |
7 | #endif | 7 | #endif |
8 | 8 | ||
9 | /* Align . to an 8 byte boundary, which equals the maximum function alignment. */ | 9 | /* Align . to an 8 byte boundary, which equals the maximum function alignment. */ |
10 | #define ALIGN_FUNCTION() . = ALIGN(8) | 10 | #define ALIGN_FUNCTION() . = ALIGN(8) |
11 | 11 | ||
12 | #define RODATA \ | 12 | #define RODATA \ |
13 | . = ALIGN(4096); \ | 13 | . = ALIGN(4096); \ |
14 | .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ | 14 | .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ |
15 | VMLINUX_SYMBOL(__start_rodata) = .; \ | 15 | VMLINUX_SYMBOL(__start_rodata) = .; \ |
16 | *(.rodata) *(.rodata.*) \ | 16 | *(.rodata) *(.rodata.*) \ |
17 | *(__vermagic) /* Kernel version magic */ \ | 17 | *(__vermagic) /* Kernel version magic */ \ |
18 | } \ | 18 | } \ |
19 | \ | 19 | \ |
20 | .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \ | 20 | .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \ |
21 | *(.rodata1) \ | 21 | *(.rodata1) \ |
22 | } \ | 22 | } \ |
23 | \ | 23 | \ |
24 | /* PCI quirks */ \ | 24 | /* PCI quirks */ \ |
25 | .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ | 25 | .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ |
26 | VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ | 26 | VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ |
27 | *(.pci_fixup_early) \ | 27 | *(.pci_fixup_early) \ |
28 | VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \ | 28 | VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \ |
29 | VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \ | 29 | VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \ |
30 | *(.pci_fixup_header) \ | 30 | *(.pci_fixup_header) \ |
31 | VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \ | 31 | VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \ |
32 | VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \ | 32 | VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \ |
33 | *(.pci_fixup_final) \ | 33 | *(.pci_fixup_final) \ |
34 | VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \ | 34 | VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \ |
35 | VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \ | 35 | VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \ |
36 | *(.pci_fixup_enable) \ | 36 | *(.pci_fixup_enable) \ |
37 | VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \ | 37 | VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \ |
38 | VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \ | ||
39 | *(.pci_fixup_resume) \ | ||
40 | VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \ | ||
38 | } \ | 41 | } \ |
39 | \ | 42 | \ |
40 | /* RapidIO route ops */ \ | 43 | /* RapidIO route ops */ \ |
41 | .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \ | 44 | .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \ |
42 | VMLINUX_SYMBOL(__start_rio_route_ops) = .; \ | 45 | VMLINUX_SYMBOL(__start_rio_route_ops) = .; \ |
43 | *(.rio_route_ops) \ | 46 | *(.rio_route_ops) \ |
44 | VMLINUX_SYMBOL(__end_rio_route_ops) = .; \ | 47 | VMLINUX_SYMBOL(__end_rio_route_ops) = .; \ |
45 | } \ | 48 | } \ |
46 | \ | 49 | \ |
47 | /* Kernel symbol table: Normal symbols */ \ | 50 | /* Kernel symbol table: Normal symbols */ \ |
48 | __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ | 51 | __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ |
49 | VMLINUX_SYMBOL(__start___ksymtab) = .; \ | 52 | VMLINUX_SYMBOL(__start___ksymtab) = .; \ |
50 | *(__ksymtab) \ | 53 | *(__ksymtab) \ |
51 | VMLINUX_SYMBOL(__stop___ksymtab) = .; \ | 54 | VMLINUX_SYMBOL(__stop___ksymtab) = .; \ |
52 | } \ | 55 | } \ |
53 | \ | 56 | \ |
54 | /* Kernel symbol table: GPL-only symbols */ \ | 57 | /* Kernel symbol table: GPL-only symbols */ \ |
55 | __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ | 58 | __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ |
56 | VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ | 59 | VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ |
57 | *(__ksymtab_gpl) \ | 60 | *(__ksymtab_gpl) \ |
58 | VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ | 61 | VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ |
59 | } \ | 62 | } \ |
60 | \ | 63 | \ |
61 | /* Kernel symbol table: Normal unused symbols */ \ | 64 | /* Kernel symbol table: Normal unused symbols */ \ |
62 | __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ | 65 | __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ |
63 | VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ | 66 | VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ |
64 | *(__ksymtab_unused) \ | 67 | *(__ksymtab_unused) \ |
65 | VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ | 68 | VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ |
66 | } \ | 69 | } \ |
67 | \ | 70 | \ |
68 | /* Kernel symbol table: GPL-only unused symbols */ \ | 71 | /* Kernel symbol table: GPL-only unused symbols */ \ |
69 | __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ | 72 | __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ |
70 | VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ | 73 | VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ |
71 | *(__ksymtab_unused_gpl) \ | 74 | *(__ksymtab_unused_gpl) \ |
72 | VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ | 75 | VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ |
73 | } \ | 76 | } \ |
74 | \ | 77 | \ |
75 | /* Kernel symbol table: GPL-future-only symbols */ \ | 78 | /* Kernel symbol table: GPL-future-only symbols */ \ |
76 | __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ | 79 | __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ |
77 | VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ | 80 | VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ |
78 | *(__ksymtab_gpl_future) \ | 81 | *(__ksymtab_gpl_future) \ |
79 | VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ | 82 | VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ |
80 | } \ | 83 | } \ |
81 | \ | 84 | \ |
82 | /* Kernel symbol table: Normal symbols */ \ | 85 | /* Kernel symbol table: Normal symbols */ \ |
83 | __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ | 86 | __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ |
84 | VMLINUX_SYMBOL(__start___kcrctab) = .; \ | 87 | VMLINUX_SYMBOL(__start___kcrctab) = .; \ |
85 | *(__kcrctab) \ | 88 | *(__kcrctab) \ |
86 | VMLINUX_SYMBOL(__stop___kcrctab) = .; \ | 89 | VMLINUX_SYMBOL(__stop___kcrctab) = .; \ |
87 | } \ | 90 | } \ |
88 | \ | 91 | \ |
89 | /* Kernel symbol table: GPL-only symbols */ \ | 92 | /* Kernel symbol table: GPL-only symbols */ \ |
90 | __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ | 93 | __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ |
91 | VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ | 94 | VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ |
92 | *(__kcrctab_gpl) \ | 95 | *(__kcrctab_gpl) \ |
93 | VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ | 96 | VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ |
94 | } \ | 97 | } \ |
95 | \ | 98 | \ |
96 | /* Kernel symbol table: Normal unused symbols */ \ | 99 | /* Kernel symbol table: Normal unused symbols */ \ |
97 | __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ | 100 | __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ |
98 | VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ | 101 | VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ |
99 | *(__kcrctab_unused) \ | 102 | *(__kcrctab_unused) \ |
100 | VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ | 103 | VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ |
101 | } \ | 104 | } \ |
102 | \ | 105 | \ |
103 | /* Kernel symbol table: GPL-only unused symbols */ \ | 106 | /* Kernel symbol table: GPL-only unused symbols */ \ |
104 | __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ | 107 | __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ |
105 | VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ | 108 | VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ |
106 | *(__kcrctab_unused_gpl) \ | 109 | *(__kcrctab_unused_gpl) \ |
107 | VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ | 110 | VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ |
108 | } \ | 111 | } \ |
109 | \ | 112 | \ |
110 | /* Kernel symbol table: GPL-future-only symbols */ \ | 113 | /* Kernel symbol table: GPL-future-only symbols */ \ |
111 | __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ | 114 | __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ |
112 | VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ | 115 | VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ |
113 | *(__kcrctab_gpl_future) \ | 116 | *(__kcrctab_gpl_future) \ |
114 | VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ | 117 | VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ |
115 | } \ | 118 | } \ |
116 | \ | 119 | \ |
117 | /* Kernel symbol table: strings */ \ | 120 | /* Kernel symbol table: strings */ \ |
118 | __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ | 121 | __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \ |
119 | *(__ksymtab_strings) \ | 122 | *(__ksymtab_strings) \ |
120 | } \ | 123 | } \ |
121 | \ | 124 | \ |
122 | /* Built-in module parameters. */ \ | 125 | /* Built-in module parameters. */ \ |
123 | __param : AT(ADDR(__param) - LOAD_OFFSET) { \ | 126 | __param : AT(ADDR(__param) - LOAD_OFFSET) { \ |
124 | VMLINUX_SYMBOL(__start___param) = .; \ | 127 | VMLINUX_SYMBOL(__start___param) = .; \ |
125 | *(__param) \ | 128 | *(__param) \ |
126 | VMLINUX_SYMBOL(__stop___param) = .; \ | 129 | VMLINUX_SYMBOL(__stop___param) = .; \ |
127 | VMLINUX_SYMBOL(__end_rodata) = .; \ | 130 | VMLINUX_SYMBOL(__end_rodata) = .; \ |
128 | } \ | 131 | } \ |
129 | \ | 132 | \ |
130 | . = ALIGN(4096); | 133 | . = ALIGN(4096); |
131 | 134 | ||
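
The .pci_fixup_resume output section added to the RODATA macro above gives the PCI core a bounded table (__start_pci_fixups_resume .. __end_pci_fixups_resume) of quirks to replay at resume time. A hedged sketch of how such a quirk would be declared, assuming the DECLARE_PCI_FIXUP_RESUME helper introduced by this patch mirrors the existing early/header/final/enable variants; the register offset and value are made up for the example.

	static void quirk_example_resume(struct pci_dev *dev)
	{
		/* re-apply configuration the firmware lost across suspend/resume;
		 * the register and value below are purely illustrative */
		pci_write_config_byte(dev, 0x55, 0x1c);
	}
	DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_example_resume);
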
132 | #define SECURITY_INIT \ | 135 | #define SECURITY_INIT \ |
133 | .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \ | 136 | .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \ |
134 | VMLINUX_SYMBOL(__security_initcall_start) = .; \ | 137 | VMLINUX_SYMBOL(__security_initcall_start) = .; \ |
135 | *(.security_initcall.init) \ | 138 | *(.security_initcall.init) \ |
136 | VMLINUX_SYMBOL(__security_initcall_end) = .; \ | 139 | VMLINUX_SYMBOL(__security_initcall_end) = .; \ |
137 | } | 140 | } |
138 | 141 | ||
139 | /* sched.text is aligned to function alignment to ensure we have the same | 142 | /* sched.text is aligned to function alignment to ensure we have the same |
140 | * address even at the second ld pass when generating System.map */ | 143 | * address even at the second ld pass when generating System.map */ |
141 | #define SCHED_TEXT \ | 144 | #define SCHED_TEXT \ |
142 | ALIGN_FUNCTION(); \ | 145 | ALIGN_FUNCTION(); \ |
143 | VMLINUX_SYMBOL(__sched_text_start) = .; \ | 146 | VMLINUX_SYMBOL(__sched_text_start) = .; \ |
144 | *(.sched.text) \ | 147 | *(.sched.text) \ |
145 | VMLINUX_SYMBOL(__sched_text_end) = .; | 148 | VMLINUX_SYMBOL(__sched_text_end) = .; |
146 | 149 | ||
147 | /* spinlock.text is aligned to function alignment to ensure we have the same | 150 | /* spinlock.text is aligned to function alignment to ensure we have the same |
148 | * address even at the second ld pass when generating System.map */ | 151 | * address even at the second ld pass when generating System.map */ |
149 | #define LOCK_TEXT \ | 152 | #define LOCK_TEXT \ |
150 | ALIGN_FUNCTION(); \ | 153 | ALIGN_FUNCTION(); \ |
151 | VMLINUX_SYMBOL(__lock_text_start) = .; \ | 154 | VMLINUX_SYMBOL(__lock_text_start) = .; \ |
152 | *(.spinlock.text) \ | 155 | *(.spinlock.text) \ |
153 | VMLINUX_SYMBOL(__lock_text_end) = .; | 156 | VMLINUX_SYMBOL(__lock_text_end) = .; |
154 | 157 | ||
155 | #define KPROBES_TEXT \ | 158 | #define KPROBES_TEXT \ |
156 | ALIGN_FUNCTION(); \ | 159 | ALIGN_FUNCTION(); \ |
157 | VMLINUX_SYMBOL(__kprobes_text_start) = .; \ | 160 | VMLINUX_SYMBOL(__kprobes_text_start) = .; \ |
158 | *(.kprobes.text) \ | 161 | *(.kprobes.text) \ |
159 | VMLINUX_SYMBOL(__kprobes_text_end) = .; | 162 | VMLINUX_SYMBOL(__kprobes_text_end) = .; |
160 | 163 | ||
161 | /* DWARF debug sections. | 164 | /* DWARF debug sections. |
162 | Symbols in the DWARF debugging sections are relative to | 165 | Symbols in the DWARF debugging sections are relative to |
163 | the beginning of the section so we begin them at 0. */ | 166 | the beginning of the section so we begin them at 0. */ |
164 | #define DWARF_DEBUG \ | 167 | #define DWARF_DEBUG \ |
165 | /* DWARF 1 */ \ | 168 | /* DWARF 1 */ \ |
166 | .debug 0 : { *(.debug) } \ | 169 | .debug 0 : { *(.debug) } \ |
167 | .line 0 : { *(.line) } \ | 170 | .line 0 : { *(.line) } \ |
168 | /* GNU DWARF 1 extensions */ \ | 171 | /* GNU DWARF 1 extensions */ \ |
169 | .debug_srcinfo 0 : { *(.debug_srcinfo) } \ | 172 | .debug_srcinfo 0 : { *(.debug_srcinfo) } \ |
170 | .debug_sfnames 0 : { *(.debug_sfnames) } \ | 173 | .debug_sfnames 0 : { *(.debug_sfnames) } \ |
171 | /* DWARF 1.1 and DWARF 2 */ \ | 174 | /* DWARF 1.1 and DWARF 2 */ \ |
172 | .debug_aranges 0 : { *(.debug_aranges) } \ | 175 | .debug_aranges 0 : { *(.debug_aranges) } \ |
173 | .debug_pubnames 0 : { *(.debug_pubnames) } \ | 176 | .debug_pubnames 0 : { *(.debug_pubnames) } \ |
174 | /* DWARF 2 */ \ | 177 | /* DWARF 2 */ \ |
175 | .debug_info 0 : { *(.debug_info \ | 178 | .debug_info 0 : { *(.debug_info \ |
176 | .gnu.linkonce.wi.*) } \ | 179 | .gnu.linkonce.wi.*) } \ |
177 | .debug_abbrev 0 : { *(.debug_abbrev) } \ | 180 | .debug_abbrev 0 : { *(.debug_abbrev) } \ |
178 | .debug_line 0 : { *(.debug_line) } \ | 181 | .debug_line 0 : { *(.debug_line) } \ |
179 | .debug_frame 0 : { *(.debug_frame) } \ | 182 | .debug_frame 0 : { *(.debug_frame) } \ |
180 | .debug_str 0 : { *(.debug_str) } \ | 183 | .debug_str 0 : { *(.debug_str) } \ |
181 | .debug_loc 0 : { *(.debug_loc) } \ | 184 | .debug_loc 0 : { *(.debug_loc) } \ |
182 | .debug_macinfo 0 : { *(.debug_macinfo) } \ | 185 | .debug_macinfo 0 : { *(.debug_macinfo) } \ |
183 | /* SGI/MIPS DWARF 2 extensions */ \ | 186 | /* SGI/MIPS DWARF 2 extensions */ \ |
184 | .debug_weaknames 0 : { *(.debug_weaknames) } \ | 187 | .debug_weaknames 0 : { *(.debug_weaknames) } \ |
185 | .debug_funcnames 0 : { *(.debug_funcnames) } \ | 188 | .debug_funcnames 0 : { *(.debug_funcnames) } \ |
186 | .debug_typenames 0 : { *(.debug_typenames) } \ | 189 | .debug_typenames 0 : { *(.debug_typenames) } \ |
187 | .debug_varnames 0 : { *(.debug_varnames) } \ | 190 | .debug_varnames 0 : { *(.debug_varnames) } \ |
188 | 191 | ||
189 | /* Stabs debugging sections. */ | 192 | /* Stabs debugging sections. */ |
190 | #define STABS_DEBUG \ | 193 | #define STABS_DEBUG \ |
191 | .stab 0 : { *(.stab) } \ | 194 | .stab 0 : { *(.stab) } \ |
192 | .stabstr 0 : { *(.stabstr) } \ | 195 | .stabstr 0 : { *(.stabstr) } \ |
193 | .stab.excl 0 : { *(.stab.excl) } \ | 196 | .stab.excl 0 : { *(.stab.excl) } \ |
194 | .stab.exclstr 0 : { *(.stab.exclstr) } \ | 197 | .stab.exclstr 0 : { *(.stab.exclstr) } \ |
195 | .stab.index 0 : { *(.stab.index) } \ | 198 | .stab.index 0 : { *(.stab.index) } \ |
196 | .stab.indexstr 0 : { *(.stab.indexstr) } \ | 199 | .stab.indexstr 0 : { *(.stab.indexstr) } \ |
197 | .comment 0 : { *(.comment) } | 200 | .comment 0 : { *(.comment) } |
198 | 201 | ||
199 | #define BUG_TABLE \ | 202 | #define BUG_TABLE \ |
200 | . = ALIGN(8); \ | 203 | . = ALIGN(8); \ |
201 | __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \ | 204 | __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \ |
202 | __start___bug_table = .; \ | 205 | __start___bug_table = .; \ |
203 | *(__bug_table) \ | 206 | *(__bug_table) \ |
204 | __stop___bug_table = .; \ | 207 | __stop___bug_table = .; \ |
205 | } | 208 | } |
206 | 209 | ||
207 | #define NOTES \ | 210 | #define NOTES \ |
208 | .notes : { *(.note.*) } :note | 211 | .notes : { *(.note.*) } :note |
209 | 212 | ||
210 | #define INITCALLS \ | 213 | #define INITCALLS \ |
211 | *(.initcall0.init) \ | 214 | *(.initcall0.init) \ |
212 | *(.initcall0s.init) \ | 215 | *(.initcall0s.init) \ |
213 | *(.initcall1.init) \ | 216 | *(.initcall1.init) \ |
214 | *(.initcall1s.init) \ | 217 | *(.initcall1s.init) \ |
215 | *(.initcall2.init) \ | 218 | *(.initcall2.init) \ |
216 | *(.initcall2s.init) \ | 219 | *(.initcall2s.init) \ |
217 | *(.initcall3.init) \ | 220 | *(.initcall3.init) \ |
218 | *(.initcall3s.init) \ | 221 | *(.initcall3s.init) \ |
219 | *(.initcall4.init) \ | 222 | *(.initcall4.init) \ |
220 | *(.initcall4s.init) \ | 223 | *(.initcall4s.init) \ |
221 | *(.initcall5.init) \ | 224 | *(.initcall5.init) \ |
222 | *(.initcall5s.init) \ | 225 | *(.initcall5s.init) \ |
223 | *(.initcallrootfs.init) \ | 226 | *(.initcallrootfs.init) \ |
224 | *(.initcall6.init) \ | 227 | *(.initcall6.init) \ |
225 | *(.initcall6s.init) \ | 228 | *(.initcall6s.init) \ |
226 | *(.initcall7.init) \ | 229 | *(.initcall7.init) \ |
227 | *(.initcall7s.init) | 230 | *(.initcall7s.init) |
228 | 231 | ||
229 | 232 |
include/linux/pci.h
1 | /* | 1 | /* |
2 | * pci.h | 2 | * pci.h |
3 | * | 3 | * |
4 | * PCI defines and function prototypes | 4 | * PCI defines and function prototypes |
5 | * Copyright 1994, Drew Eckhardt | 5 | * Copyright 1994, Drew Eckhardt |
6 | * Copyright 1997--1999 Martin Mares <mj@ucw.cz> | 6 | * Copyright 1997--1999 Martin Mares <mj@ucw.cz> |
7 | * | 7 | * |
8 | * For more information, please consult the following manuals (look at | 8 | * For more information, please consult the following manuals (look at |
9 | * http://www.pcisig.com/ for how to get them): | 9 | * http://www.pcisig.com/ for how to get them): |
10 | * | 10 | * |
11 | * PCI BIOS Specification | 11 | * PCI BIOS Specification |
12 | * PCI Local Bus Specification | 12 | * PCI Local Bus Specification |
13 | * PCI to PCI Bridge Specification | 13 | * PCI to PCI Bridge Specification |
14 | * PCI System Design Guide | 14 | * PCI System Design Guide |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #ifndef LINUX_PCI_H | 17 | #ifndef LINUX_PCI_H |
18 | #define LINUX_PCI_H | 18 | #define LINUX_PCI_H |
19 | 19 | ||
20 | /* Include the pci register defines */ | 20 | /* Include the pci register defines */ |
21 | #include <linux/pci_regs.h> | 21 | #include <linux/pci_regs.h> |
22 | 22 | ||
23 | /* Include the ID list */ | 23 | /* Include the ID list */ |
24 | #include <linux/pci_ids.h> | 24 | #include <linux/pci_ids.h> |
25 | 25 | ||
26 | /* | 26 | /* |
27 | * The PCI interface treats multi-function devices as independent | 27 | * The PCI interface treats multi-function devices as independent |
28 | * devices. The slot/function address of each device is encoded | 28 | * devices. The slot/function address of each device is encoded |
29 | * in a single byte as follows: | 29 | * in a single byte as follows: |
30 | * | 30 | * |
31 | * 7:3 = slot | 31 | * 7:3 = slot |
32 | * 2:0 = function | 32 | * 2:0 = function |
33 | */ | 33 | */ |
34 | #define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) | 34 | #define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07)) |
35 | #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) | 35 | #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) |
36 | #define PCI_FUNC(devfn) ((devfn) & 0x07) | 36 | #define PCI_FUNC(devfn) ((devfn) & 0x07) |
37 | 37 | ||
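
A worked example of the encoding described above (slot and function values chosen for illustration):

	unsigned int devfn = PCI_DEVFN(3, 1);	/* (3 << 3) | 1 == 0x19 */
	unsigned int slot  = PCI_SLOT(devfn);	/* 3 */
	unsigned int func  = PCI_FUNC(devfn);	/* 1 */
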
38 | /* Ioctls for /proc/bus/pci/X/Y nodes. */ | 38 | /* Ioctls for /proc/bus/pci/X/Y nodes. */ |
39 | #define PCIIOC_BASE ('P' << 24 | 'C' << 16 | 'I' << 8) | 39 | #define PCIIOC_BASE ('P' << 24 | 'C' << 16 | 'I' << 8) |
40 | #define PCIIOC_CONTROLLER (PCIIOC_BASE | 0x00) /* Get controller for PCI device. */ | 40 | #define PCIIOC_CONTROLLER (PCIIOC_BASE | 0x00) /* Get controller for PCI device. */ |
41 | #define PCIIOC_MMAP_IS_IO (PCIIOC_BASE | 0x01) /* Set mmap state to I/O space. */ | 41 | #define PCIIOC_MMAP_IS_IO (PCIIOC_BASE | 0x01) /* Set mmap state to I/O space. */ |
42 | #define PCIIOC_MMAP_IS_MEM (PCIIOC_BASE | 0x02) /* Set mmap state to MEM space. */ | 42 | #define PCIIOC_MMAP_IS_MEM (PCIIOC_BASE | 0x02) /* Set mmap state to MEM space. */ |
43 | #define PCIIOC_WRITE_COMBINE (PCIIOC_BASE | 0x03) /* Enable/disable write-combining. */ | 43 | #define PCIIOC_WRITE_COMBINE (PCIIOC_BASE | 0x03) /* Enable/disable write-combining. */ |
44 | 44 | ||
45 | #ifdef __KERNEL__ | 45 | #ifdef __KERNEL__ |
46 | 46 | ||
47 | #include <linux/mod_devicetable.h> | 47 | #include <linux/mod_devicetable.h> |
48 | 48 | ||
49 | #include <linux/types.h> | 49 | #include <linux/types.h> |
50 | #include <linux/ioport.h> | 50 | #include <linux/ioport.h> |
51 | #include <linux/list.h> | 51 | #include <linux/list.h> |
52 | #include <linux/compiler.h> | 52 | #include <linux/compiler.h> |
53 | #include <linux/errno.h> | 53 | #include <linux/errno.h> |
54 | #include <asm/atomic.h> | 54 | #include <asm/atomic.h> |
55 | #include <linux/device.h> | 55 | #include <linux/device.h> |
56 | 56 | ||
57 | /* File state for mmap()s on /proc/bus/pci/X/Y */ | 57 | /* File state for mmap()s on /proc/bus/pci/X/Y */ |
58 | enum pci_mmap_state { | 58 | enum pci_mmap_state { |
59 | pci_mmap_io, | 59 | pci_mmap_io, |
60 | pci_mmap_mem | 60 | pci_mmap_mem |
61 | }; | 61 | }; |
62 | 62 | ||
63 | /* This defines the direction arg to the DMA mapping routines. */ | 63 | /* This defines the direction arg to the DMA mapping routines. */ |
64 | #define PCI_DMA_BIDIRECTIONAL 0 | 64 | #define PCI_DMA_BIDIRECTIONAL 0 |
65 | #define PCI_DMA_TODEVICE 1 | 65 | #define PCI_DMA_TODEVICE 1 |
66 | #define PCI_DMA_FROMDEVICE 2 | 66 | #define PCI_DMA_FROMDEVICE 2 |
67 | #define PCI_DMA_NONE 3 | 67 | #define PCI_DMA_NONE 3 |
68 | 68 | ||
69 | #define DEVICE_COUNT_COMPATIBLE 4 | 69 | #define DEVICE_COUNT_COMPATIBLE 4 |
70 | #define DEVICE_COUNT_RESOURCE 12 | 70 | #define DEVICE_COUNT_RESOURCE 12 |
71 | 71 | ||
72 | typedef int __bitwise pci_power_t; | 72 | typedef int __bitwise pci_power_t; |
73 | 73 | ||
74 | #define PCI_D0 ((pci_power_t __force) 0) | 74 | #define PCI_D0 ((pci_power_t __force) 0) |
75 | #define PCI_D1 ((pci_power_t __force) 1) | 75 | #define PCI_D1 ((pci_power_t __force) 1) |
76 | #define PCI_D2 ((pci_power_t __force) 2) | 76 | #define PCI_D2 ((pci_power_t __force) 2) |
77 | #define PCI_D3hot ((pci_power_t __force) 3) | 77 | #define PCI_D3hot ((pci_power_t __force) 3) |
78 | #define PCI_D3cold ((pci_power_t __force) 4) | 78 | #define PCI_D3cold ((pci_power_t __force) 4) |
79 | #define PCI_UNKNOWN ((pci_power_t __force) 5) | 79 | #define PCI_UNKNOWN ((pci_power_t __force) 5) |
80 | #define PCI_POWER_ERROR ((pci_power_t __force) -1) | 80 | #define PCI_POWER_ERROR ((pci_power_t __force) -1) |
81 | 81 | ||
82 | /** The pci_channel state describes connectivity between the CPU and | 82 | /** The pci_channel state describes connectivity between the CPU and |
83 | * the pci device. If some PCI bus between here and the pci device | 83 | * the pci device. If some PCI bus between here and the pci device |
84 | * has crashed or locked up, this info is reflected here. | 84 | * has crashed or locked up, this info is reflected here. |
85 | */ | 85 | */ |
86 | typedef unsigned int __bitwise pci_channel_state_t; | 86 | typedef unsigned int __bitwise pci_channel_state_t; |
87 | 87 | ||
88 | enum pci_channel_state { | 88 | enum pci_channel_state { |
89 | /* I/O channel is in normal state */ | 89 | /* I/O channel is in normal state */ |
90 | pci_channel_io_normal = (__force pci_channel_state_t) 1, | 90 | pci_channel_io_normal = (__force pci_channel_state_t) 1, |
91 | 91 | ||
92 | /* I/O to channel is blocked */ | 92 | /* I/O to channel is blocked */ |
93 | pci_channel_io_frozen = (__force pci_channel_state_t) 2, | 93 | pci_channel_io_frozen = (__force pci_channel_state_t) 2, |
94 | 94 | ||
95 | /* PCI card is dead */ | 95 | /* PCI card is dead */ |
96 | pci_channel_io_perm_failure = (__force pci_channel_state_t) 3, | 96 | pci_channel_io_perm_failure = (__force pci_channel_state_t) 3, |
97 | }; | 97 | }; |
98 | 98 | ||
99 | typedef unsigned short __bitwise pci_bus_flags_t; | 99 | typedef unsigned short __bitwise pci_bus_flags_t; |
100 | enum pci_bus_flags { | 100 | enum pci_bus_flags { |
101 | PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1, | 101 | PCI_BUS_FLAGS_NO_MSI = (__force pci_bus_flags_t) 1, |
102 | }; | 102 | }; |
103 | 103 | ||
104 | struct pci_cap_saved_state { | 104 | struct pci_cap_saved_state { |
105 | struct hlist_node next; | 105 | struct hlist_node next; |
106 | char cap_nr; | 106 | char cap_nr; |
107 | u32 data[0]; | 107 | u32 data[0]; |
108 | }; | 108 | }; |
109 | 109 | ||
110 | /* | 110 | /* |
111 | * The pci_dev structure is used to describe PCI devices. | 111 | * The pci_dev structure is used to describe PCI devices. |
112 | */ | 112 | */ |
113 | struct pci_dev { | 113 | struct pci_dev { |
114 | struct list_head global_list; /* node in list of all PCI devices */ | 114 | struct list_head global_list; /* node in list of all PCI devices */ |
115 | struct list_head bus_list; /* node in per-bus list */ | 115 | struct list_head bus_list; /* node in per-bus list */ |
116 | struct pci_bus *bus; /* bus this device is on */ | 116 | struct pci_bus *bus; /* bus this device is on */ |
117 | struct pci_bus *subordinate; /* bus this device bridges to */ | 117 | struct pci_bus *subordinate; /* bus this device bridges to */ |
118 | 118 | ||
119 | void *sysdata; /* hook for sys-specific extension */ | 119 | void *sysdata; /* hook for sys-specific extension */ |
120 | struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ | 120 | struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ |
121 | 121 | ||
122 | unsigned int devfn; /* encoded device & function index */ | 122 | unsigned int devfn; /* encoded device & function index */ |
123 | unsigned short vendor; | 123 | unsigned short vendor; |
124 | unsigned short device; | 124 | unsigned short device; |
125 | unsigned short subsystem_vendor; | 125 | unsigned short subsystem_vendor; |
126 | unsigned short subsystem_device; | 126 | unsigned short subsystem_device; |
127 | unsigned int class; /* 3 bytes: (base,sub,prog-if) */ | 127 | unsigned int class; /* 3 bytes: (base,sub,prog-if) */ |
128 | u8 hdr_type; /* PCI header type (`multi' flag masked out) */ | 128 | u8 hdr_type; /* PCI header type (`multi' flag masked out) */ |
129 | u8 rom_base_reg; /* which config register controls the ROM */ | 129 | u8 rom_base_reg; /* which config register controls the ROM */ |
130 | u8 pin; /* which interrupt pin this device uses */ | 130 | u8 pin; /* which interrupt pin this device uses */ |
131 | 131 | ||
132 | struct pci_driver *driver; /* which driver has allocated this device */ | 132 | struct pci_driver *driver; /* which driver has allocated this device */ |
133 | u64 dma_mask; /* Mask of the bits of bus address this | 133 | u64 dma_mask; /* Mask of the bits of bus address this |
134 | device implements. Normally this is | 134 | device implements. Normally this is |
135 | 0xffffffff. You only need to change | 135 | 0xffffffff. You only need to change |
136 | this if your device has broken DMA | 136 | this if your device has broken DMA |
137 | or supports 64-bit transfers. */ | 137 | or supports 64-bit transfers. */ |
138 | 138 | ||
139 | pci_power_t current_state; /* Current operating state. In ACPI-speak, | 139 | pci_power_t current_state; /* Current operating state. In ACPI-speak, |
140 | this is D0-D3, D0 being fully functional, | 140 | this is D0-D3, D0 being fully functional, |
141 | and D3 being off. */ | 141 | and D3 being off. */ |
142 | 142 | ||
143 | pci_channel_state_t error_state; /* current connectivity state */ | 143 | pci_channel_state_t error_state; /* current connectivity state */ |
144 | struct device dev; /* Generic device interface */ | 144 | struct device dev; /* Generic device interface */ |
145 | 145 | ||
146 | /* device is compatible with these IDs */ | 146 | /* device is compatible with these IDs */ |
147 | unsigned short vendor_compatible[DEVICE_COUNT_COMPATIBLE]; | 147 | unsigned short vendor_compatible[DEVICE_COUNT_COMPATIBLE]; |
148 | unsigned short device_compatible[DEVICE_COUNT_COMPATIBLE]; | 148 | unsigned short device_compatible[DEVICE_COUNT_COMPATIBLE]; |
149 | 149 | ||
150 | int cfg_size; /* Size of configuration space */ | 150 | int cfg_size; /* Size of configuration space */ |
151 | 151 | ||
152 | /* | 152 | /* |
153 | * Instead of touching interrupt line and base address registers | 153 | * Instead of touching interrupt line and base address registers |
154 | * directly, use the values stored here. They might be different! | 154 | * directly, use the values stored here. They might be different! |
155 | */ | 155 | */ |
156 | unsigned int irq; | 156 | unsigned int irq; |
157 | struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ | 157 | struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ |
158 | 158 | ||
159 | /* These fields are used by common fixups */ | 159 | /* These fields are used by common fixups */ |
160 | unsigned int transparent:1; /* Transparent PCI bridge */ | 160 | unsigned int transparent:1; /* Transparent PCI bridge */ |
161 | unsigned int multifunction:1;/* Part of multi-function device */ | 161 | unsigned int multifunction:1;/* Part of multi-function device */ |
162 | /* keep track of device state */ | 162 | /* keep track of device state */ |
163 | unsigned int is_busmaster:1; /* device is busmaster */ | 163 | unsigned int is_busmaster:1; /* device is busmaster */ |
164 | unsigned int no_msi:1; /* device may not use msi */ | 164 | unsigned int no_msi:1; /* device may not use msi */ |
165 | unsigned int no_d1d2:1; /* only allow d0 or d3 */ | 165 | unsigned int no_d1d2:1; /* only allow d0 or d3 */ |
166 | unsigned int block_ucfg_access:1; /* userspace config space access is blocked */ | 166 | unsigned int block_ucfg_access:1; /* userspace config space access is blocked */ |
167 | unsigned int broken_parity_status:1; /* Device generates false positive parity */ | 167 | unsigned int broken_parity_status:1; /* Device generates false positive parity */ |
168 | unsigned int msi_enabled:1; | 168 | unsigned int msi_enabled:1; |
169 | unsigned int msix_enabled:1; | 169 | unsigned int msix_enabled:1; |
170 | atomic_t enable_cnt; /* pci_enable_device has been called */ | 170 | atomic_t enable_cnt; /* pci_enable_device has been called */ |
171 | 171 | ||
172 | u32 saved_config_space[16]; /* config space saved at suspend time */ | 172 | u32 saved_config_space[16]; /* config space saved at suspend time */ |
173 | struct hlist_head saved_cap_space; | 173 | struct hlist_head saved_cap_space; |
174 | struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */ | 174 | struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */ |
175 | int rom_attr_enabled; /* has display of the rom attribute been enabled? */ | 175 | int rom_attr_enabled; /* has display of the rom attribute been enabled? */ |
176 | struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ | 176 | struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ |
177 | }; | 177 | }; |
178 | 178 | ||
179 | #define pci_dev_g(n) list_entry(n, struct pci_dev, global_list) | 179 | #define pci_dev_g(n) list_entry(n, struct pci_dev, global_list) |
180 | #define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list) | 180 | #define pci_dev_b(n) list_entry(n, struct pci_dev, bus_list) |
181 | #define to_pci_dev(n) container_of(n, struct pci_dev, dev) | 181 | #define to_pci_dev(n) container_of(n, struct pci_dev, dev) |
182 | #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL) | 182 | #define for_each_pci_dev(d) while ((d = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, d)) != NULL) |
183 | 183 | ||
184 | static inline struct pci_cap_saved_state *pci_find_saved_cap( | 184 | static inline struct pci_cap_saved_state *pci_find_saved_cap( |
185 | struct pci_dev *pci_dev,char cap) | 185 | struct pci_dev *pci_dev,char cap) |
186 | { | 186 | { |
187 | struct pci_cap_saved_state *tmp; | 187 | struct pci_cap_saved_state *tmp; |
188 | struct hlist_node *pos; | 188 | struct hlist_node *pos; |
189 | 189 | ||
190 | hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) { | 190 | hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) { |
191 | if (tmp->cap_nr == cap) | 191 | if (tmp->cap_nr == cap) |
192 | return tmp; | 192 | return tmp; |
193 | } | 193 | } |
194 | return NULL; | 194 | return NULL; |
195 | } | 195 | } |
196 | 196 | ||
197 | static inline void pci_add_saved_cap(struct pci_dev *pci_dev, | 197 | static inline void pci_add_saved_cap(struct pci_dev *pci_dev, |
198 | struct pci_cap_saved_state *new_cap) | 198 | struct pci_cap_saved_state *new_cap) |
199 | { | 199 | { |
200 | hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); | 200 | hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); |
201 | } | 201 | } |
202 | 202 | ||
203 | static inline void pci_remove_saved_cap(struct pci_cap_saved_state *cap) | 203 | static inline void pci_remove_saved_cap(struct pci_cap_saved_state *cap) |
204 | { | 204 | { |
205 | hlist_del(&cap->next); | 205 | hlist_del(&cap->next); |
206 | } | 206 | } |
207 | 207 | ||
208 | /* | 208 | /* |
209 | * For PCI devices, the region numbers are assigned this way: | 209 | * For PCI devices, the region numbers are assigned this way: |
210 | * | 210 | * |
211 | * 0-5 standard PCI regions | 211 | * 0-5 standard PCI regions |
212 | * 6 expansion ROM | 212 | * 6 expansion ROM |
213 | * 7-10 bridges: address space assigned to buses behind the bridge | 213 | * 7-10 bridges: address space assigned to buses behind the bridge |
214 | */ | 214 | */ |
215 | 215 | ||
216 | #define PCI_ROM_RESOURCE 6 | 216 | #define PCI_ROM_RESOURCE 6 |
217 | #define PCI_BRIDGE_RESOURCES 7 | 217 | #define PCI_BRIDGE_RESOURCES 7 |
218 | #define PCI_NUM_RESOURCES 11 | 218 | #define PCI_NUM_RESOURCES 11 |
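Editorial aside (not part of this commit): with the numbering above, a driver can walk a device's regions directly through dev->resource[]. A minimal sketch; "foo_dump_regions" and the printout are illustrative only:

    static void foo_dump_regions(struct pci_dev *pdev)
    {
            int i;

            for (i = 0; i < PCI_NUM_RESOURCES; i++) {
                    struct resource *res = &pdev->resource[i];

                    if (!res->flags)        /* slot not in use */
                            continue;
                    printk(KERN_DEBUG "%s: region %d%s: %#llx-%#llx\n",
                           pci_name(pdev), i,
                           i == PCI_ROM_RESOURCE ? " (ROM)" : "",
                           (unsigned long long)res->start,
                           (unsigned long long)res->end);
            }
    }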
219 | 219 | ||
220 | #ifndef PCI_BUS_NUM_RESOURCES | 220 | #ifndef PCI_BUS_NUM_RESOURCES |
221 | #define PCI_BUS_NUM_RESOURCES 8 | 221 | #define PCI_BUS_NUM_RESOURCES 8 |
222 | #endif | 222 | #endif |
223 | 223 | ||
224 | #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ | 224 | #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ |
225 | 225 | ||
226 | struct pci_bus { | 226 | struct pci_bus { |
227 | struct list_head node; /* node in list of buses */ | 227 | struct list_head node; /* node in list of buses */ |
228 | struct pci_bus *parent; /* parent bus this bridge is on */ | 228 | struct pci_bus *parent; /* parent bus this bridge is on */ |
229 | struct list_head children; /* list of child buses */ | 229 | struct list_head children; /* list of child buses */ |
230 | struct list_head devices; /* list of devices on this bus */ | 230 | struct list_head devices; /* list of devices on this bus */ |
231 | struct pci_dev *self; /* bridge device as seen by parent */ | 231 | struct pci_dev *self; /* bridge device as seen by parent */ |
232 | struct resource *resource[PCI_BUS_NUM_RESOURCES]; | 232 | struct resource *resource[PCI_BUS_NUM_RESOURCES]; |
233 | /* address space routed to this bus */ | 233 | /* address space routed to this bus */ |
234 | 234 | ||
235 | struct pci_ops *ops; /* configuration access functions */ | 235 | struct pci_ops *ops; /* configuration access functions */ |
236 | void *sysdata; /* hook for sys-specific extension */ | 236 | void *sysdata; /* hook for sys-specific extension */ |
237 | struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ | 237 | struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ |
238 | 238 | ||
239 | unsigned char number; /* bus number */ | 239 | unsigned char number; /* bus number */ |
240 | unsigned char primary; /* number of primary bridge */ | 240 | unsigned char primary; /* number of primary bridge */ |
241 | unsigned char secondary; /* number of secondary bridge */ | 241 | unsigned char secondary; /* number of secondary bridge */ |
242 | unsigned char subordinate; /* max number of subordinate buses */ | 242 | unsigned char subordinate; /* max number of subordinate buses */ |
243 | 243 | ||
244 | char name[48]; | 244 | char name[48]; |
245 | 245 | ||
246 | unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ | 246 | unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ |
247 | pci_bus_flags_t bus_flags; /* Inherited by child busses */ | 247 | pci_bus_flags_t bus_flags; /* Inherited by child busses */ |
248 | struct device *bridge; | 248 | struct device *bridge; |
249 | struct class_device class_dev; | 249 | struct class_device class_dev; |
250 | struct bin_attribute *legacy_io; /* legacy I/O for this bus */ | 250 | struct bin_attribute *legacy_io; /* legacy I/O for this bus */ |
251 | struct bin_attribute *legacy_mem; /* legacy mem */ | 251 | struct bin_attribute *legacy_mem; /* legacy mem */ |
252 | }; | 252 | }; |
253 | 253 | ||
254 | #define pci_bus_b(n) list_entry(n, struct pci_bus, node) | 254 | #define pci_bus_b(n) list_entry(n, struct pci_bus, node) |
255 | #define to_pci_bus(n) container_of(n, struct pci_bus, class_dev) | 255 | #define to_pci_bus(n) container_of(n, struct pci_bus, class_dev) |
256 | 256 | ||
257 | /* | 257 | /* |
258 | * Error values that may be returned by PCI functions. | 258 | * Error values that may be returned by PCI functions. |
259 | */ | 259 | */ |
260 | #define PCIBIOS_SUCCESSFUL 0x00 | 260 | #define PCIBIOS_SUCCESSFUL 0x00 |
261 | #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 | 261 | #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 |
262 | #define PCIBIOS_BAD_VENDOR_ID 0x83 | 262 | #define PCIBIOS_BAD_VENDOR_ID 0x83 |
263 | #define PCIBIOS_DEVICE_NOT_FOUND 0x86 | 263 | #define PCIBIOS_DEVICE_NOT_FOUND 0x86 |
264 | #define PCIBIOS_BAD_REGISTER_NUMBER 0x87 | 264 | #define PCIBIOS_BAD_REGISTER_NUMBER 0x87 |
265 | #define PCIBIOS_SET_FAILED 0x88 | 265 | #define PCIBIOS_SET_FAILED 0x88 |
266 | #define PCIBIOS_BUFFER_TOO_SMALL 0x89 | 266 | #define PCIBIOS_BUFFER_TOO_SMALL 0x89 |
267 | 267 | ||
268 | /* Low-level architecture-dependent routines */ | 268 | /* Low-level architecture-dependent routines */ |
269 | 269 | ||
270 | struct pci_ops { | 270 | struct pci_ops { |
271 | int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); | 271 | int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); |
272 | int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); | 272 | int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); |
273 | }; | 273 | }; |
274 | 274 | ||
275 | struct pci_raw_ops { | 275 | struct pci_raw_ops { |
276 | int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, | 276 | int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, |
277 | int reg, int len, u32 *val); | 277 | int reg, int len, u32 *val); |
278 | int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn, | 278 | int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn, |
279 | int reg, int len, u32 val); | 279 | int reg, int len, u32 val); |
280 | }; | 280 | }; |
281 | 281 | ||
282 | extern struct pci_raw_ops *raw_pci_ops; | 282 | extern struct pci_raw_ops *raw_pci_ops; |
283 | 283 | ||
284 | struct pci_bus_region { | 284 | struct pci_bus_region { |
285 | unsigned long start; | 285 | unsigned long start; |
286 | unsigned long end; | 286 | unsigned long end; |
287 | }; | 287 | }; |
288 | 288 | ||
289 | struct pci_dynids { | 289 | struct pci_dynids { |
290 | spinlock_t lock; /* protects list, index */ | 290 | spinlock_t lock; /* protects list, index */ |
291 | struct list_head list; /* for IDs added at runtime */ | 291 | struct list_head list; /* for IDs added at runtime */ |
292 | unsigned int use_driver_data:1; /* pci_driver->driver_data is used */ | 292 | unsigned int use_driver_data:1; /* pci_driver->driver_data is used */ |
293 | }; | 293 | }; |
294 | 294 | ||
295 | /* ---------------------------------------------------------------- */ | 295 | /* ---------------------------------------------------------------- */ |
296 | /** PCI Error Recovery System (PCI-ERS). If a PCI device driver provides | 296 | /** PCI Error Recovery System (PCI-ERS). If a PCI device driver provides |
297 | * a set of callbacks in struct pci_error_handlers, then that device driver | 297 | * a set of callbacks in struct pci_error_handlers, then that device driver |
298 | * will be notified of PCI bus errors, and will be driven to recovery | 298 | * will be notified of PCI bus errors, and will be driven to recovery |
299 | * when an error occurs. | 299 | * when an error occurs. |
300 | */ | 300 | */ |
301 | 301 | ||
302 | typedef unsigned int __bitwise pci_ers_result_t; | 302 | typedef unsigned int __bitwise pci_ers_result_t; |
303 | 303 | ||
304 | enum pci_ers_result { | 304 | enum pci_ers_result { |
305 | /* no result/none/not supported in device driver */ | 305 | /* no result/none/not supported in device driver */ |
306 | PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1, | 306 | PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1, |
307 | 307 | ||
308 | /* Device driver can recover without slot reset */ | 308 | /* Device driver can recover without slot reset */ |
309 | PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2, | 309 | PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2, |
310 | 310 | ||
311 | /* Device driver wants slot to be reset. */ | 311 | /* Device driver wants slot to be reset. */ |
312 | PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3, | 312 | PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3, |
313 | 313 | ||
314 | /* Device has completely failed, is unrecoverable */ | 314 | /* Device has completely failed, is unrecoverable */ |
315 | PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4, | 315 | PCI_ERS_RESULT_DISCONNECT = (__force pci_ers_result_t) 4, |
316 | 316 | ||
317 | /* Device driver is fully recovered and operational */ | 317 | /* Device driver is fully recovered and operational */ |
318 | PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5, | 318 | PCI_ERS_RESULT_RECOVERED = (__force pci_ers_result_t) 5, |
319 | }; | 319 | }; |
320 | 320 | ||
321 | /* PCI bus error event callbacks */ | 321 | /* PCI bus error event callbacks */ |
322 | struct pci_error_handlers | 322 | struct pci_error_handlers |
323 | { | 323 | { |
324 | /* PCI bus error detected on this device */ | 324 | /* PCI bus error detected on this device */ |
325 | pci_ers_result_t (*error_detected)(struct pci_dev *dev, | 325 | pci_ers_result_t (*error_detected)(struct pci_dev *dev, |
326 | enum pci_channel_state error); | 326 | enum pci_channel_state error); |
327 | 327 | ||
328 | /* MMIO has been re-enabled, but not DMA */ | 328 | /* MMIO has been re-enabled, but not DMA */ |
329 | pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); | 329 | pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev); |
330 | 330 | ||
331 | /* PCI Express link has been reset */ | 331 | /* PCI Express link has been reset */ |
332 | pci_ers_result_t (*link_reset)(struct pci_dev *dev); | 332 | pci_ers_result_t (*link_reset)(struct pci_dev *dev); |
333 | 333 | ||
334 | /* PCI slot has been reset */ | 334 | /* PCI slot has been reset */ |
335 | pci_ers_result_t (*slot_reset)(struct pci_dev *dev); | 335 | pci_ers_result_t (*slot_reset)(struct pci_dev *dev); |
336 | 336 | ||
337 | /* Device driver may resume normal operations */ | 337 | /* Device driver may resume normal operations */ |
338 | void (*resume)(struct pci_dev *dev); | 338 | void (*resume)(struct pci_dev *dev); |
339 | }; | 339 | }; |
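For illustration only (no such driver exists in the tree), a driver opts into this recovery machinery by filling in a struct pci_error_handlers; all "foo_" names below are placeholders:

    static pci_ers_result_t foo_error_detected(struct pci_dev *dev,
                                               enum pci_channel_state state)
    {
            /* Stop I/O on the device and ask for a slot reset. */
            return PCI_ERS_RESULT_NEED_RESET;
    }

    static pci_ers_result_t foo_slot_reset(struct pci_dev *dev)
    {
            pci_restore_state(dev); /* state saved earlier with pci_save_state() */
            return PCI_ERS_RESULT_RECOVERED;
    }

    static void foo_resume(struct pci_dev *dev)
    {
            /* Restart I/O here. */
    }

    static struct pci_error_handlers foo_err_handler = {
            .error_detected = foo_error_detected,
            .slot_reset     = foo_slot_reset,
            .resume         = foo_resume,
    };

The driver would then point the err_handler member of its struct pci_driver (defined just below in this header) at foo_err_handler.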
340 | 340 | ||
341 | /* ---------------------------------------------------------------- */ | 341 | /* ---------------------------------------------------------------- */ |
342 | 342 | ||
343 | struct module; | 343 | struct module; |
344 | struct pci_driver { | 344 | struct pci_driver { |
345 | struct list_head node; | 345 | struct list_head node; |
346 | char *name; | 346 | char *name; |
347 | const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */ | 347 | const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */ |
348 | int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ | 348 | int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ |
349 | void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ | 349 | void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ |
350 | int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ | 350 | int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ |
351 | int (*suspend_late) (struct pci_dev *dev, pm_message_t state); | 351 | int (*suspend_late) (struct pci_dev *dev, pm_message_t state); |
352 | int (*resume_early) (struct pci_dev *dev); | 352 | int (*resume_early) (struct pci_dev *dev); |
353 | int (*resume) (struct pci_dev *dev); /* Device woken up */ | 353 | int (*resume) (struct pci_dev *dev); /* Device woken up */ |
354 | int (*enable_wake) (struct pci_dev *dev, pci_power_t state, int enable); /* Enable wake event */ | 354 | int (*enable_wake) (struct pci_dev *dev, pci_power_t state, int enable); /* Enable wake event */ |
355 | void (*shutdown) (struct pci_dev *dev); | 355 | void (*shutdown) (struct pci_dev *dev); |
356 | 356 | ||
357 | struct pci_error_handlers *err_handler; | 357 | struct pci_error_handlers *err_handler; |
358 | struct device_driver driver; | 358 | struct device_driver driver; |
359 | struct pci_dynids dynids; | 359 | struct pci_dynids dynids; |
360 | 360 | ||
361 | int multithread_probe; | 361 | int multithread_probe; |
362 | }; | 362 | }; |
363 | 363 | ||
364 | #define to_pci_driver(drv) container_of(drv,struct pci_driver, driver) | 364 | #define to_pci_driver(drv) container_of(drv,struct pci_driver, driver) |
365 | 365 | ||
366 | /** | 366 | /** |
367 | * PCI_DEVICE - macro used to describe a specific pci device | 367 | * PCI_DEVICE - macro used to describe a specific pci device |
368 | * @vend: the 16 bit PCI Vendor ID | 368 | * @vend: the 16 bit PCI Vendor ID |
369 | * @dev: the 16 bit PCI Device ID | 369 | * @dev: the 16 bit PCI Device ID |
370 | * | 370 | * |
371 | * This macro is used to create a struct pci_device_id that matches a | 371 | * This macro is used to create a struct pci_device_id that matches a |
372 | * specific device. The subvendor and subdevice fields will be set to | 372 | * specific device. The subvendor and subdevice fields will be set to |
373 | * PCI_ANY_ID. | 373 | * PCI_ANY_ID. |
374 | */ | 374 | */ |
375 | #define PCI_DEVICE(vend,dev) \ | 375 | #define PCI_DEVICE(vend,dev) \ |
376 | .vendor = (vend), .device = (dev), \ | 376 | .vendor = (vend), .device = (dev), \ |
377 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID | 377 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID |
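As an aside, a typical match table built with this macro looks like the sketch below; the device ID 0x1234 and the "foo_" prefix are placeholders, not real hardware:

    static struct pci_device_id foo_ids[] = {
            { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },    /* made-up device ID */
            { 0, }                                          /* terminating entry */
    };
    MODULE_DEVICE_TABLE(pci, foo_ids);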
378 | 378 | ||
379 | /** | 379 | /** |
380 | * PCI_DEVICE_CLASS - macro used to describe a specific pci device class | 380 | * PCI_DEVICE_CLASS - macro used to describe a specific pci device class |
381 | * @dev_class: the class, subclass, prog-if triple for this device | 381 | * @dev_class: the class, subclass, prog-if triple for this device |
382 | * @dev_class_mask: the class mask for this device | 382 | * @dev_class_mask: the class mask for this device |
383 | * | 383 | * |
384 | * This macro is used to create a struct pci_device_id that matches a | 384 | * This macro is used to create a struct pci_device_id that matches a |
385 | * specific PCI class. The vendor, device, subvendor, and subdevice | 385 | * specific PCI class. The vendor, device, subvendor, and subdevice |
386 | * fields will be set to PCI_ANY_ID. | 386 | * fields will be set to PCI_ANY_ID. |
387 | */ | 387 | */ |
388 | #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \ | 388 | #define PCI_DEVICE_CLASS(dev_class,dev_class_mask) \ |
389 | .class = (dev_class), .class_mask = (dev_class_mask), \ | 389 | .class = (dev_class), .class_mask = (dev_class_mask), \ |
390 | .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \ | 390 | .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \ |
391 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID | 391 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID |
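A hedged example of class-based matching: the entry below would match every VGA-class display controller regardless of vendor, with the mask ignoring the programming-interface byte:

    static struct pci_device_id foo_class_ids[] = {
            { PCI_DEVICE_CLASS(PCI_CLASS_DISPLAY_VGA << 8, 0xffff00) },
            { 0, }
    };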
392 | 392 | ||
393 | /* | 393 | /* |
394 | * pci_module_init is obsolete, this stays here till we fix up all usages of it | 394 | * pci_module_init is obsolete, this stays here till we fix up all usages of it |
395 | * in the tree. | 395 | * in the tree. |
396 | */ | 396 | */ |
397 | #define pci_module_init pci_register_driver | 397 | #define pci_module_init pci_register_driver |
398 | 398 | ||
399 | /** | ||
400 | * PCI_VDEVICE - macro used to describe a specific pci device in short form | ||
401 | * @vend: the vendor name | ||
402 | * @dev: the 16 bit PCI Device ID | ||
403 | * | ||
404 | * This macro is used to create a struct pci_device_id that matches a | ||
405 | * specific PCI device. The subvendor and subdevice fields will be set | ||
406 | * to PCI_ANY_ID. The macro allows the next field to follow as the device | ||
407 | * private data. | ||
408 | */ | ||
409 | |||
410 | #define PCI_VDEVICE(vendor, device) \ | ||
411 | PCI_VENDOR_ID_##vendor, (device), \ | ||
412 | PCI_ANY_ID, PCI_ANY_ID, 0, 0 | ||
413 | |||
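The new macro is meant to be used like this (hypothetical entry; 0x5678 is a made-up device ID, and the value following the macro lands in the driver_data field):

    static struct pci_device_id bar_ids[] = {
            { PCI_VDEVICE(INTEL, 0x5678), 0 },      /* driver-private data follows */
            { 0, }
    };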
399 | /* these external functions are only available when PCI support is enabled */ | 414 | /* these external functions are only available when PCI support is enabled */ |
400 | #ifdef CONFIG_PCI | 415 | #ifdef CONFIG_PCI |
401 | 416 | ||
402 | extern struct bus_type pci_bus_type; | 417 | extern struct bus_type pci_bus_type; |
403 | 418 | ||
404 | /* Do NOT directly access these two variables, unless you are arch specific pci | 419 | /* Do NOT directly access these two variables, unless you are arch specific pci |
405 | * code, or pci core code. */ | 420 | * code, or pci core code. */ |
406 | extern struct list_head pci_root_buses; /* list of all known PCI buses */ | 421 | extern struct list_head pci_root_buses; /* list of all known PCI buses */ |
407 | extern struct list_head pci_devices; /* list of all devices */ | 422 | extern struct list_head pci_devices; /* list of all devices */ |
408 | 423 | ||
409 | void pcibios_fixup_bus(struct pci_bus *); | 424 | void pcibios_fixup_bus(struct pci_bus *); |
410 | int __must_check pcibios_enable_device(struct pci_dev *, int mask); | 425 | int __must_check pcibios_enable_device(struct pci_dev *, int mask); |
411 | char *pcibios_setup (char *str); | 426 | char *pcibios_setup (char *str); |
412 | 427 | ||
413 | /* Used only when drivers/pci/setup.c is used */ | 428 | /* Used only when drivers/pci/setup.c is used */ |
414 | void pcibios_align_resource(void *, struct resource *, resource_size_t, | 429 | void pcibios_align_resource(void *, struct resource *, resource_size_t, |
415 | resource_size_t); | 430 | resource_size_t); |
416 | void pcibios_update_irq(struct pci_dev *, int irq); | 431 | void pcibios_update_irq(struct pci_dev *, int irq); |
417 | 432 | ||
418 | /* Generic PCI functions used internally */ | 433 | /* Generic PCI functions used internally */ |
419 | 434 | ||
420 | extern struct pci_bus *pci_find_bus(int domain, int busnr); | 435 | extern struct pci_bus *pci_find_bus(int domain, int busnr); |
421 | void pci_bus_add_devices(struct pci_bus *bus); | 436 | void pci_bus_add_devices(struct pci_bus *bus); |
422 | struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, struct pci_ops *ops, void *sysdata); | 437 | struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus, struct pci_ops *ops, void *sysdata); |
423 | static inline struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata) | 438 | static inline struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata) |
424 | { | 439 | { |
425 | struct pci_bus *root_bus; | 440 | struct pci_bus *root_bus; |
426 | root_bus = pci_scan_bus_parented(NULL, bus, ops, sysdata); | 441 | root_bus = pci_scan_bus_parented(NULL, bus, ops, sysdata); |
427 | if (root_bus) | 442 | if (root_bus) |
428 | pci_bus_add_devices(root_bus); | 443 | pci_bus_add_devices(root_bus); |
429 | return root_bus; | 444 | return root_bus; |
430 | } | 445 | } |
431 | struct pci_bus *pci_create_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata); | 446 | struct pci_bus *pci_create_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata); |
432 | struct pci_bus * pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr); | 447 | struct pci_bus * pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr); |
433 | int pci_scan_slot(struct pci_bus *bus, int devfn); | 448 | int pci_scan_slot(struct pci_bus *bus, int devfn); |
434 | struct pci_dev * pci_scan_single_device(struct pci_bus *bus, int devfn); | 449 | struct pci_dev * pci_scan_single_device(struct pci_bus *bus, int devfn); |
435 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); | 450 | void pci_device_add(struct pci_dev *dev, struct pci_bus *bus); |
436 | unsigned int pci_scan_child_bus(struct pci_bus *bus); | 451 | unsigned int pci_scan_child_bus(struct pci_bus *bus); |
437 | int __must_check pci_bus_add_device(struct pci_dev *dev); | 452 | int __must_check pci_bus_add_device(struct pci_dev *dev); |
438 | void pci_read_bridge_bases(struct pci_bus *child); | 453 | void pci_read_bridge_bases(struct pci_bus *child); |
439 | struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res); | 454 | struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res); |
440 | int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); | 455 | int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); |
441 | extern struct pci_dev *pci_dev_get(struct pci_dev *dev); | 456 | extern struct pci_dev *pci_dev_get(struct pci_dev *dev); |
442 | extern void pci_dev_put(struct pci_dev *dev); | 457 | extern void pci_dev_put(struct pci_dev *dev); |
443 | extern void pci_remove_bus(struct pci_bus *b); | 458 | extern void pci_remove_bus(struct pci_bus *b); |
444 | extern void pci_remove_bus_device(struct pci_dev *dev); | 459 | extern void pci_remove_bus_device(struct pci_dev *dev); |
445 | extern void pci_stop_bus_device(struct pci_dev *dev); | 460 | extern void pci_stop_bus_device(struct pci_dev *dev); |
446 | void pci_setup_cardbus(struct pci_bus *bus); | 461 | void pci_setup_cardbus(struct pci_bus *bus); |
447 | extern void pci_sort_breadthfirst(void); | 462 | extern void pci_sort_breadthfirst(void); |
448 | 463 | ||
449 | /* Generic PCI functions exported to card drivers */ | 464 | /* Generic PCI functions exported to card drivers */ |
450 | 465 | ||
451 | struct pci_dev *pci_find_device (unsigned int vendor, unsigned int device, const struct pci_dev *from); | 466 | struct pci_dev *pci_find_device (unsigned int vendor, unsigned int device, const struct pci_dev *from); |
452 | struct pci_dev *pci_find_device_reverse (unsigned int vendor, unsigned int device, const struct pci_dev *from); | 467 | struct pci_dev *pci_find_device_reverse (unsigned int vendor, unsigned int device, const struct pci_dev *from); |
453 | struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn); | 468 | struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn); |
454 | int pci_find_capability (struct pci_dev *dev, int cap); | 469 | int pci_find_capability (struct pci_dev *dev, int cap); |
455 | int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap); | 470 | int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap); |
456 | int pci_find_ext_capability (struct pci_dev *dev, int cap); | 471 | int pci_find_ext_capability (struct pci_dev *dev, int cap); |
457 | int pci_find_ht_capability (struct pci_dev *dev, int ht_cap); | 472 | int pci_find_ht_capability (struct pci_dev *dev, int ht_cap); |
458 | int pci_find_next_ht_capability (struct pci_dev *dev, int pos, int ht_cap); | 473 | int pci_find_next_ht_capability (struct pci_dev *dev, int pos, int ht_cap); |
459 | struct pci_bus *pci_find_next_bus(const struct pci_bus *from); | 474 | struct pci_bus *pci_find_next_bus(const struct pci_bus *from); |
460 | 475 | ||
461 | struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, | 476 | struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, |
462 | struct pci_dev *from); | 477 | struct pci_dev *from); |
463 | struct pci_dev *pci_get_device_reverse(unsigned int vendor, unsigned int device, | 478 | struct pci_dev *pci_get_device_reverse(unsigned int vendor, unsigned int device, |
464 | struct pci_dev *from); | 479 | struct pci_dev *from); |
465 | 480 | ||
466 | struct pci_dev *pci_get_subsys (unsigned int vendor, unsigned int device, | 481 | struct pci_dev *pci_get_subsys (unsigned int vendor, unsigned int device, |
467 | unsigned int ss_vendor, unsigned int ss_device, | 482 | unsigned int ss_vendor, unsigned int ss_device, |
468 | struct pci_dev *from); | 483 | struct pci_dev *from); |
469 | struct pci_dev *pci_get_slot (struct pci_bus *bus, unsigned int devfn); | 484 | struct pci_dev *pci_get_slot (struct pci_bus *bus, unsigned int devfn); |
470 | struct pci_dev *pci_get_bus_and_slot (unsigned int bus, unsigned int devfn); | 485 | struct pci_dev *pci_get_bus_and_slot (unsigned int bus, unsigned int devfn); |
471 | struct pci_dev *pci_get_class (unsigned int class, struct pci_dev *from); | 486 | struct pci_dev *pci_get_class (unsigned int class, struct pci_dev *from); |
472 | int pci_dev_present(const struct pci_device_id *ids); | 487 | int pci_dev_present(const struct pci_device_id *ids); |
473 | const struct pci_device_id *pci_find_present(const struct pci_device_id *ids); | 488 | const struct pci_device_id *pci_find_present(const struct pci_device_id *ids); |
474 | 489 | ||
475 | int pci_bus_read_config_byte (struct pci_bus *bus, unsigned int devfn, int where, u8 *val); | 490 | int pci_bus_read_config_byte (struct pci_bus *bus, unsigned int devfn, int where, u8 *val); |
476 | int pci_bus_read_config_word (struct pci_bus *bus, unsigned int devfn, int where, u16 *val); | 491 | int pci_bus_read_config_word (struct pci_bus *bus, unsigned int devfn, int where, u16 *val); |
477 | int pci_bus_read_config_dword (struct pci_bus *bus, unsigned int devfn, int where, u32 *val); | 492 | int pci_bus_read_config_dword (struct pci_bus *bus, unsigned int devfn, int where, u32 *val); |
478 | int pci_bus_write_config_byte (struct pci_bus *bus, unsigned int devfn, int where, u8 val); | 493 | int pci_bus_write_config_byte (struct pci_bus *bus, unsigned int devfn, int where, u8 val); |
479 | int pci_bus_write_config_word (struct pci_bus *bus, unsigned int devfn, int where, u16 val); | 494 | int pci_bus_write_config_word (struct pci_bus *bus, unsigned int devfn, int where, u16 val); |
480 | int pci_bus_write_config_dword (struct pci_bus *bus, unsigned int devfn, int where, u32 val); | 495 | int pci_bus_write_config_dword (struct pci_bus *bus, unsigned int devfn, int where, u32 val); |
481 | 496 | ||
482 | static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val) | 497 | static inline int pci_read_config_byte(struct pci_dev *dev, int where, u8 *val) |
483 | { | 498 | { |
484 | return pci_bus_read_config_byte (dev->bus, dev->devfn, where, val); | 499 | return pci_bus_read_config_byte (dev->bus, dev->devfn, where, val); |
485 | } | 500 | } |
486 | static inline int pci_read_config_word(struct pci_dev *dev, int where, u16 *val) | 501 | static inline int pci_read_config_word(struct pci_dev *dev, int where, u16 *val) |
487 | { | 502 | { |
488 | return pci_bus_read_config_word (dev->bus, dev->devfn, where, val); | 503 | return pci_bus_read_config_word (dev->bus, dev->devfn, where, val); |
489 | } | 504 | } |
490 | static inline int pci_read_config_dword(struct pci_dev *dev, int where, u32 *val) | 505 | static inline int pci_read_config_dword(struct pci_dev *dev, int where, u32 *val) |
491 | { | 506 | { |
492 | return pci_bus_read_config_dword (dev->bus, dev->devfn, where, val); | 507 | return pci_bus_read_config_dword (dev->bus, dev->devfn, where, val); |
493 | } | 508 | } |
494 | static inline int pci_write_config_byte(struct pci_dev *dev, int where, u8 val) | 509 | static inline int pci_write_config_byte(struct pci_dev *dev, int where, u8 val) |
495 | { | 510 | { |
496 | return pci_bus_write_config_byte (dev->bus, dev->devfn, where, val); | 511 | return pci_bus_write_config_byte (dev->bus, dev->devfn, where, val); |
497 | } | 512 | } |
498 | static inline int pci_write_config_word(struct pci_dev *dev, int where, u16 val) | 513 | static inline int pci_write_config_word(struct pci_dev *dev, int where, u16 val) |
499 | { | 514 | { |
500 | return pci_bus_write_config_word (dev->bus, dev->devfn, where, val); | 515 | return pci_bus_write_config_word (dev->bus, dev->devfn, where, val); |
501 | } | 516 | } |
502 | static inline int pci_write_config_dword(struct pci_dev *dev, int where, u32 val) | 517 | static inline int pci_write_config_dword(struct pci_dev *dev, int where, u32 val) |
503 | { | 518 | { |
504 | return pci_bus_write_config_dword (dev->bus, dev->devfn, where, val); | 519 | return pci_bus_write_config_dword (dev->bus, dev->devfn, where, val); |
505 | } | 520 | } |
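A small usage sketch for these accessors, checking the PCIBIOS_* return codes defined earlier in this header; the function name and register tweak are illustrative:

    static int foo_tweak_config(struct pci_dev *pdev)
    {
            u8 rev;

            if (pci_read_config_byte(pdev, PCI_REVISION_ID, &rev) != PCIBIOS_SUCCESSFUL)
                    return -EIO;
            printk(KERN_DEBUG "%s: revision %#x\n", pci_name(pdev), rev);
            /* Bump the latency timer; purely illustrative. */
            return pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
    }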
506 | 521 | ||
507 | int __must_check pci_enable_device(struct pci_dev *dev); | 522 | int __must_check pci_enable_device(struct pci_dev *dev); |
508 | int __must_check pci_enable_device_bars(struct pci_dev *dev, int mask); | 523 | int __must_check pci_enable_device_bars(struct pci_dev *dev, int mask); |
509 | void pci_disable_device(struct pci_dev *dev); | 524 | void pci_disable_device(struct pci_dev *dev); |
510 | void pci_set_master(struct pci_dev *dev); | 525 | void pci_set_master(struct pci_dev *dev); |
511 | #define HAVE_PCI_SET_MWI | 526 | #define HAVE_PCI_SET_MWI |
512 | int __must_check pci_set_mwi(struct pci_dev *dev); | 527 | int __must_check pci_set_mwi(struct pci_dev *dev); |
513 | void pci_clear_mwi(struct pci_dev *dev); | 528 | void pci_clear_mwi(struct pci_dev *dev); |
514 | void pci_intx(struct pci_dev *dev, int enable); | 529 | void pci_intx(struct pci_dev *dev, int enable); |
515 | int pci_set_dma_mask(struct pci_dev *dev, u64 mask); | 530 | int pci_set_dma_mask(struct pci_dev *dev, u64 mask); |
516 | int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); | 531 | int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask); |
517 | void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); | 532 | void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno); |
518 | int __must_check pci_assign_resource(struct pci_dev *dev, int i); | 533 | int __must_check pci_assign_resource(struct pci_dev *dev, int i); |
519 | int __must_check pci_assign_resource_fixed(struct pci_dev *dev, int i); | 534 | int __must_check pci_assign_resource_fixed(struct pci_dev *dev, int i); |
520 | void pci_restore_bars(struct pci_dev *dev); | 535 | void pci_restore_bars(struct pci_dev *dev); |
521 | 536 | ||
522 | /* ROM control related routines */ | 537 | /* ROM control related routines */ |
523 | void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); | 538 | void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size); |
524 | void __iomem __must_check *pci_map_rom_copy(struct pci_dev *pdev, size_t *size); | 539 | void __iomem __must_check *pci_map_rom_copy(struct pci_dev *pdev, size_t *size); |
525 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); | 540 | void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom); |
526 | void pci_remove_rom(struct pci_dev *pdev); | 541 | void pci_remove_rom(struct pci_dev *pdev); |
527 | 542 | ||
528 | /* Power management related routines */ | 543 | /* Power management related routines */ |
529 | int pci_save_state(struct pci_dev *dev); | 544 | int pci_save_state(struct pci_dev *dev); |
530 | int pci_restore_state(struct pci_dev *dev); | 545 | int pci_restore_state(struct pci_dev *dev); |
531 | int pci_set_power_state(struct pci_dev *dev, pci_power_t state); | 546 | int pci_set_power_state(struct pci_dev *dev, pci_power_t state); |
532 | pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); | 547 | pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); |
533 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); | 548 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable); |
534 | 549 | ||
535 | /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ | 550 | /* Helper functions for low-level code (drivers/pci/setup-[bus,res].c) */ |
536 | void pci_bus_assign_resources(struct pci_bus *bus); | 551 | void pci_bus_assign_resources(struct pci_bus *bus); |
537 | void pci_bus_size_bridges(struct pci_bus *bus); | 552 | void pci_bus_size_bridges(struct pci_bus *bus); |
538 | int pci_claim_resource(struct pci_dev *, int); | 553 | int pci_claim_resource(struct pci_dev *, int); |
539 | void pci_assign_unassigned_resources(void); | 554 | void pci_assign_unassigned_resources(void); |
540 | void pdev_enable_device(struct pci_dev *); | 555 | void pdev_enable_device(struct pci_dev *); |
541 | void pdev_sort_resources(struct pci_dev *, struct resource_list *); | 556 | void pdev_sort_resources(struct pci_dev *, struct resource_list *); |
542 | void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *), | 557 | void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *), |
543 | int (*)(struct pci_dev *, u8, u8)); | 558 | int (*)(struct pci_dev *, u8, u8)); |
544 | #define HAVE_PCI_REQ_REGIONS 2 | 559 | #define HAVE_PCI_REQ_REGIONS 2 |
545 | int __must_check pci_request_regions(struct pci_dev *, const char *); | 560 | int __must_check pci_request_regions(struct pci_dev *, const char *); |
546 | void pci_release_regions(struct pci_dev *); | 561 | void pci_release_regions(struct pci_dev *); |
547 | int __must_check pci_request_region(struct pci_dev *, int, const char *); | 562 | int __must_check pci_request_region(struct pci_dev *, int, const char *); |
548 | void pci_release_region(struct pci_dev *, int); | 563 | void pci_release_region(struct pci_dev *, int); |
549 | 564 | ||
550 | /* drivers/pci/bus.c */ | 565 | /* drivers/pci/bus.c */ |
551 | int __must_check pci_bus_alloc_resource(struct pci_bus *bus, | 566 | int __must_check pci_bus_alloc_resource(struct pci_bus *bus, |
552 | struct resource *res, resource_size_t size, | 567 | struct resource *res, resource_size_t size, |
553 | resource_size_t align, resource_size_t min, | 568 | resource_size_t align, resource_size_t min, |
554 | unsigned int type_mask, | 569 | unsigned int type_mask, |
555 | void (*alignf)(void *, struct resource *, | 570 | void (*alignf)(void *, struct resource *, |
556 | resource_size_t, resource_size_t), | 571 | resource_size_t, resource_size_t), |
557 | void *alignf_data); | 572 | void *alignf_data); |
558 | void pci_enable_bridges(struct pci_bus *bus); | 573 | void pci_enable_bridges(struct pci_bus *bus); |
559 | 574 | ||
560 | /* Proper probing supporting hot-pluggable devices */ | 575 | /* Proper probing supporting hot-pluggable devices */ |
561 | int __must_check __pci_register_driver(struct pci_driver *, struct module *); | 576 | int __must_check __pci_register_driver(struct pci_driver *, struct module *); |
562 | static inline int __must_check pci_register_driver(struct pci_driver *driver) | 577 | static inline int __must_check pci_register_driver(struct pci_driver *driver) |
563 | { | 578 | { |
564 | return __pci_register_driver(driver, THIS_MODULE); | 579 | return __pci_register_driver(driver, THIS_MODULE); |
565 | } | 580 | } |
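Putting the pieces together, a minimal hypothetical driver registers itself as sketched below; foo_ids is the match table sketched next to PCI_DEVICE above, and foo_err_handler is the error-handler example above:

    static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            int rc = pci_enable_device(pdev);

            if (rc)
                    return rc;
            pci_set_master(pdev);
            return 0;
    }

    static void foo_remove(struct pci_dev *pdev)
    {
            pci_disable_device(pdev);
    }

    static struct pci_driver foo_driver = {
            .name           = "foo",
            .id_table       = foo_ids,
            .probe          = foo_probe,
            .remove         = foo_remove,
            .err_handler    = &foo_err_handler,     /* optional */
    };

    static int __init foo_init(void)
    {
            return pci_register_driver(&foo_driver);
    }

    static void __exit foo_exit(void)
    {
            pci_unregister_driver(&foo_driver);
    }

    module_init(foo_init);
    module_exit(foo_exit);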
566 | 581 | ||
567 | void pci_unregister_driver(struct pci_driver *); | 582 | void pci_unregister_driver(struct pci_driver *); |
568 | void pci_remove_behind_bridge(struct pci_dev *); | 583 | void pci_remove_behind_bridge(struct pci_dev *); |
569 | struct pci_driver *pci_dev_driver(const struct pci_dev *); | 584 | struct pci_driver *pci_dev_driver(const struct pci_dev *); |
570 | const struct pci_device_id *pci_match_device(struct pci_driver *drv, struct pci_dev *dev); | 585 | const struct pci_device_id *pci_match_device(struct pci_driver *drv, struct pci_dev *dev); |
571 | const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, struct pci_dev *dev); | 586 | const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, struct pci_dev *dev); |
572 | int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass); | 587 | int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass); |
573 | 588 | ||
574 | void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *), | 589 | void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *), |
575 | void *userdata); | 590 | void *userdata); |
576 | int pci_cfg_space_size(struct pci_dev *dev); | 591 | int pci_cfg_space_size(struct pci_dev *dev); |
577 | unsigned char pci_bus_max_busnr(struct pci_bus* bus); | 592 | unsigned char pci_bus_max_busnr(struct pci_bus* bus); |
578 | 593 | ||
579 | /* kmem_cache style wrapper around pci_alloc_consistent() */ | 594 | /* kmem_cache style wrapper around pci_alloc_consistent() */ |
580 | 595 | ||
581 | #include <linux/dmapool.h> | 596 | #include <linux/dmapool.h> |
582 | 597 | ||
583 | #define pci_pool dma_pool | 598 | #define pci_pool dma_pool |
584 | #define pci_pool_create(name, pdev, size, align, allocation) \ | 599 | #define pci_pool_create(name, pdev, size, align, allocation) \ |
585 | dma_pool_create(name, &pdev->dev, size, align, allocation) | 600 | dma_pool_create(name, &pdev->dev, size, align, allocation) |
586 | #define pci_pool_destroy(pool) dma_pool_destroy(pool) | 601 | #define pci_pool_destroy(pool) dma_pool_destroy(pool) |
587 | #define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) | 602 | #define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle) |
588 | #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) | 603 | #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) |
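These wrappers map one-to-one onto the dma_pool API; a brief, hypothetical use for fixed-size DMA descriptors:

    static int foo_make_pool(struct pci_dev *pdev)
    {
            struct pci_pool *pool;
            dma_addr_t dma;
            void *desc;

            /* 64-byte blocks, 8-byte alignment, no extra allocation constraint. */
            pool = pci_pool_create("foo-desc", pdev, 64, 8, 0);
            if (!pool)
                    return -ENOMEM;
            desc = pci_pool_alloc(pool, GFP_KERNEL, &dma);  /* dma = bus address */
            if (desc)
                    pci_pool_free(pool, desc, dma);
            pci_pool_destroy(pool);
            return 0;
    }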
589 | 604 | ||
590 | enum pci_dma_burst_strategy { | 605 | enum pci_dma_burst_strategy { |
591 | PCI_DMA_BURST_INFINITY, /* make bursts as large as possible, | 606 | PCI_DMA_BURST_INFINITY, /* make bursts as large as possible, |
592 | strategy_parameter is N/A */ | 607 | strategy_parameter is N/A */ |
593 | PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter | 608 | PCI_DMA_BURST_BOUNDARY, /* disconnect at every strategy_parameter |
594 | byte boundaries */ | 609 | byte boundaries */ |
595 | PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of | 610 | PCI_DMA_BURST_MULTIPLE, /* disconnect at some multiple of |
596 | strategy_parameter byte boundaries */ | 611 | strategy_parameter byte boundaries */ |
597 | }; | 612 | }; |
598 | 613 | ||
599 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) | 614 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) |
600 | extern struct pci_dev *isa_bridge; | 615 | extern struct pci_dev *isa_bridge; |
601 | #endif | 616 | #endif |
602 | 617 | ||
603 | struct msix_entry { | 618 | struct msix_entry { |
604 | u16 vector; /* kernel uses to write allocated vector */ | 619 | u16 vector; /* kernel uses to write allocated vector */ |
605 | u16 entry; /* driver uses to specify entry, OS writes */ | 620 | u16 entry; /* driver uses to specify entry, OS writes */ |
606 | }; | 621 | }; |
607 | 622 | ||
608 | 623 | ||
609 | #ifndef CONFIG_PCI_MSI | 624 | #ifndef CONFIG_PCI_MSI |
610 | static inline void pci_scan_msi_device(struct pci_dev *dev) {} | 625 | static inline void pci_scan_msi_device(struct pci_dev *dev) {} |
611 | static inline int pci_enable_msi(struct pci_dev *dev) {return -1;} | 626 | static inline int pci_enable_msi(struct pci_dev *dev) {return -1;} |
612 | static inline void pci_disable_msi(struct pci_dev *dev) {} | 627 | static inline void pci_disable_msi(struct pci_dev *dev) {} |
613 | static inline int pci_enable_msix(struct pci_dev* dev, | 628 | static inline int pci_enable_msix(struct pci_dev* dev, |
614 | struct msix_entry *entries, int nvec) {return -1;} | 629 | struct msix_entry *entries, int nvec) {return -1;} |
615 | static inline void pci_disable_msix(struct pci_dev *dev) {} | 630 | static inline void pci_disable_msix(struct pci_dev *dev) {} |
616 | static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {} | 631 | static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) {} |
617 | #else | 632 | #else |
618 | extern void pci_scan_msi_device(struct pci_dev *dev); | 633 | extern void pci_scan_msi_device(struct pci_dev *dev); |
619 | extern int pci_enable_msi(struct pci_dev *dev); | 634 | extern int pci_enable_msi(struct pci_dev *dev); |
620 | extern void pci_disable_msi(struct pci_dev *dev); | 635 | extern void pci_disable_msi(struct pci_dev *dev); |
621 | extern int pci_enable_msix(struct pci_dev* dev, | 636 | extern int pci_enable_msix(struct pci_dev* dev, |
622 | struct msix_entry *entries, int nvec); | 637 | struct msix_entry *entries, int nvec); |
623 | extern void pci_disable_msix(struct pci_dev *dev); | 638 | extern void pci_disable_msix(struct pci_dev *dev); |
624 | extern void msi_remove_pci_irq_vectors(struct pci_dev *dev); | 639 | extern void msi_remove_pci_irq_vectors(struct pci_dev *dev); |
625 | #endif | 640 | #endif |
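A hedged sketch of the usual calling pattern: try MSI first and, if that fails, fall back to the legacy INTx interrupt already stored in dev->irq:

    static void foo_setup_irq(struct pci_dev *pdev)
    {
            /* pci_enable_msi() rewrites pdev->irq on success. */
            if (pci_enable_msi(pdev) == 0)
                    dev_printk(KERN_INFO, &pdev->dev, "using MSI, irq %u\n", pdev->irq);
            else
                    dev_printk(KERN_INFO, &pdev->dev, "using INTx, irq %u\n", pdev->irq);
            /* The driver would now hand pdev->irq to request_irq(). */
    }

On teardown the driver frees the irq and then calls pci_disable_msi().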
626 | 641 | ||
627 | #ifdef CONFIG_HT_IRQ | 642 | #ifdef CONFIG_HT_IRQ |
628 | /* The functions a driver should call */ | 643 | /* The functions a driver should call */ |
629 | int ht_create_irq(struct pci_dev *dev, int idx); | 644 | int ht_create_irq(struct pci_dev *dev, int idx); |
630 | void ht_destroy_irq(unsigned int irq); | 645 | void ht_destroy_irq(unsigned int irq); |
631 | #endif /* CONFIG_HT_IRQ */ | 646 | #endif /* CONFIG_HT_IRQ */ |
632 | 647 | ||
633 | extern void pci_block_user_cfg_access(struct pci_dev *dev); | 648 | extern void pci_block_user_cfg_access(struct pci_dev *dev); |
634 | extern void pci_unblock_user_cfg_access(struct pci_dev *dev); | 649 | extern void pci_unblock_user_cfg_access(struct pci_dev *dev); |
635 | 650 | ||
636 | /* | 651 | /* |
637 | * PCI domain support. Sometimes called PCI segment (eg by ACPI), | 652 | * PCI domain support. Sometimes called PCI segment (eg by ACPI), |
638 | * a PCI domain is defined to be a set of PCI busses which share | 653 | * a PCI domain is defined to be a set of PCI busses which share |
639 | * configuration space. | 654 | * configuration space. |
640 | */ | 655 | */ |
641 | #ifndef CONFIG_PCI_DOMAINS | 656 | #ifndef CONFIG_PCI_DOMAINS |
642 | static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } | 657 | static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } |
643 | static inline int pci_proc_domain(struct pci_bus *bus) | 658 | static inline int pci_proc_domain(struct pci_bus *bus) |
644 | { | 659 | { |
645 | return 0; | 660 | return 0; |
646 | } | 661 | } |
647 | #endif | 662 | #endif |
648 | 663 | ||
649 | #else /* CONFIG_PCI is not enabled */ | 664 | #else /* CONFIG_PCI is not enabled */ |
650 | 665 | ||
651 | /* | 666 | /* |
652 | * If the system does not have PCI, clearly these return errors. Define | 667 | * If the system does not have PCI, clearly these return errors. Define |
653 | * these as simple inline functions to avoid hair in drivers. | 668 | * these as simple inline functions to avoid hair in drivers. |
654 | */ | 669 | */ |
655 | 670 | ||
656 | #define _PCI_NOP(o,s,t) \ | 671 | #define _PCI_NOP(o,s,t) \ |
657 | static inline int pci_##o##_config_##s (struct pci_dev *dev, int where, t val) \ | 672 | static inline int pci_##o##_config_##s (struct pci_dev *dev, int where, t val) \ |
658 | { return PCIBIOS_FUNC_NOT_SUPPORTED; } | 673 | { return PCIBIOS_FUNC_NOT_SUPPORTED; } |
659 | #define _PCI_NOP_ALL(o,x) _PCI_NOP(o,byte,u8 x) \ | 674 | #define _PCI_NOP_ALL(o,x) _PCI_NOP(o,byte,u8 x) \ |
660 | _PCI_NOP(o,word,u16 x) \ | 675 | _PCI_NOP(o,word,u16 x) \ |
661 | _PCI_NOP(o,dword,u32 x) | 676 | _PCI_NOP(o,dword,u32 x) |
662 | _PCI_NOP_ALL(read, *) | 677 | _PCI_NOP_ALL(read, *) |
663 | _PCI_NOP_ALL(write,) | 678 | _PCI_NOP_ALL(write,) |
664 | 679 | ||
665 | static inline struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev *from) | 680 | static inline struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, const struct pci_dev *from) |
666 | { return NULL; } | 681 | { return NULL; } |
667 | 682 | ||
668 | static inline struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn) | 683 | static inline struct pci_dev *pci_find_slot(unsigned int bus, unsigned int devfn) |
669 | { return NULL; } | 684 | { return NULL; } |
670 | 685 | ||
671 | static inline struct pci_dev *pci_get_device(unsigned int vendor, | 686 | static inline struct pci_dev *pci_get_device(unsigned int vendor, |
672 | unsigned int device, struct pci_dev *from) | 687 | unsigned int device, struct pci_dev *from) |
673 | { return NULL; } | 688 | { return NULL; } |
674 | 689 | ||
675 | static inline struct pci_dev *pci_get_device_reverse(unsigned int vendor, | 690 | static inline struct pci_dev *pci_get_device_reverse(unsigned int vendor, |
676 | unsigned int device, struct pci_dev *from) | 691 | unsigned int device, struct pci_dev *from) |
677 | { return NULL; } | 692 | { return NULL; } |
678 | 693 | ||
679 | static inline struct pci_dev *pci_get_subsys (unsigned int vendor, unsigned int device, | 694 | static inline struct pci_dev *pci_get_subsys (unsigned int vendor, unsigned int device, |
680 | unsigned int ss_vendor, unsigned int ss_device, struct pci_dev *from) | 695 | unsigned int ss_vendor, unsigned int ss_device, struct pci_dev *from) |
681 | { return NULL; } | 696 | { return NULL; } |
682 | 697 | ||
683 | static inline struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) | 698 | static inline struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) |
684 | { return NULL; } | 699 | { return NULL; } |
685 | 700 | ||
686 | #define pci_dev_present(ids) (0) | 701 | #define pci_dev_present(ids) (0) |
687 | #define pci_find_present(ids) (NULL) | 702 | #define pci_find_present(ids) (NULL) |
688 | #define pci_dev_put(dev) do { } while (0) | 703 | #define pci_dev_put(dev) do { } while (0) |
689 | 704 | ||
690 | static inline void pci_set_master(struct pci_dev *dev) { } | 705 | static inline void pci_set_master(struct pci_dev *dev) { } |
691 | static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; } | 706 | static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; } |
692 | static inline void pci_disable_device(struct pci_dev *dev) { } | 707 | static inline void pci_disable_device(struct pci_dev *dev) { } |
693 | static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; } | 708 | static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; } |
694 | static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY;} | 709 | static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY;} |
695 | static inline int __pci_register_driver(struct pci_driver *drv, struct module *owner) { return 0;} | 710 | static inline int __pci_register_driver(struct pci_driver *drv, struct module *owner) { return 0;} |
696 | static inline int pci_register_driver(struct pci_driver *drv) { return 0;} | 711 | static inline int pci_register_driver(struct pci_driver *drv) { return 0;} |
697 | static inline void pci_unregister_driver(struct pci_driver *drv) { } | 712 | static inline void pci_unregister_driver(struct pci_driver *drv) { } |
698 | static inline int pci_find_capability (struct pci_dev *dev, int cap) {return 0; } | 713 | static inline int pci_find_capability (struct pci_dev *dev, int cap) {return 0; } |
699 | static inline int pci_find_next_capability (struct pci_dev *dev, u8 post, int cap) { return 0; } | 714 | static inline int pci_find_next_capability (struct pci_dev *dev, u8 post, int cap) { return 0; } |
700 | static inline int pci_find_ext_capability (struct pci_dev *dev, int cap) {return 0; } | 715 | static inline int pci_find_ext_capability (struct pci_dev *dev, int cap) {return 0; } |
701 | static inline const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) { return NULL; } | 716 | static inline const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) { return NULL; } |
702 | 717 | ||
703 | /* Power management related routines */ | 718 | /* Power management related routines */ |
704 | static inline int pci_save_state(struct pci_dev *dev) { return 0; } | 719 | static inline int pci_save_state(struct pci_dev *dev) { return 0; } |
705 | static inline int pci_restore_state(struct pci_dev *dev) { return 0; } | 720 | static inline int pci_restore_state(struct pci_dev *dev) { return 0; } |
706 | static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { return 0; } | 721 | static inline int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { return 0; } |
707 | static inline pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) { return PCI_D0; } | 722 | static inline pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) { return PCI_D0; } |
708 | static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { return 0; } | 723 | static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) { return 0; } |
709 | 724 | ||
710 | #define isa_bridge ((struct pci_dev *)NULL) | 725 | #define isa_bridge ((struct pci_dev *)NULL) |
711 | 726 | ||
712 | #define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0) | 727 | #define pci_dma_burst_advice(pdev, strat, strategy_parameter) do { } while (0) |
713 | 728 | ||
714 | static inline void pci_block_user_cfg_access(struct pci_dev *dev) { } | 729 | static inline void pci_block_user_cfg_access(struct pci_dev *dev) { } |
715 | static inline void pci_unblock_user_cfg_access(struct pci_dev *dev) { } | 730 | static inline void pci_unblock_user_cfg_access(struct pci_dev *dev) { } |
716 | 731 | ||
717 | #endif /* CONFIG_PCI */ | 732 | #endif /* CONFIG_PCI */ |
718 | 733 | ||
719 | /* Include architecture-dependent settings and functions */ | 734 | /* Include architecture-dependent settings and functions */ |
720 | 735 | ||
721 | #include <asm/pci.h> | 736 | #include <asm/pci.h> |
722 | 737 | ||
723 | /* these helpers provide future and backwards compatibility | 738 | /* these helpers provide future and backwards compatibility |
724 | * for accessing popular PCI BAR info */ | 739 | * for accessing popular PCI BAR info */ |
725 | #define pci_resource_start(dev,bar) ((dev)->resource[(bar)].start) | 740 | #define pci_resource_start(dev,bar) ((dev)->resource[(bar)].start) |
726 | #define pci_resource_end(dev,bar) ((dev)->resource[(bar)].end) | 741 | #define pci_resource_end(dev,bar) ((dev)->resource[(bar)].end) |
727 | #define pci_resource_flags(dev,bar) ((dev)->resource[(bar)].flags) | 742 | #define pci_resource_flags(dev,bar) ((dev)->resource[(bar)].flags) |
728 | #define pci_resource_len(dev,bar) \ | 743 | #define pci_resource_len(dev,bar) \ |
729 | ((pci_resource_start((dev),(bar)) == 0 && \ | 744 | ((pci_resource_start((dev),(bar)) == 0 && \ |
730 | pci_resource_end((dev),(bar)) == \ | 745 | pci_resource_end((dev),(bar)) == \ |
731 | pci_resource_start((dev),(bar))) ? 0 : \ | 746 | pci_resource_start((dev),(bar))) ? 0 : \ |
732 | \ | 747 | \ |
733 | (pci_resource_end((dev),(bar)) - \ | 748 | (pci_resource_end((dev),(bar)) - \ |
734 | pci_resource_start((dev),(bar)) + 1)) | 749 | pci_resource_start((dev),(bar)) + 1)) |
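Typical use of these helpers (illustrative; BAR 0 and the "foo" name are assumptions): claim the region, then map it if it is memory-mapped:

    static void __iomem *foo_map_bar0(struct pci_dev *pdev)
    {
            if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM))
                    return NULL;            /* only memory BARs can be ioremapped */
            if (pci_request_region(pdev, 0, "foo"))
                    return NULL;
            return ioremap(pci_resource_start(pdev, 0),
                           pci_resource_len(pdev, 0));
    }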
735 | 750 | ||
736 | /* Similar to the helpers above, these manipulate per-pci_dev | 751 | /* Similar to the helpers above, these manipulate per-pci_dev |
737 | * driver-specific data. They are really just a wrapper around | 752 | * driver-specific data. They are really just a wrapper around |
738 | * the generic device structure functions of these calls. | 753 | * the generic device structure functions of these calls. |
739 | */ | 754 | */ |
740 | static inline void *pci_get_drvdata (struct pci_dev *pdev) | 755 | static inline void *pci_get_drvdata (struct pci_dev *pdev) |
741 | { | 756 | { |
742 | return dev_get_drvdata(&pdev->dev); | 757 | return dev_get_drvdata(&pdev->dev); |
743 | } | 758 | } |
744 | 759 | ||
745 | static inline void pci_set_drvdata (struct pci_dev *pdev, void *data) | 760 | static inline void pci_set_drvdata (struct pci_dev *pdev, void *data) |
746 | { | 761 | { |
747 | dev_set_drvdata(&pdev->dev, data); | 762 | dev_set_drvdata(&pdev->dev, data); |
748 | } | 763 | } |
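The usual pairing, with a hypothetical foo_priv structure: allocate per-device state in probe, hang it off the device, and fetch it back wherever only the pci_dev is at hand:

    struct foo_priv {
            void __iomem *regs;
    };

    static int foo_attach(struct pci_dev *pdev)
    {
            struct foo_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

            if (!priv)
                    return -ENOMEM;
            pci_set_drvdata(pdev, priv);
            return 0;
    }

    static void foo_detach(struct pci_dev *pdev)
    {
            kfree(pci_get_drvdata(pdev));
            pci_set_drvdata(pdev, NULL);
    }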
749 | 764 | ||
750 | /* If you want to know what to call your pci_dev, ask this function. | 765 | /* If you want to know what to call your pci_dev, ask this function. |
751 | * Again, it's a wrapper around the generic device. | 766 | * Again, it's a wrapper around the generic device. |
752 | */ | 767 | */ |
753 | static inline char *pci_name(struct pci_dev *pdev) | 768 | static inline char *pci_name(struct pci_dev *pdev) |
754 | { | 769 | { |
755 | return pdev->dev.bus_id; | 770 | return pdev->dev.bus_id; |
756 | } | 771 | } |
757 | 772 | ||
758 | 773 | ||
759 | /* Some archs don't want to expose struct resource to userland as-is | 774 | /* Some archs don't want to expose struct resource to userland as-is |
760 | * in sysfs and /proc | 775 | * in sysfs and /proc |
761 | */ | 776 | */ |
762 | #ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER | 777 | #ifndef HAVE_ARCH_PCI_RESOURCE_TO_USER |
763 | static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, | 778 | static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, |
764 | const struct resource *rsrc, resource_size_t *start, | 779 | const struct resource *rsrc, resource_size_t *start, |
765 | resource_size_t *end) | 780 | resource_size_t *end) |
766 | { | 781 | { |
767 | *start = rsrc->start; | 782 | *start = rsrc->start; |
768 | *end = rsrc->end; | 783 | *end = rsrc->end; |
769 | } | 784 | } |
770 | #endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */ | 785 | #endif /* HAVE_ARCH_PCI_RESOURCE_TO_USER */ |
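An architecture that exposes different (bus-relative) addresses to userspace would define HAVE_ARCH_PCI_RESOURCE_TO_USER in its asm/pci.h and supply its own version of the function above. The sketch below only illustrates the idea; the function name and the zero offset are assumptions, and a real port would derive the offset from its host bridge windows.

#include <linux/pci.h>

/* Illustrative stand-in for an arch override: report bus-relative
 * addresses instead of raw resource values.  The offset here is a
 * placeholder; real ports compute it per host bridge. */
static inline void my_arch_resource_to_user(const struct pci_dev *dev,
		const struct resource *rsrc,
		resource_size_t *start, resource_size_t *end)
{
	resource_size_t bus_offset = 0;	/* hypothetical CPU->bus offset */

	*start = rsrc->start - bus_offset;
	*end = rsrc->end - bus_offset;
}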
771 | 786 | ||
772 | 787 | ||
773 | /* | 788 | /* |
774 | * The world is not perfect and supplies us with broken PCI devices. | 789 | * The world is not perfect and supplies us with broken PCI devices. |
775 | * For at least a part of these bugs we need a work-around, so both | 790 | * For at least a part of these bugs we need a work-around, so both |
776 | * generic (drivers/pci/quirks.c) and per-architecture code can define | 791 | * generic (drivers/pci/quirks.c) and per-architecture code can define |
777 | * fixup hooks to be called for particular buggy devices. | 792 | * fixup hooks to be called for particular buggy devices. |
778 | */ | 793 | */ |
779 | 794 | ||
780 | struct pci_fixup { | 795 | struct pci_fixup { |
781 | u16 vendor, device; /* You can use PCI_ANY_ID here of course */ | 796 | u16 vendor, device; /* You can use PCI_ANY_ID here of course */ |
782 | void (*hook)(struct pci_dev *dev); | 797 | void (*hook)(struct pci_dev *dev); |
783 | }; | 798 | }; |
784 | 799 | ||
785 | enum pci_fixup_pass { | 800 | enum pci_fixup_pass { |
786 | pci_fixup_early, /* Before probing BARs */ | 801 | pci_fixup_early, /* Before probing BARs */ |
787 | pci_fixup_header, /* After reading configuration header */ | 802 | pci_fixup_header, /* After reading configuration header */ |
788 | pci_fixup_final, /* Final phase of device fixups */ | 803 | pci_fixup_final, /* Final phase of device fixups */ |
789 | pci_fixup_enable, /* pci_enable_device() time */ | 804 | pci_fixup_enable, /* pci_enable_device() time */ |
805 | pci_fixup_resume, /* pci_device_resume() time */ | ||
790 | }; | 806 | }; |
791 | 807 | ||
792 | /* Anonymous variables would be nice... */ | 808 | /* Anonymous variables would be nice... */ |
793 | #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, hook) \ | 809 | #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, hook) \ |
794 | static const struct pci_fixup __pci_fixup_##name __attribute_used__ \ | 810 | static const struct pci_fixup __pci_fixup_##name __attribute_used__ \ |
795 | __attribute__((__section__(#section))) = { vendor, device, hook }; | 811 | __attribute__((__section__(#section))) = { vendor, device, hook }; |
796 | #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ | 812 | #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ |
797 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ | 813 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ |
798 | vendor##device##hook, vendor, device, hook) | 814 | vendor##device##hook, vendor, device, hook) |
799 | #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ | 815 | #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \ |
800 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ | 816 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \ |
801 | vendor##device##hook, vendor, device, hook) | 817 | vendor##device##hook, vendor, device, hook) |
802 | #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ | 818 | #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \ |
803 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ | 819 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \ |
804 | vendor##device##hook, vendor, device, hook) | 820 | vendor##device##hook, vendor, device, hook) |
805 | #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ | 821 | #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \ |
806 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ | 822 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \ |
807 | vendor##device##hook, vendor, device, hook) | 823 | vendor##device##hook, vendor, device, hook) |
824 | #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ | ||
825 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ | ||
826 | resume##vendor##device##hook, vendor, device, hook) | ||
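This is the new hook the rest of the patch relies on: quirks placed in the .pci_fixup_resume section are replayed when a device resumes, so register settings lost over suspend-to-RAM can be restored. A hedged sketch of how a quirk would opt in (the config register and device ID below are made up and are not the commit's actual VIA fixup):

#include <linux/pci.h>
#include <linux/pci_ids.h>

/* Illustrative quirk: force a (made-up) config bit back on.  Declaring
 * it with both FINAL and the new RESUME macro gets it applied at boot
 * and again after resume from RAM. */
static void quirk_example_restore(struct pci_dev *dev)
{
	u8 tmp;

	pci_read_config_byte(dev, 0x50, &tmp);
	pci_write_config_byte(dev, 0x50, tmp | 0x01);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_example_restore);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_example_restore);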
808 | 827 | ||
809 | 828 | ||
810 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); | 829 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); |
811 | 830 | ||
812 | extern int pci_pci_problems; | 831 | extern int pci_pci_problems; |
813 | #define PCIPCI_FAIL 1 /* No PCI PCI DMA */ | 832 | #define PCIPCI_FAIL 1 /* No PCI PCI DMA */ |
814 | #define PCIPCI_TRITON 2 | 833 | #define PCIPCI_TRITON 2 |
815 | #define PCIPCI_NATOMA 4 | 834 | #define PCIPCI_NATOMA 4 |
816 | #define PCIPCI_VIAETBF 8 | 835 | #define PCIPCI_VIAETBF 8 |
817 | #define PCIPCI_VSFX 16 | 836 | #define PCIPCI_VSFX 16 |
818 | #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */ | 837 | #define PCIPCI_ALIMAGIK 32 /* Need low latency setting */ |
819 | #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */ | 838 | #define PCIAGP_FAIL 64 /* No PCI to AGP DMA */ |
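Drivers that do risky peer-to-peer or AGP DMA consult these flags, which the chipset quirks in drivers/pci/quirks.c set for known-broken bridges. A minimal sketch with a hypothetical helper:

#include <linux/pci.h>

/* Illustrative check: skip an optional DMA fast path on chipsets that
 * the quirk code has flagged as unable to do PCI-PCI or PCI-AGP DMA. */
static int my_can_use_fast_dma(void)
{
	if (pci_pci_problems & (PCIPCI_FAIL | PCIAGP_FAIL))
		return 0;
	return 1;
}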
820 | 839 | ||
821 | #endif /* __KERNEL__ */ | 840 | #endif /* __KERNEL__ */ |
822 | #endif /* LINUX_PCI_H */ | 841 | #endif /* LINUX_PCI_H */ |
823 | 842 |