Commit 0b49ec37a20bc7eb7178105aadaa8d1ecba825f8
Committed by Jesse Barnes
1 parent: 37bed90094
Exists in master and in 7 other branches
PCI/MSI: fix msi_mask() shift fix

Hidetoshi Seto points out that commit bffac3c593eba1f9da3efd0199e49ea6558a40ce
has wrong values in the array. Rather than correct the array, we can just use
a bounds check and perform the calculation specified in the comment. As a
bonus, this will not run off the end of the array if the device specifies an
illegal value in the MSI capability.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
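For reference, a minimal standalone sketch (user-space C, not part of the patch) of the calculation the commit switches to. The values it prints can be compared against the old lookup table { 1, 2, 4, 0xf, 0xff, 0xffff, 0xffffffff } to see where that table diverged, and the x >= 5 bound is what keeps the shift below the width of u32:

/*
 * Illustrative only -- a user-space version of the new msi_mask()
 * calculation, not kernel code.  Values 0..5 encode log2 of the
 * number of MSI vectors (1, 2, 4, 8, 16, 32).
 */
#include <stdio.h>
#include <inttypes.h>

static uint32_t msi_mask(unsigned int x)
{
	/* Don't shift by >= width of type */
	if (x >= 5)
		return 0xffffffff;
	return (1 << (1 << x)) - 1;
}

int main(void)
{
	unsigned int x;

	/* Prints 0x1, 0x3, 0xf, 0xff, 0xffff, 0xffffffff for x = 0..5 */
	for (x = 0; x <= 5; x++)
		printf("x=%u vectors=%u mask=0x%" PRIx32 "\n",
		       x, 1u << x, msi_mask(x));
	return 0;
}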
Showing 1 changed file with 4 additions and 6 deletions
drivers/pci/msi.c
1 | /* | 1 | /* |
2 | * File: msi.c | 2 | * File: msi.c |
3 | * Purpose: PCI Message Signaled Interrupt (MSI) | 3 | * Purpose: PCI Message Signaled Interrupt (MSI) |
4 | * | 4 | * |
5 | * Copyright (C) 2003-2004 Intel | 5 | * Copyright (C) 2003-2004 Intel |
6 | * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) | 6 | * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/err.h> | 9 | #include <linux/err.h> |
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/irq.h> | 11 | #include <linux/irq.h> |
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/ioport.h> | 14 | #include <linux/ioport.h> |
15 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
16 | #include <linux/proc_fs.h> | 16 | #include <linux/proc_fs.h> |
17 | #include <linux/msi.h> | 17 | #include <linux/msi.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | 19 | ||
20 | #include <asm/errno.h> | 20 | #include <asm/errno.h> |
21 | #include <asm/io.h> | 21 | #include <asm/io.h> |
22 | 22 | ||
23 | #include "pci.h" | 23 | #include "pci.h" |
24 | #include "msi.h" | 24 | #include "msi.h" |
25 | 25 | ||
26 | static int pci_msi_enable = 1; | 26 | static int pci_msi_enable = 1; |
27 | 27 | ||
28 | /* Arch hooks */ | 28 | /* Arch hooks */ |
29 | 29 | ||
30 | int __attribute__ ((weak)) | 30 | int __attribute__ ((weak)) |
31 | arch_msi_check_device(struct pci_dev *dev, int nvec, int type) | 31 | arch_msi_check_device(struct pci_dev *dev, int nvec, int type) |
32 | { | 32 | { |
33 | return 0; | 33 | return 0; |
34 | } | 34 | } |
35 | 35 | ||
36 | int __attribute__ ((weak)) | 36 | int __attribute__ ((weak)) |
37 | arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry) | 37 | arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *entry) |
38 | { | 38 | { |
39 | return 0; | 39 | return 0; |
40 | } | 40 | } |
41 | 41 | ||
42 | int __attribute__ ((weak)) | 42 | int __attribute__ ((weak)) |
43 | arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | 43 | arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
44 | { | 44 | { |
45 | struct msi_desc *entry; | 45 | struct msi_desc *entry; |
46 | int ret; | 46 | int ret; |
47 | 47 | ||
48 | list_for_each_entry(entry, &dev->msi_list, list) { | 48 | list_for_each_entry(entry, &dev->msi_list, list) { |
49 | ret = arch_setup_msi_irq(dev, entry); | 49 | ret = arch_setup_msi_irq(dev, entry); |
50 | if (ret) | 50 | if (ret) |
51 | return ret; | 51 | return ret; |
52 | } | 52 | } |
53 | 53 | ||
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
56 | 56 | ||
57 | void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq) | 57 | void __attribute__ ((weak)) arch_teardown_msi_irq(unsigned int irq) |
58 | { | 58 | { |
59 | return; | 59 | return; |
60 | } | 60 | } |
61 | 61 | ||
62 | void __attribute__ ((weak)) | 62 | void __attribute__ ((weak)) |
63 | arch_teardown_msi_irqs(struct pci_dev *dev) | 63 | arch_teardown_msi_irqs(struct pci_dev *dev) |
64 | { | 64 | { |
65 | struct msi_desc *entry; | 65 | struct msi_desc *entry; |
66 | 66 | ||
67 | list_for_each_entry(entry, &dev->msi_list, list) { | 67 | list_for_each_entry(entry, &dev->msi_list, list) { |
68 | if (entry->irq != 0) | 68 | if (entry->irq != 0) |
69 | arch_teardown_msi_irq(entry->irq); | 69 | arch_teardown_msi_irq(entry->irq); |
70 | } | 70 | } |
71 | } | 71 | } |
72 | 72 | ||
73 | static void __msi_set_enable(struct pci_dev *dev, int pos, int enable) | 73 | static void __msi_set_enable(struct pci_dev *dev, int pos, int enable) |
74 | { | 74 | { |
75 | u16 control; | 75 | u16 control; |
76 | 76 | ||
77 | if (pos) { | 77 | if (pos) { |
78 | pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); | 78 | pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); |
79 | control &= ~PCI_MSI_FLAGS_ENABLE; | 79 | control &= ~PCI_MSI_FLAGS_ENABLE; |
80 | if (enable) | 80 | if (enable) |
81 | control |= PCI_MSI_FLAGS_ENABLE; | 81 | control |= PCI_MSI_FLAGS_ENABLE; |
82 | pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); | 82 | pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); |
83 | } | 83 | } |
84 | } | 84 | } |
85 | 85 | ||
86 | static void msi_set_enable(struct pci_dev *dev, int enable) | 86 | static void msi_set_enable(struct pci_dev *dev, int enable) |
87 | { | 87 | { |
88 | __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable); | 88 | __msi_set_enable(dev, pci_find_capability(dev, PCI_CAP_ID_MSI), enable); |
89 | } | 89 | } |
90 | 90 | ||
91 | static void msix_set_enable(struct pci_dev *dev, int enable) | 91 | static void msix_set_enable(struct pci_dev *dev, int enable) |
92 | { | 92 | { |
93 | int pos; | 93 | int pos; |
94 | u16 control; | 94 | u16 control; |
95 | 95 | ||
96 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 96 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); |
97 | if (pos) { | 97 | if (pos) { |
98 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); | 98 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); |
99 | control &= ~PCI_MSIX_FLAGS_ENABLE; | 99 | control &= ~PCI_MSIX_FLAGS_ENABLE; |
100 | if (enable) | 100 | if (enable) |
101 | control |= PCI_MSIX_FLAGS_ENABLE; | 101 | control |= PCI_MSIX_FLAGS_ENABLE; |
102 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | 102 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); |
103 | } | 103 | } |
104 | } | 104 | } |
105 | 105 | ||
@@ -106,9 +106,7 @@
-/*
- * Essentially, this is ((1 << (1 << x)) - 1), but without the
- * undefinedness of a << 32.
- */
 static inline __attribute_const__ u32 msi_mask(unsigned x)
 {
-	static const u32 mask[] = { 1, 2, 4, 0xf, 0xff, 0xffff, 0xffffffff };
-	return mask[x];
+	/* Don't shift by >= width of type */
+	if (x >= 5)
+		return 0xffffffff;
+	return (1 << (1 << x)) - 1;
 }
115 | 113 | ||
116 | static void msix_flush_writes(struct irq_desc *desc) | 114 | static void msix_flush_writes(struct irq_desc *desc) |
117 | { | 115 | { |
118 | struct msi_desc *entry; | 116 | struct msi_desc *entry; |
119 | 117 | ||
120 | entry = get_irq_desc_msi(desc); | 118 | entry = get_irq_desc_msi(desc); |
121 | BUG_ON(!entry || !entry->dev); | 119 | BUG_ON(!entry || !entry->dev); |
122 | switch (entry->msi_attrib.type) { | 120 | switch (entry->msi_attrib.type) { |
123 | case PCI_CAP_ID_MSI: | 121 | case PCI_CAP_ID_MSI: |
124 | /* nothing to do */ | 122 | /* nothing to do */ |
125 | break; | 123 | break; |
126 | case PCI_CAP_ID_MSIX: | 124 | case PCI_CAP_ID_MSIX: |
127 | { | 125 | { |
128 | int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + | 126 | int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + |
129 | PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; | 127 | PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; |
130 | readl(entry->mask_base + offset); | 128 | readl(entry->mask_base + offset); |
131 | break; | 129 | break; |
132 | } | 130 | } |
133 | default: | 131 | default: |
134 | BUG(); | 132 | BUG(); |
135 | break; | 133 | break; |
136 | } | 134 | } |
137 | } | 135 | } |
138 | 136 | ||
139 | /* | 137 | /* |
140 | * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to | 138 | * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to |
141 | * mask all MSI interrupts by clearing the MSI enable bit does not work | 139 | * mask all MSI interrupts by clearing the MSI enable bit does not work |
142 | * reliably as devices without an INTx disable bit will then generate a | 140 | * reliably as devices without an INTx disable bit will then generate a |
143 | * level IRQ which will never be cleared. | 141 | * level IRQ which will never be cleared. |
144 | * | 142 | * |
145 | * Returns 1 if it succeeded in masking the interrupt and 0 if the device | 143 | * Returns 1 if it succeeded in masking the interrupt and 0 if the device |
146 | * doesn't support MSI masking. | 144 | * doesn't support MSI masking. |
147 | */ | 145 | */ |
148 | static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag) | 146 | static int msi_set_mask_bits(struct irq_desc *desc, u32 mask, u32 flag) |
149 | { | 147 | { |
150 | struct msi_desc *entry; | 148 | struct msi_desc *entry; |
151 | 149 | ||
152 | entry = get_irq_desc_msi(desc); | 150 | entry = get_irq_desc_msi(desc); |
153 | BUG_ON(!entry || !entry->dev); | 151 | BUG_ON(!entry || !entry->dev); |
154 | switch (entry->msi_attrib.type) { | 152 | switch (entry->msi_attrib.type) { |
155 | case PCI_CAP_ID_MSI: | 153 | case PCI_CAP_ID_MSI: |
156 | if (entry->msi_attrib.maskbit) { | 154 | if (entry->msi_attrib.maskbit) { |
157 | int pos; | 155 | int pos; |
158 | u32 mask_bits; | 156 | u32 mask_bits; |
159 | 157 | ||
160 | pos = (long)entry->mask_base; | 158 | pos = (long)entry->mask_base; |
161 | pci_read_config_dword(entry->dev, pos, &mask_bits); | 159 | pci_read_config_dword(entry->dev, pos, &mask_bits); |
162 | mask_bits &= ~(mask); | 160 | mask_bits &= ~(mask); |
163 | mask_bits |= flag & mask; | 161 | mask_bits |= flag & mask; |
164 | pci_write_config_dword(entry->dev, pos, mask_bits); | 162 | pci_write_config_dword(entry->dev, pos, mask_bits); |
165 | } else { | 163 | } else { |
166 | return 0; | 164 | return 0; |
167 | } | 165 | } |
168 | break; | 166 | break; |
169 | case PCI_CAP_ID_MSIX: | 167 | case PCI_CAP_ID_MSIX: |
170 | { | 168 | { |
171 | int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + | 169 | int offset = entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + |
172 | PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; | 170 | PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET; |
173 | writel(flag, entry->mask_base + offset); | 171 | writel(flag, entry->mask_base + offset); |
174 | readl(entry->mask_base + offset); | 172 | readl(entry->mask_base + offset); |
175 | break; | 173 | break; |
176 | } | 174 | } |
177 | default: | 175 | default: |
178 | BUG(); | 176 | BUG(); |
179 | break; | 177 | break; |
180 | } | 178 | } |
181 | entry->msi_attrib.masked = !!flag; | 179 | entry->msi_attrib.masked = !!flag; |
182 | return 1; | 180 | return 1; |
183 | } | 181 | } |
184 | 182 | ||
185 | void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | 183 | void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) |
186 | { | 184 | { |
187 | struct msi_desc *entry = get_irq_desc_msi(desc); | 185 | struct msi_desc *entry = get_irq_desc_msi(desc); |
188 | switch(entry->msi_attrib.type) { | 186 | switch(entry->msi_attrib.type) { |
189 | case PCI_CAP_ID_MSI: | 187 | case PCI_CAP_ID_MSI: |
190 | { | 188 | { |
191 | struct pci_dev *dev = entry->dev; | 189 | struct pci_dev *dev = entry->dev; |
192 | int pos = entry->msi_attrib.pos; | 190 | int pos = entry->msi_attrib.pos; |
193 | u16 data; | 191 | u16 data; |
194 | 192 | ||
195 | pci_read_config_dword(dev, msi_lower_address_reg(pos), | 193 | pci_read_config_dword(dev, msi_lower_address_reg(pos), |
196 | &msg->address_lo); | 194 | &msg->address_lo); |
197 | if (entry->msi_attrib.is_64) { | 195 | if (entry->msi_attrib.is_64) { |
198 | pci_read_config_dword(dev, msi_upper_address_reg(pos), | 196 | pci_read_config_dword(dev, msi_upper_address_reg(pos), |
199 | &msg->address_hi); | 197 | &msg->address_hi); |
200 | pci_read_config_word(dev, msi_data_reg(pos, 1), &data); | 198 | pci_read_config_word(dev, msi_data_reg(pos, 1), &data); |
201 | } else { | 199 | } else { |
202 | msg->address_hi = 0; | 200 | msg->address_hi = 0; |
203 | pci_read_config_word(dev, msi_data_reg(pos, 0), &data); | 201 | pci_read_config_word(dev, msi_data_reg(pos, 0), &data); |
204 | } | 202 | } |
205 | msg->data = data; | 203 | msg->data = data; |
206 | break; | 204 | break; |
207 | } | 205 | } |
208 | case PCI_CAP_ID_MSIX: | 206 | case PCI_CAP_ID_MSIX: |
209 | { | 207 | { |
210 | void __iomem *base; | 208 | void __iomem *base; |
211 | base = entry->mask_base + | 209 | base = entry->mask_base + |
212 | entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; | 210 | entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; |
213 | 211 | ||
214 | msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); | 212 | msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); |
215 | msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); | 213 | msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); |
216 | msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET); | 214 | msg->data = readl(base + PCI_MSIX_ENTRY_DATA_OFFSET); |
217 | break; | 215 | break; |
218 | } | 216 | } |
219 | default: | 217 | default: |
220 | BUG(); | 218 | BUG(); |
221 | } | 219 | } |
222 | } | 220 | } |
223 | 221 | ||
224 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) | 222 | void read_msi_msg(unsigned int irq, struct msi_msg *msg) |
225 | { | 223 | { |
226 | struct irq_desc *desc = irq_to_desc(irq); | 224 | struct irq_desc *desc = irq_to_desc(irq); |
227 | 225 | ||
228 | read_msi_msg_desc(desc, msg); | 226 | read_msi_msg_desc(desc, msg); |
229 | } | 227 | } |
230 | 228 | ||
231 | void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) | 229 | void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg) |
232 | { | 230 | { |
233 | struct msi_desc *entry = get_irq_desc_msi(desc); | 231 | struct msi_desc *entry = get_irq_desc_msi(desc); |
234 | switch (entry->msi_attrib.type) { | 232 | switch (entry->msi_attrib.type) { |
235 | case PCI_CAP_ID_MSI: | 233 | case PCI_CAP_ID_MSI: |
236 | { | 234 | { |
237 | struct pci_dev *dev = entry->dev; | 235 | struct pci_dev *dev = entry->dev; |
238 | int pos = entry->msi_attrib.pos; | 236 | int pos = entry->msi_attrib.pos; |
239 | 237 | ||
240 | pci_write_config_dword(dev, msi_lower_address_reg(pos), | 238 | pci_write_config_dword(dev, msi_lower_address_reg(pos), |
241 | msg->address_lo); | 239 | msg->address_lo); |
242 | if (entry->msi_attrib.is_64) { | 240 | if (entry->msi_attrib.is_64) { |
243 | pci_write_config_dword(dev, msi_upper_address_reg(pos), | 241 | pci_write_config_dword(dev, msi_upper_address_reg(pos), |
244 | msg->address_hi); | 242 | msg->address_hi); |
245 | pci_write_config_word(dev, msi_data_reg(pos, 1), | 243 | pci_write_config_word(dev, msi_data_reg(pos, 1), |
246 | msg->data); | 244 | msg->data); |
247 | } else { | 245 | } else { |
248 | pci_write_config_word(dev, msi_data_reg(pos, 0), | 246 | pci_write_config_word(dev, msi_data_reg(pos, 0), |
249 | msg->data); | 247 | msg->data); |
250 | } | 248 | } |
251 | break; | 249 | break; |
252 | } | 250 | } |
253 | case PCI_CAP_ID_MSIX: | 251 | case PCI_CAP_ID_MSIX: |
254 | { | 252 | { |
255 | void __iomem *base; | 253 | void __iomem *base; |
256 | base = entry->mask_base + | 254 | base = entry->mask_base + |
257 | entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; | 255 | entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE; |
258 | 256 | ||
259 | writel(msg->address_lo, | 257 | writel(msg->address_lo, |
260 | base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); | 258 | base + PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET); |
261 | writel(msg->address_hi, | 259 | writel(msg->address_hi, |
262 | base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); | 260 | base + PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET); |
263 | writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET); | 261 | writel(msg->data, base + PCI_MSIX_ENTRY_DATA_OFFSET); |
264 | break; | 262 | break; |
265 | } | 263 | } |
266 | default: | 264 | default: |
267 | BUG(); | 265 | BUG(); |
268 | } | 266 | } |
269 | entry->msg = *msg; | 267 | entry->msg = *msg; |
270 | } | 268 | } |
271 | 269 | ||
272 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) | 270 | void write_msi_msg(unsigned int irq, struct msi_msg *msg) |
273 | { | 271 | { |
274 | struct irq_desc *desc = irq_to_desc(irq); | 272 | struct irq_desc *desc = irq_to_desc(irq); |
275 | 273 | ||
276 | write_msi_msg_desc(desc, msg); | 274 | write_msi_msg_desc(desc, msg); |
277 | } | 275 | } |
278 | 276 | ||
279 | void mask_msi_irq(unsigned int irq) | 277 | void mask_msi_irq(unsigned int irq) |
280 | { | 278 | { |
281 | struct irq_desc *desc = irq_to_desc(irq); | 279 | struct irq_desc *desc = irq_to_desc(irq); |
282 | 280 | ||
283 | msi_set_mask_bits(desc, 1, 1); | 281 | msi_set_mask_bits(desc, 1, 1); |
284 | msix_flush_writes(desc); | 282 | msix_flush_writes(desc); |
285 | } | 283 | } |
286 | 284 | ||
287 | void unmask_msi_irq(unsigned int irq) | 285 | void unmask_msi_irq(unsigned int irq) |
288 | { | 286 | { |
289 | struct irq_desc *desc = irq_to_desc(irq); | 287 | struct irq_desc *desc = irq_to_desc(irq); |
290 | 288 | ||
291 | msi_set_mask_bits(desc, 1, 0); | 289 | msi_set_mask_bits(desc, 1, 0); |
292 | msix_flush_writes(desc); | 290 | msix_flush_writes(desc); |
293 | } | 291 | } |
294 | 292 | ||
295 | static int msi_free_irqs(struct pci_dev* dev); | 293 | static int msi_free_irqs(struct pci_dev* dev); |
296 | 294 | ||
297 | static struct msi_desc* alloc_msi_entry(void) | 295 | static struct msi_desc* alloc_msi_entry(void) |
298 | { | 296 | { |
299 | struct msi_desc *entry; | 297 | struct msi_desc *entry; |
300 | 298 | ||
301 | entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL); | 299 | entry = kzalloc(sizeof(struct msi_desc), GFP_KERNEL); |
302 | if (!entry) | 300 | if (!entry) |
303 | return NULL; | 301 | return NULL; |
304 | 302 | ||
305 | INIT_LIST_HEAD(&entry->list); | 303 | INIT_LIST_HEAD(&entry->list); |
306 | entry->irq = 0; | 304 | entry->irq = 0; |
307 | entry->dev = NULL; | 305 | entry->dev = NULL; |
308 | 306 | ||
309 | return entry; | 307 | return entry; |
310 | } | 308 | } |
311 | 309 | ||
312 | static void pci_intx_for_msi(struct pci_dev *dev, int enable) | 310 | static void pci_intx_for_msi(struct pci_dev *dev, int enable) |
313 | { | 311 | { |
314 | if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG)) | 312 | if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG)) |
315 | pci_intx(dev, enable); | 313 | pci_intx(dev, enable); |
316 | } | 314 | } |
317 | 315 | ||
318 | static void __pci_restore_msi_state(struct pci_dev *dev) | 316 | static void __pci_restore_msi_state(struct pci_dev *dev) |
319 | { | 317 | { |
320 | int pos; | 318 | int pos; |
321 | u16 control; | 319 | u16 control; |
322 | struct msi_desc *entry; | 320 | struct msi_desc *entry; |
323 | 321 | ||
324 | if (!dev->msi_enabled) | 322 | if (!dev->msi_enabled) |
325 | return; | 323 | return; |
326 | 324 | ||
327 | entry = get_irq_msi(dev->irq); | 325 | entry = get_irq_msi(dev->irq); |
328 | pos = entry->msi_attrib.pos; | 326 | pos = entry->msi_attrib.pos; |
329 | 327 | ||
330 | pci_intx_for_msi(dev, 0); | 328 | pci_intx_for_msi(dev, 0); |
331 | msi_set_enable(dev, 0); | 329 | msi_set_enable(dev, 0); |
332 | write_msi_msg(dev->irq, &entry->msg); | 330 | write_msi_msg(dev->irq, &entry->msg); |
333 | if (entry->msi_attrib.maskbit) { | 331 | if (entry->msi_attrib.maskbit) { |
334 | struct irq_desc *desc = irq_to_desc(dev->irq); | 332 | struct irq_desc *desc = irq_to_desc(dev->irq); |
335 | msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask, | 333 | msi_set_mask_bits(desc, entry->msi_attrib.maskbits_mask, |
336 | entry->msi_attrib.masked); | 334 | entry->msi_attrib.masked); |
337 | } | 335 | } |
338 | 336 | ||
339 | pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); | 337 | pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); |
340 | control &= ~PCI_MSI_FLAGS_QSIZE; | 338 | control &= ~PCI_MSI_FLAGS_QSIZE; |
341 | control |= PCI_MSI_FLAGS_ENABLE; | 339 | control |= PCI_MSI_FLAGS_ENABLE; |
342 | pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); | 340 | pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); |
343 | } | 341 | } |
344 | 342 | ||
345 | static void __pci_restore_msix_state(struct pci_dev *dev) | 343 | static void __pci_restore_msix_state(struct pci_dev *dev) |
346 | { | 344 | { |
347 | int pos; | 345 | int pos; |
348 | struct msi_desc *entry; | 346 | struct msi_desc *entry; |
349 | u16 control; | 347 | u16 control; |
350 | 348 | ||
351 | if (!dev->msix_enabled) | 349 | if (!dev->msix_enabled) |
352 | return; | 350 | return; |
353 | 351 | ||
354 | /* route the table */ | 352 | /* route the table */ |
355 | pci_intx_for_msi(dev, 0); | 353 | pci_intx_for_msi(dev, 0); |
356 | msix_set_enable(dev, 0); | 354 | msix_set_enable(dev, 0); |
357 | 355 | ||
358 | list_for_each_entry(entry, &dev->msi_list, list) { | 356 | list_for_each_entry(entry, &dev->msi_list, list) { |
359 | struct irq_desc *desc = irq_to_desc(entry->irq); | 357 | struct irq_desc *desc = irq_to_desc(entry->irq); |
360 | write_msi_msg(entry->irq, &entry->msg); | 358 | write_msi_msg(entry->irq, &entry->msg); |
361 | msi_set_mask_bits(desc, 1, entry->msi_attrib.masked); | 359 | msi_set_mask_bits(desc, 1, entry->msi_attrib.masked); |
362 | } | 360 | } |
363 | 361 | ||
364 | BUG_ON(list_empty(&dev->msi_list)); | 362 | BUG_ON(list_empty(&dev->msi_list)); |
365 | entry = list_entry(dev->msi_list.next, struct msi_desc, list); | 363 | entry = list_entry(dev->msi_list.next, struct msi_desc, list); |
366 | pos = entry->msi_attrib.pos; | 364 | pos = entry->msi_attrib.pos; |
367 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); | 365 | pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control); |
368 | control &= ~PCI_MSIX_FLAGS_MASKALL; | 366 | control &= ~PCI_MSIX_FLAGS_MASKALL; |
369 | control |= PCI_MSIX_FLAGS_ENABLE; | 367 | control |= PCI_MSIX_FLAGS_ENABLE; |
370 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); | 368 | pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control); |
371 | } | 369 | } |
372 | 370 | ||
373 | void pci_restore_msi_state(struct pci_dev *dev) | 371 | void pci_restore_msi_state(struct pci_dev *dev) |
374 | { | 372 | { |
375 | __pci_restore_msi_state(dev); | 373 | __pci_restore_msi_state(dev); |
376 | __pci_restore_msix_state(dev); | 374 | __pci_restore_msix_state(dev); |
377 | } | 375 | } |
378 | EXPORT_SYMBOL_GPL(pci_restore_msi_state); | 376 | EXPORT_SYMBOL_GPL(pci_restore_msi_state); |
379 | 377 | ||
380 | /** | 378 | /** |
381 | * msi_capability_init - configure device's MSI capability structure | 379 | * msi_capability_init - configure device's MSI capability structure |
382 | * @dev: pointer to the pci_dev data structure of MSI device function | 380 | * @dev: pointer to the pci_dev data structure of MSI device function |
383 | * | 381 | * |
384 | * Setup the MSI capability structure of device function with a single | 382 | * Setup the MSI capability structure of device function with a single |
385 | * MSI irq, regardless of device function is capable of handling | 383 | * MSI irq, regardless of device function is capable of handling |
386 | * multiple messages. A return of zero indicates the successful setup | 384 | * multiple messages. A return of zero indicates the successful setup |
387 | * of an entry zero with the new MSI irq or non-zero for otherwise. | 385 | * of an entry zero with the new MSI irq or non-zero for otherwise. |
388 | **/ | 386 | **/ |
389 | static int msi_capability_init(struct pci_dev *dev) | 387 | static int msi_capability_init(struct pci_dev *dev) |
390 | { | 388 | { |
391 | struct msi_desc *entry; | 389 | struct msi_desc *entry; |
392 | int pos, ret; | 390 | int pos, ret; |
393 | u16 control; | 391 | u16 control; |
394 | 392 | ||
395 | msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */ | 393 | msi_set_enable(dev, 0); /* Ensure msi is disabled as I set it up */ |
396 | 394 | ||
397 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); | 395 | pos = pci_find_capability(dev, PCI_CAP_ID_MSI); |
398 | pci_read_config_word(dev, msi_control_reg(pos), &control); | 396 | pci_read_config_word(dev, msi_control_reg(pos), &control); |
399 | /* MSI Entry Initialization */ | 397 | /* MSI Entry Initialization */ |
400 | entry = alloc_msi_entry(); | 398 | entry = alloc_msi_entry(); |
401 | if (!entry) | 399 | if (!entry) |
402 | return -ENOMEM; | 400 | return -ENOMEM; |
403 | 401 | ||
404 | entry->msi_attrib.type = PCI_CAP_ID_MSI; | 402 | entry->msi_attrib.type = PCI_CAP_ID_MSI; |
405 | entry->msi_attrib.is_64 = is_64bit_address(control); | 403 | entry->msi_attrib.is_64 = is_64bit_address(control); |
406 | entry->msi_attrib.entry_nr = 0; | 404 | entry->msi_attrib.entry_nr = 0; |
407 | entry->msi_attrib.maskbit = is_mask_bit_support(control); | 405 | entry->msi_attrib.maskbit = is_mask_bit_support(control); |
408 | entry->msi_attrib.masked = 1; | 406 | entry->msi_attrib.masked = 1; |
409 | entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ | 407 | entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ |
410 | entry->msi_attrib.pos = pos; | 408 | entry->msi_attrib.pos = pos; |
411 | entry->dev = dev; | 409 | entry->dev = dev; |
412 | if (entry->msi_attrib.maskbit) { | 410 | if (entry->msi_attrib.maskbit) { |
413 | unsigned int base, maskbits, temp; | 411 | unsigned int base, maskbits, temp; |
414 | 412 | ||
415 | base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64); | 413 | base = msi_mask_bits_reg(pos, entry->msi_attrib.is_64); |
416 | entry->mask_base = (void __iomem *)(long)base; | 414 | entry->mask_base = (void __iomem *)(long)base; |
417 | 415 | ||
418 | /* All MSIs are unmasked by default, Mask them all */ | 416 | /* All MSIs are unmasked by default, Mask them all */ |
419 | pci_read_config_dword(dev, base, &maskbits); | 417 | pci_read_config_dword(dev, base, &maskbits); |
420 | temp = msi_mask((control & PCI_MSI_FLAGS_QMASK) >> 1); | 418 | temp = msi_mask((control & PCI_MSI_FLAGS_QMASK) >> 1); |
421 | maskbits |= temp; | 419 | maskbits |= temp; |
422 | pci_write_config_dword(dev, base, maskbits); | 420 | pci_write_config_dword(dev, base, maskbits); |
423 | entry->msi_attrib.maskbits_mask = temp; | 421 | entry->msi_attrib.maskbits_mask = temp; |
424 | } | 422 | } |
425 | list_add_tail(&entry->list, &dev->msi_list); | 423 | list_add_tail(&entry->list, &dev->msi_list); |
426 | 424 | ||
427 | /* Configure MSI capability structure */ | 425 | /* Configure MSI capability structure */ |
428 | ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI); | 426 | ret = arch_setup_msi_irqs(dev, 1, PCI_CAP_ID_MSI); |
429 | if (ret) { | 427 | if (ret) { |
430 | msi_free_irqs(dev); | 428 | msi_free_irqs(dev); |
431 | return ret; | 429 | return ret; |
432 | } | 430 | } |
433 | 431 | ||
434 | /* Set MSI enabled bits */ | 432 | /* Set MSI enabled bits */ |
435 | pci_intx_for_msi(dev, 0); | 433 | pci_intx_for_msi(dev, 0); |
436 | msi_set_enable(dev, 1); | 434 | msi_set_enable(dev, 1); |
437 | dev->msi_enabled = 1; | 435 | dev->msi_enabled = 1; |
438 | 436 | ||
439 | dev->irq = entry->irq; | 437 | dev->irq = entry->irq; |
440 | return 0; | 438 | return 0; |
441 | } | 439 | } |
442 | 440 | ||
443 | /** | 441 | /** |
444 | * msix_capability_init - configure device's MSI-X capability | 442 | * msix_capability_init - configure device's MSI-X capability |
445 | * @dev: pointer to the pci_dev data structure of MSI-X device function | 443 | * @dev: pointer to the pci_dev data structure of MSI-X device function |
446 | * @entries: pointer to an array of struct msix_entry entries | 444 | * @entries: pointer to an array of struct msix_entry entries |
447 | * @nvec: number of @entries | 445 | * @nvec: number of @entries |
448 | * | 446 | * |
449 | * Setup the MSI-X capability structure of device function with a | 447 | * Setup the MSI-X capability structure of device function with a |
450 | * single MSI-X irq. A return of zero indicates the successful setup of | 448 | * single MSI-X irq. A return of zero indicates the successful setup of |
451 | * requested MSI-X entries with allocated irqs or non-zero for otherwise. | 449 | * requested MSI-X entries with allocated irqs or non-zero for otherwise. |
452 | **/ | 450 | **/ |
453 | static int msix_capability_init(struct pci_dev *dev, | 451 | static int msix_capability_init(struct pci_dev *dev, |
454 | struct msix_entry *entries, int nvec) | 452 | struct msix_entry *entries, int nvec) |
455 | { | 453 | { |
456 | struct msi_desc *entry; | 454 | struct msi_desc *entry; |
457 | int pos, i, j, nr_entries, ret; | 455 | int pos, i, j, nr_entries, ret; |
458 | unsigned long phys_addr; | 456 | unsigned long phys_addr; |
459 | u32 table_offset; | 457 | u32 table_offset; |
460 | u16 control; | 458 | u16 control; |
461 | u8 bir; | 459 | u8 bir; |
462 | void __iomem *base; | 460 | void __iomem *base; |
463 | 461 | ||
464 | msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */ | 462 | msix_set_enable(dev, 0);/* Ensure msix is disabled as I set it up */ |
465 | 463 | ||
466 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 464 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); |
467 | /* Request & Map MSI-X table region */ | 465 | /* Request & Map MSI-X table region */ |
468 | pci_read_config_word(dev, msi_control_reg(pos), &control); | 466 | pci_read_config_word(dev, msi_control_reg(pos), &control); |
469 | nr_entries = multi_msix_capable(control); | 467 | nr_entries = multi_msix_capable(control); |
470 | 468 | ||
471 | pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); | 469 | pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset); |
472 | bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); | 470 | bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); |
473 | table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; | 471 | table_offset &= ~PCI_MSIX_FLAGS_BIRMASK; |
474 | phys_addr = pci_resource_start (dev, bir) + table_offset; | 472 | phys_addr = pci_resource_start (dev, bir) + table_offset; |
475 | base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); | 473 | base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); |
476 | if (base == NULL) | 474 | if (base == NULL) |
477 | return -ENOMEM; | 475 | return -ENOMEM; |
478 | 476 | ||
479 | /* MSI-X Table Initialization */ | 477 | /* MSI-X Table Initialization */ |
480 | for (i = 0; i < nvec; i++) { | 478 | for (i = 0; i < nvec; i++) { |
481 | entry = alloc_msi_entry(); | 479 | entry = alloc_msi_entry(); |
482 | if (!entry) | 480 | if (!entry) |
483 | break; | 481 | break; |
484 | 482 | ||
485 | j = entries[i].entry; | 483 | j = entries[i].entry; |
486 | entry->msi_attrib.type = PCI_CAP_ID_MSIX; | 484 | entry->msi_attrib.type = PCI_CAP_ID_MSIX; |
487 | entry->msi_attrib.is_64 = 1; | 485 | entry->msi_attrib.is_64 = 1; |
488 | entry->msi_attrib.entry_nr = j; | 486 | entry->msi_attrib.entry_nr = j; |
489 | entry->msi_attrib.maskbit = 1; | 487 | entry->msi_attrib.maskbit = 1; |
490 | entry->msi_attrib.masked = 1; | 488 | entry->msi_attrib.masked = 1; |
491 | entry->msi_attrib.default_irq = dev->irq; | 489 | entry->msi_attrib.default_irq = dev->irq; |
492 | entry->msi_attrib.pos = pos; | 490 | entry->msi_attrib.pos = pos; |
493 | entry->dev = dev; | 491 | entry->dev = dev; |
494 | entry->mask_base = base; | 492 | entry->mask_base = base; |
495 | 493 | ||
496 | list_add_tail(&entry->list, &dev->msi_list); | 494 | list_add_tail(&entry->list, &dev->msi_list); |
497 | } | 495 | } |
498 | 496 | ||
499 | ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); | 497 | ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); |
500 | if (ret) { | 498 | if (ret) { |
501 | int avail = 0; | 499 | int avail = 0; |
502 | list_for_each_entry(entry, &dev->msi_list, list) { | 500 | list_for_each_entry(entry, &dev->msi_list, list) { |
503 | if (entry->irq != 0) { | 501 | if (entry->irq != 0) { |
504 | avail++; | 502 | avail++; |
505 | } | 503 | } |
506 | } | 504 | } |
507 | 505 | ||
508 | msi_free_irqs(dev); | 506 | msi_free_irqs(dev); |
509 | 507 | ||
510 | /* If we had some success report the number of irqs | 508 | /* If we had some success report the number of irqs |
511 | * we succeeded in setting up. | 509 | * we succeeded in setting up. |
512 | */ | 510 | */ |
513 | if (avail == 0) | 511 | if (avail == 0) |
514 | avail = ret; | 512 | avail = ret; |
515 | return avail; | 513 | return avail; |
516 | } | 514 | } |
517 | 515 | ||
518 | i = 0; | 516 | i = 0; |
519 | list_for_each_entry(entry, &dev->msi_list, list) { | 517 | list_for_each_entry(entry, &dev->msi_list, list) { |
520 | entries[i].vector = entry->irq; | 518 | entries[i].vector = entry->irq; |
521 | set_irq_msi(entry->irq, entry); | 519 | set_irq_msi(entry->irq, entry); |
522 | i++; | 520 | i++; |
523 | } | 521 | } |
524 | /* Set MSI-X enabled bits */ | 522 | /* Set MSI-X enabled bits */ |
525 | pci_intx_for_msi(dev, 0); | 523 | pci_intx_for_msi(dev, 0); |
526 | msix_set_enable(dev, 1); | 524 | msix_set_enable(dev, 1); |
527 | dev->msix_enabled = 1; | 525 | dev->msix_enabled = 1; |
528 | 526 | ||
529 | return 0; | 527 | return 0; |
530 | } | 528 | } |
531 | 529 | ||
532 | /** | 530 | /** |
533 | * pci_msi_check_device - check whether MSI may be enabled on a device | 531 | * pci_msi_check_device - check whether MSI may be enabled on a device |
534 | * @dev: pointer to the pci_dev data structure of MSI device function | 532 | * @dev: pointer to the pci_dev data structure of MSI device function |
535 | * @nvec: how many MSIs have been requested ? | 533 | * @nvec: how many MSIs have been requested ? |
536 | * @type: are we checking for MSI or MSI-X ? | 534 | * @type: are we checking for MSI or MSI-X ? |
537 | * | 535 | * |
538 | * Look at global flags, the device itself, and its parent busses | 536 | * Look at global flags, the device itself, and its parent busses |
539 | * to determine if MSI/-X are supported for the device. If MSI/-X is | 537 | * to determine if MSI/-X are supported for the device. If MSI/-X is |
540 | * supported return 0, else return an error code. | 538 | * supported return 0, else return an error code. |
541 | **/ | 539 | **/ |
542 | static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type) | 540 | static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type) |
543 | { | 541 | { |
544 | struct pci_bus *bus; | 542 | struct pci_bus *bus; |
545 | int ret; | 543 | int ret; |
546 | 544 | ||
547 | /* MSI must be globally enabled and supported by the device */ | 545 | /* MSI must be globally enabled and supported by the device */ |
548 | if (!pci_msi_enable || !dev || dev->no_msi) | 546 | if (!pci_msi_enable || !dev || dev->no_msi) |
549 | return -EINVAL; | 547 | return -EINVAL; |
550 | 548 | ||
551 | /* | 549 | /* |
552 | * You can't ask to have 0 or less MSIs configured. | 550 | * You can't ask to have 0 or less MSIs configured. |
553 | * a) it's stupid .. | 551 | * a) it's stupid .. |
554 | * b) the list manipulation code assumes nvec >= 1. | 552 | * b) the list manipulation code assumes nvec >= 1. |
555 | */ | 553 | */ |
556 | if (nvec < 1) | 554 | if (nvec < 1) |
557 | return -ERANGE; | 555 | return -ERANGE; |
558 | 556 | ||
559 | /* Any bridge which does NOT route MSI transactions from it's | 557 | /* Any bridge which does NOT route MSI transactions from it's |
560 | * secondary bus to it's primary bus must set NO_MSI flag on | 558 | * secondary bus to it's primary bus must set NO_MSI flag on |
561 | * the secondary pci_bus. | 559 | * the secondary pci_bus. |
562 | * We expect only arch-specific PCI host bus controller driver | 560 | * We expect only arch-specific PCI host bus controller driver |
563 | * or quirks for specific PCI bridges to be setting NO_MSI. | 561 | * or quirks for specific PCI bridges to be setting NO_MSI. |
564 | */ | 562 | */ |
565 | for (bus = dev->bus; bus; bus = bus->parent) | 563 | for (bus = dev->bus; bus; bus = bus->parent) |
566 | if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) | 564 | if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) |
567 | return -EINVAL; | 565 | return -EINVAL; |
568 | 566 | ||
569 | ret = arch_msi_check_device(dev, nvec, type); | 567 | ret = arch_msi_check_device(dev, nvec, type); |
570 | if (ret) | 568 | if (ret) |
571 | return ret; | 569 | return ret; |
572 | 570 | ||
573 | if (!pci_find_capability(dev, type)) | 571 | if (!pci_find_capability(dev, type)) |
574 | return -EINVAL; | 572 | return -EINVAL; |
575 | 573 | ||
576 | return 0; | 574 | return 0; |
577 | } | 575 | } |
578 | 576 | ||
579 | /** | 577 | /** |
580 | * pci_enable_msi - configure device's MSI capability structure | 578 | * pci_enable_msi - configure device's MSI capability structure |
581 | * @dev: pointer to the pci_dev data structure of MSI device function | 579 | * @dev: pointer to the pci_dev data structure of MSI device function |
582 | * | 580 | * |
583 | * Setup the MSI capability structure of device function with | 581 | * Setup the MSI capability structure of device function with |
584 | * a single MSI irq upon its software driver call to request for | 582 | * a single MSI irq upon its software driver call to request for |
585 | * MSI mode enabled on its hardware device function. A return of zero | 583 | * MSI mode enabled on its hardware device function. A return of zero |
586 | * indicates the successful setup of an entry zero with the new MSI | 584 | * indicates the successful setup of an entry zero with the new MSI |
587 | * irq or non-zero for otherwise. | 585 | * irq or non-zero for otherwise. |
588 | **/ | 586 | **/ |
589 | int pci_enable_msi(struct pci_dev* dev) | 587 | int pci_enable_msi(struct pci_dev* dev) |
590 | { | 588 | { |
591 | int status; | 589 | int status; |
592 | 590 | ||
593 | status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI); | 591 | status = pci_msi_check_device(dev, 1, PCI_CAP_ID_MSI); |
594 | if (status) | 592 | if (status) |
595 | return status; | 593 | return status; |
596 | 594 | ||
597 | WARN_ON(!!dev->msi_enabled); | 595 | WARN_ON(!!dev->msi_enabled); |
598 | 596 | ||
599 | /* Check whether driver already requested for MSI-X irqs */ | 597 | /* Check whether driver already requested for MSI-X irqs */ |
600 | if (dev->msix_enabled) { | 598 | if (dev->msix_enabled) { |
601 | dev_info(&dev->dev, "can't enable MSI " | 599 | dev_info(&dev->dev, "can't enable MSI " |
602 | "(MSI-X already enabled)\n"); | 600 | "(MSI-X already enabled)\n"); |
603 | return -EINVAL; | 601 | return -EINVAL; |
604 | } | 602 | } |
605 | status = msi_capability_init(dev); | 603 | status = msi_capability_init(dev); |
606 | return status; | 604 | return status; |
607 | } | 605 | } |
608 | EXPORT_SYMBOL(pci_enable_msi); | 606 | EXPORT_SYMBOL(pci_enable_msi); |
609 | 607 | ||
610 | void pci_msi_shutdown(struct pci_dev* dev) | 608 | void pci_msi_shutdown(struct pci_dev* dev) |
611 | { | 609 | { |
612 | struct msi_desc *entry; | 610 | struct msi_desc *entry; |
613 | 611 | ||
614 | if (!pci_msi_enable || !dev || !dev->msi_enabled) | 612 | if (!pci_msi_enable || !dev || !dev->msi_enabled) |
615 | return; | 613 | return; |
616 | 614 | ||
617 | msi_set_enable(dev, 0); | 615 | msi_set_enable(dev, 0); |
618 | pci_intx_for_msi(dev, 1); | 616 | pci_intx_for_msi(dev, 1); |
619 | dev->msi_enabled = 0; | 617 | dev->msi_enabled = 0; |
620 | 618 | ||
621 | BUG_ON(list_empty(&dev->msi_list)); | 619 | BUG_ON(list_empty(&dev->msi_list)); |
622 | entry = list_entry(dev->msi_list.next, struct msi_desc, list); | 620 | entry = list_entry(dev->msi_list.next, struct msi_desc, list); |
623 | /* Return the the pci reset with msi irqs unmasked */ | 621 | /* Return the the pci reset with msi irqs unmasked */ |
624 | if (entry->msi_attrib.maskbit) { | 622 | if (entry->msi_attrib.maskbit) { |
625 | u32 mask = entry->msi_attrib.maskbits_mask; | 623 | u32 mask = entry->msi_attrib.maskbits_mask; |
626 | struct irq_desc *desc = irq_to_desc(dev->irq); | 624 | struct irq_desc *desc = irq_to_desc(dev->irq); |
627 | msi_set_mask_bits(desc, mask, ~mask); | 625 | msi_set_mask_bits(desc, mask, ~mask); |
628 | } | 626 | } |
629 | if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) | 627 | if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) |
630 | return; | 628 | return; |
631 | 629 | ||
632 | /* Restore dev->irq to its default pin-assertion irq */ | 630 | /* Restore dev->irq to its default pin-assertion irq */ |
633 | dev->irq = entry->msi_attrib.default_irq; | 631 | dev->irq = entry->msi_attrib.default_irq; |
634 | } | 632 | } |
635 | void pci_disable_msi(struct pci_dev* dev) | 633 | void pci_disable_msi(struct pci_dev* dev) |
636 | { | 634 | { |
637 | struct msi_desc *entry; | 635 | struct msi_desc *entry; |
638 | 636 | ||
639 | if (!pci_msi_enable || !dev || !dev->msi_enabled) | 637 | if (!pci_msi_enable || !dev || !dev->msi_enabled) |
640 | return; | 638 | return; |
641 | 639 | ||
642 | pci_msi_shutdown(dev); | 640 | pci_msi_shutdown(dev); |
643 | 641 | ||
644 | entry = list_entry(dev->msi_list.next, struct msi_desc, list); | 642 | entry = list_entry(dev->msi_list.next, struct msi_desc, list); |
645 | if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) | 643 | if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) |
646 | return; | 644 | return; |
647 | 645 | ||
648 | msi_free_irqs(dev); | 646 | msi_free_irqs(dev); |
649 | } | 647 | } |
650 | EXPORT_SYMBOL(pci_disable_msi); | 648 | EXPORT_SYMBOL(pci_disable_msi); |
651 | 649 | ||
652 | static int msi_free_irqs(struct pci_dev* dev) | 650 | static int msi_free_irqs(struct pci_dev* dev) |
653 | { | 651 | { |
654 | struct msi_desc *entry, *tmp; | 652 | struct msi_desc *entry, *tmp; |
655 | 653 | ||
656 | list_for_each_entry(entry, &dev->msi_list, list) { | 654 | list_for_each_entry(entry, &dev->msi_list, list) { |
657 | if (entry->irq) | 655 | if (entry->irq) |
658 | BUG_ON(irq_has_action(entry->irq)); | 656 | BUG_ON(irq_has_action(entry->irq)); |
659 | } | 657 | } |
660 | 658 | ||
661 | arch_teardown_msi_irqs(dev); | 659 | arch_teardown_msi_irqs(dev); |
662 | 660 | ||
663 | list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { | 661 | list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { |
664 | if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) { | 662 | if (entry->msi_attrib.type == PCI_CAP_ID_MSIX) { |
665 | writel(1, entry->mask_base + entry->msi_attrib.entry_nr | 663 | writel(1, entry->mask_base + entry->msi_attrib.entry_nr |
666 | * PCI_MSIX_ENTRY_SIZE | 664 | * PCI_MSIX_ENTRY_SIZE |
667 | + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); | 665 | + PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET); |
668 | 666 | ||
669 | if (list_is_last(&entry->list, &dev->msi_list)) | 667 | if (list_is_last(&entry->list, &dev->msi_list)) |
670 | iounmap(entry->mask_base); | 668 | iounmap(entry->mask_base); |
671 | } | 669 | } |
672 | list_del(&entry->list); | 670 | list_del(&entry->list); |
673 | kfree(entry); | 671 | kfree(entry); |
674 | } | 672 | } |
675 | 673 | ||
676 | return 0; | 674 | return 0; |
677 | } | 675 | } |
678 | 676 | ||
679 | /** | 677 | /** |
680 | * pci_enable_msix - configure device's MSI-X capability structure | 678 | * pci_enable_msix - configure device's MSI-X capability structure |
681 | * @dev: pointer to the pci_dev data structure of MSI-X device function | 679 | * @dev: pointer to the pci_dev data structure of MSI-X device function |
682 | * @entries: pointer to an array of MSI-X entries | 680 | * @entries: pointer to an array of MSI-X entries |
683 | * @nvec: number of MSI-X irqs requested for allocation by device driver | 681 | * @nvec: number of MSI-X irqs requested for allocation by device driver |
684 | * | 682 | * |
685 | * Setup the MSI-X capability structure of device function with the number | 683 | * Setup the MSI-X capability structure of device function with the number |
686 | * of requested irqs upon its software driver call to request for | 684 | * of requested irqs upon its software driver call to request for |
687 | * MSI-X mode enabled on its hardware device function. A return of zero | 685 | * MSI-X mode enabled on its hardware device function. A return of zero |
688 | * indicates the successful configuration of MSI-X capability structure | 686 | * indicates the successful configuration of MSI-X capability structure |
689 | * with new allocated MSI-X irqs. A return of < 0 indicates a failure. | 687 | * with new allocated MSI-X irqs. A return of < 0 indicates a failure. |
690 | * Or a return of > 0 indicates that driver request is exceeding the number | 688 | * Or a return of > 0 indicates that driver request is exceeding the number |
691 | * of irqs available. Driver should use the returned value to re-send | 689 | * of irqs available. Driver should use the returned value to re-send |
692 | * its request. | 690 | * its request. |
693 | **/ | 691 | **/ |
694 | int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) | 692 | int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) |
695 | { | 693 | { |
696 | int status, pos, nr_entries; | 694 | int status, pos, nr_entries; |
697 | int i, j; | 695 | int i, j; |
698 | u16 control; | 696 | u16 control; |
699 | 697 | ||
700 | if (!entries) | 698 | if (!entries) |
701 | return -EINVAL; | 699 | return -EINVAL; |
702 | 700 | ||
703 | status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); | 701 | status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); |
704 | if (status) | 702 | if (status) |
705 | return status; | 703 | return status; |
706 | 704 | ||
707 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 705 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); |
708 | pci_read_config_word(dev, msi_control_reg(pos), &control); | 706 | pci_read_config_word(dev, msi_control_reg(pos), &control); |
709 | nr_entries = multi_msix_capable(control); | 707 | nr_entries = multi_msix_capable(control); |
710 | if (nvec > nr_entries) | 708 | if (nvec > nr_entries) |
711 | return -EINVAL; | 709 | return -EINVAL; |
712 | 710 | ||
713 | /* Check for any invalid entries */ | 711 | /* Check for any invalid entries */ |
714 | for (i = 0; i < nvec; i++) { | 712 | for (i = 0; i < nvec; i++) { |
715 | if (entries[i].entry >= nr_entries) | 713 | if (entries[i].entry >= nr_entries) |
716 | return -EINVAL; /* invalid entry */ | 714 | return -EINVAL; /* invalid entry */ |
717 | for (j = i + 1; j < nvec; j++) { | 715 | for (j = i + 1; j < nvec; j++) { |
718 | if (entries[i].entry == entries[j].entry) | 716 | if (entries[i].entry == entries[j].entry) |
719 | return -EINVAL; /* duplicate entry */ | 717 | return -EINVAL; /* duplicate entry */ |
720 | } | 718 | } |
721 | } | 719 | } |
722 | WARN_ON(!!dev->msix_enabled); | 720 | WARN_ON(!!dev->msix_enabled); |
723 | 721 | ||
724 | /* Check whether driver already requested for MSI irq */ | 722 | /* Check whether driver already requested for MSI irq */ |
725 | if (dev->msi_enabled) { | 723 | if (dev->msi_enabled) { |
726 | dev_info(&dev->dev, "can't enable MSI-X " | 724 | dev_info(&dev->dev, "can't enable MSI-X " |
727 | "(MSI IRQ already assigned)\n"); | 725 | "(MSI IRQ already assigned)\n"); |
728 | return -EINVAL; | 726 | return -EINVAL; |
729 | } | 727 | } |
730 | status = msix_capability_init(dev, entries, nvec); | 728 | status = msix_capability_init(dev, entries, nvec); |
731 | return status; | 729 | return status; |
732 | } | 730 | } |
733 | EXPORT_SYMBOL(pci_enable_msix); | 731 | EXPORT_SYMBOL(pci_enable_msix); |
734 | 732 | ||
735 | static void msix_free_all_irqs(struct pci_dev *dev) | 733 | static void msix_free_all_irqs(struct pci_dev *dev) |
736 | { | 734 | { |
737 | msi_free_irqs(dev); | 735 | msi_free_irqs(dev); |
738 | } | 736 | } |
739 | 737 | ||
740 | void pci_msix_shutdown(struct pci_dev* dev) | 738 | void pci_msix_shutdown(struct pci_dev* dev) |
741 | { | 739 | { |
742 | if (!pci_msi_enable || !dev || !dev->msix_enabled) | 740 | if (!pci_msi_enable || !dev || !dev->msix_enabled) |
743 | return; | 741 | return; |
744 | 742 | ||
745 | msix_set_enable(dev, 0); | 743 | msix_set_enable(dev, 0); |
746 | pci_intx_for_msi(dev, 1); | 744 | pci_intx_for_msi(dev, 1); |
747 | dev->msix_enabled = 0; | 745 | dev->msix_enabled = 0; |
748 | } | 746 | } |
749 | void pci_disable_msix(struct pci_dev* dev) | 747 | void pci_disable_msix(struct pci_dev* dev) |
750 | { | 748 | { |
751 | if (!pci_msi_enable || !dev || !dev->msix_enabled) | 749 | if (!pci_msi_enable || !dev || !dev->msix_enabled) |
752 | return; | 750 | return; |
753 | 751 | ||
754 | pci_msix_shutdown(dev); | 752 | pci_msix_shutdown(dev); |
755 | 753 | ||
756 | msix_free_all_irqs(dev); | 754 | msix_free_all_irqs(dev); |
757 | } | 755 | } |
758 | EXPORT_SYMBOL(pci_disable_msix); | 756 | EXPORT_SYMBOL(pci_disable_msix); |
759 | 757 | ||
760 | /** | 758 | /** |
761 | * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state | 759 | * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state |
762 | * @dev: pointer to the pci_dev data structure of MSI(X) device function | 760 | * @dev: pointer to the pci_dev data structure of MSI(X) device function |
763 | * | 761 | * |
764 | * Being called during hotplug remove, from which the device function | 762 | * Being called during hotplug remove, from which the device function |
765 | * is hot-removed. All previous assigned MSI/MSI-X irqs, if | 763 | * is hot-removed. All previous assigned MSI/MSI-X irqs, if |
766 | * allocated for this device function, are reclaimed to unused state, | 764 | * allocated for this device function, are reclaimed to unused state, |
767 | * which may be used later on. | 765 | * which may be used later on. |
768 | **/ | 766 | **/ |
769 | void msi_remove_pci_irq_vectors(struct pci_dev* dev) | 767 | void msi_remove_pci_irq_vectors(struct pci_dev* dev) |
770 | { | 768 | { |
771 | if (!pci_msi_enable || !dev) | 769 | if (!pci_msi_enable || !dev) |
772 | return; | 770 | return; |
773 | 771 | ||
774 | if (dev->msi_enabled) | 772 | if (dev->msi_enabled) |
775 | msi_free_irqs(dev); | 773 | msi_free_irqs(dev); |
776 | 774 | ||
777 | if (dev->msix_enabled) | 775 | if (dev->msix_enabled) |
778 | msix_free_all_irqs(dev); | 776 | msix_free_all_irqs(dev); |
779 | } | 777 | } |
780 | 778 | ||
781 | void pci_no_msi(void) | 779 | void pci_no_msi(void) |
782 | { | 780 | { |
783 | pci_msi_enable = 0; | 781 | pci_msi_enable = 0; |
784 | } | 782 | } |
785 | 783 | ||
786 | /** | 784 | /** |
787 | * pci_msi_enabled - is MSI enabled? | 785 | * pci_msi_enabled - is MSI enabled? |
788 | * | 786 | * |
789 | * Returns true if MSI has not been disabled by the command-line option | 787 | * Returns true if MSI has not been disabled by the command-line option |
790 | * pci=nomsi. | 788 | * pci=nomsi. |
791 | **/ | 789 | **/ |
792 | int pci_msi_enabled(void) | 790 | int pci_msi_enabled(void) |
793 | { | 791 | { |
794 | return pci_msi_enable; | 792 | return pci_msi_enable; |
795 | } | 793 | } |
796 | EXPORT_SYMBOL(pci_msi_enabled); | 794 | EXPORT_SYMBOL(pci_msi_enabled); |
797 | 795 | ||
798 | void pci_msi_init_pci_dev(struct pci_dev *dev) | 796 | void pci_msi_init_pci_dev(struct pci_dev *dev) |
799 | { | 797 | { |
800 | INIT_LIST_HEAD(&dev->msi_list); | 798 | INIT_LIST_HEAD(&dev->msi_list); |