Commit 8b3bb3ecf1934ac4a7005ad9017de1127e2fbd2f
Committed by: Rusty Russell
Parent commit: 2b8216e635
Exists in: master and 39 other branches
virtio: remove virtio-pci root device
We sometimes need to map between the virtio device and the given pci device. One such use is OS installer that gets the boot pci device from BIOS and needs to find the relevant block device. Since it can't, installation fails. Instead of creating a top-level devices/virtio-pci directory, create each device under the corresponding pci device node. Symlinks to all virtio-pci devices can be found under the pci driver link in bus/pci/drivers/virtio-pci/devices, and all virtio devices under drivers/bus/virtio/devices. Signed-off-by: Milton Miller <miltonm@bga.com> Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> Acked-by: Michael S. Tsirkin <mst@redhat.com> Tested-by: Michael S. Tsirkin <mst@redhat.com> Acked-by: Gleb Natapov <gleb@redhat.com> Tested-by: "Daniel P. Berrange" <berrange@redhat.com> Cc: stable@kernel.org
Showing 1 changed file with 2 additions and 18 deletions (inline diff)
drivers/virtio/virtio_pci.c
1 | /* | 1 | /* |
2 | * Virtio PCI driver | 2 | * Virtio PCI driver |
3 | * | 3 | * |
4 | * This module allows virtio devices to be used over a virtual PCI device. | 4 | * This module allows virtio devices to be used over a virtual PCI device. |
5 | * This can be used with QEMU based VMMs like KVM or Xen. | 5 | * This can be used with QEMU based VMMs like KVM or Xen. |
6 | * | 6 | * |
7 | * Copyright IBM Corp. 2007 | 7 | * Copyright IBM Corp. 2007 |
8 | * | 8 | * |
9 | * Authors: | 9 | * Authors: |
10 | * Anthony Liguori <aliguori@us.ibm.com> | 10 | * Anthony Liguori <aliguori@us.ibm.com> |
11 | * | 11 | * |
12 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | 12 | * This work is licensed under the terms of the GNU GPL, version 2 or later. |
13 | * See the COPYING file in the top-level directory. | 13 | * See the COPYING file in the top-level directory. |
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/list.h> | 18 | #include <linux/list.h> |
19 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/virtio.h> | 22 | #include <linux/virtio.h> |
23 | #include <linux/virtio_config.h> | 23 | #include <linux/virtio_config.h> |
24 | #include <linux/virtio_ring.h> | 24 | #include <linux/virtio_ring.h> |
25 | #include <linux/virtio_pci.h> | 25 | #include <linux/virtio_pci.h> |
26 | #include <linux/highmem.h> | 26 | #include <linux/highmem.h> |
27 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
28 | 28 | ||
29 | MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>"); | 29 | MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>"); |
30 | MODULE_DESCRIPTION("virtio-pci"); | 30 | MODULE_DESCRIPTION("virtio-pci"); |
31 | MODULE_LICENSE("GPL"); | 31 | MODULE_LICENSE("GPL"); |
32 | MODULE_VERSION("1"); | 32 | MODULE_VERSION("1"); |
33 | 33 | ||
34 | /* Our device structure */ | 34 | /* Our device structure */ |
35 | struct virtio_pci_device | 35 | struct virtio_pci_device |
36 | { | 36 | { |
37 | struct virtio_device vdev; | 37 | struct virtio_device vdev; |
38 | struct pci_dev *pci_dev; | 38 | struct pci_dev *pci_dev; |
39 | 39 | ||
40 | /* the IO mapping for the PCI config space */ | 40 | /* the IO mapping for the PCI config space */ |
41 | void __iomem *ioaddr; | 41 | void __iomem *ioaddr; |
42 | 42 | ||
43 | /* a list of queues so we can dispatch IRQs */ | 43 | /* a list of queues so we can dispatch IRQs */ |
44 | spinlock_t lock; | 44 | spinlock_t lock; |
45 | struct list_head virtqueues; | 45 | struct list_head virtqueues; |
46 | 46 | ||
47 | /* MSI-X support */ | 47 | /* MSI-X support */ |
48 | int msix_enabled; | 48 | int msix_enabled; |
49 | int intx_enabled; | 49 | int intx_enabled; |
50 | struct msix_entry *msix_entries; | 50 | struct msix_entry *msix_entries; |
51 | /* Name strings for interrupts. This size should be enough, | 51 | /* Name strings for interrupts. This size should be enough, |
52 | * and I'm too lazy to allocate each name separately. */ | 52 | * and I'm too lazy to allocate each name separately. */ |
53 | char (*msix_names)[256]; | 53 | char (*msix_names)[256]; |
54 | /* Number of available vectors */ | 54 | /* Number of available vectors */ |
55 | unsigned msix_vectors; | 55 | unsigned msix_vectors; |
56 | /* Vectors allocated, excluding per-vq vectors if any */ | 56 | /* Vectors allocated, excluding per-vq vectors if any */ |
57 | unsigned msix_used_vectors; | 57 | unsigned msix_used_vectors; |
58 | /* Whether we have vector per vq */ | 58 | /* Whether we have vector per vq */ |
59 | bool per_vq_vectors; | 59 | bool per_vq_vectors; |
60 | }; | 60 | }; |
61 | 61 | ||
62 | /* Constants for MSI-X */ | 62 | /* Constants for MSI-X */ |
63 | /* Use first vector for configuration changes, second and the rest for | 63 | /* Use first vector for configuration changes, second and the rest for |
64 | * virtqueues Thus, we need at least 2 vectors for MSI. */ | 64 | * virtqueues Thus, we need at least 2 vectors for MSI. */ |
65 | enum { | 65 | enum { |
66 | VP_MSIX_CONFIG_VECTOR = 0, | 66 | VP_MSIX_CONFIG_VECTOR = 0, |
67 | VP_MSIX_VQ_VECTOR = 1, | 67 | VP_MSIX_VQ_VECTOR = 1, |
68 | }; | 68 | }; |
69 | 69 | ||
70 | struct virtio_pci_vq_info | 70 | struct virtio_pci_vq_info |
71 | { | 71 | { |
72 | /* the actual virtqueue */ | 72 | /* the actual virtqueue */ |
73 | struct virtqueue *vq; | 73 | struct virtqueue *vq; |
74 | 74 | ||
75 | /* the number of entries in the queue */ | 75 | /* the number of entries in the queue */ |
76 | int num; | 76 | int num; |
77 | 77 | ||
78 | /* the index of the queue */ | 78 | /* the index of the queue */ |
79 | int queue_index; | 79 | int queue_index; |
80 | 80 | ||
81 | /* the virtual address of the ring queue */ | 81 | /* the virtual address of the ring queue */ |
82 | void *queue; | 82 | void *queue; |
83 | 83 | ||
84 | /* the list node for the virtqueues list */ | 84 | /* the list node for the virtqueues list */ |
85 | struct list_head node; | 85 | struct list_head node; |
86 | 86 | ||
87 | /* MSI-X vector (or none) */ | 87 | /* MSI-X vector (or none) */ |
88 | unsigned msix_vector; | 88 | unsigned msix_vector; |
89 | }; | 89 | }; |
90 | 90 | ||
91 | /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ | 91 | /* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */ |
92 | static struct pci_device_id virtio_pci_id_table[] = { | 92 | static struct pci_device_id virtio_pci_id_table[] = { |
93 | { 0x1af4, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 93 | { 0x1af4, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, |
94 | { 0 }, | 94 | { 0 }, |
95 | }; | 95 | }; |
96 | 96 | ||
97 | MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); | 97 | MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); |
98 | 98 | ||
99 | /* A PCI device has it's own struct device and so does a virtio device so | ||
100 | * we create a place for the virtio devices to show up in sysfs. I think it | ||
101 | * would make more sense for virtio to not insist on having it's own device. */ | ||
102 | static struct device *virtio_pci_root; | ||
103 | |||
104 | /* Convert a generic virtio device to our structure */ | 99 | /* Convert a generic virtio device to our structure */ |
105 | static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) | 100 | static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) |
106 | { | 101 | { |
107 | return container_of(vdev, struct virtio_pci_device, vdev); | 102 | return container_of(vdev, struct virtio_pci_device, vdev); |
108 | } | 103 | } |
109 | 104 | ||
110 | /* virtio config->get_features() implementation */ | 105 | /* virtio config->get_features() implementation */ |
111 | static u32 vp_get_features(struct virtio_device *vdev) | 106 | static u32 vp_get_features(struct virtio_device *vdev) |
112 | { | 107 | { |
113 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 108 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
114 | 109 | ||
115 | /* When someone needs more than 32 feature bits, we'll need to | 110 | /* When someone needs more than 32 feature bits, we'll need to |
116 | * steal a bit to indicate that the rest are somewhere else. */ | 111 | * steal a bit to indicate that the rest are somewhere else. */ |
117 | return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); | 112 | return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); |
118 | } | 113 | } |
119 | 114 | ||
120 | /* virtio config->finalize_features() implementation */ | 115 | /* virtio config->finalize_features() implementation */ |
121 | static void vp_finalize_features(struct virtio_device *vdev) | 116 | static void vp_finalize_features(struct virtio_device *vdev) |
122 | { | 117 | { |
123 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 118 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
124 | 119 | ||
125 | /* Give virtio_ring a chance to accept features. */ | 120 | /* Give virtio_ring a chance to accept features. */ |
126 | vring_transport_features(vdev); | 121 | vring_transport_features(vdev); |
127 | 122 | ||
128 | /* We only support 32 feature bits. */ | 123 | /* We only support 32 feature bits. */ |
129 | BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1); | 124 | BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1); |
130 | iowrite32(vdev->features[0], vp_dev->ioaddr+VIRTIO_PCI_GUEST_FEATURES); | 125 | iowrite32(vdev->features[0], vp_dev->ioaddr+VIRTIO_PCI_GUEST_FEATURES); |
131 | } | 126 | } |
132 | 127 | ||
133 | /* virtio config->get() implementation */ | 128 | /* virtio config->get() implementation */ |
134 | static void vp_get(struct virtio_device *vdev, unsigned offset, | 129 | static void vp_get(struct virtio_device *vdev, unsigned offset, |
135 | void *buf, unsigned len) | 130 | void *buf, unsigned len) |
136 | { | 131 | { |
137 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 132 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
138 | void __iomem *ioaddr = vp_dev->ioaddr + | 133 | void __iomem *ioaddr = vp_dev->ioaddr + |
139 | VIRTIO_PCI_CONFIG(vp_dev) + offset; | 134 | VIRTIO_PCI_CONFIG(vp_dev) + offset; |
140 | u8 *ptr = buf; | 135 | u8 *ptr = buf; |
141 | int i; | 136 | int i; |
142 | 137 | ||
143 | for (i = 0; i < len; i++) | 138 | for (i = 0; i < len; i++) |
144 | ptr[i] = ioread8(ioaddr + i); | 139 | ptr[i] = ioread8(ioaddr + i); |
145 | } | 140 | } |
146 | 141 | ||
147 | /* the config->set() implementation. it's symmetric to the config->get() | 142 | /* the config->set() implementation. it's symmetric to the config->get() |
148 | * implementation */ | 143 | * implementation */ |
149 | static void vp_set(struct virtio_device *vdev, unsigned offset, | 144 | static void vp_set(struct virtio_device *vdev, unsigned offset, |
150 | const void *buf, unsigned len) | 145 | const void *buf, unsigned len) |
151 | { | 146 | { |
152 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 147 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
153 | void __iomem *ioaddr = vp_dev->ioaddr + | 148 | void __iomem *ioaddr = vp_dev->ioaddr + |
154 | VIRTIO_PCI_CONFIG(vp_dev) + offset; | 149 | VIRTIO_PCI_CONFIG(vp_dev) + offset; |
155 | const u8 *ptr = buf; | 150 | const u8 *ptr = buf; |
156 | int i; | 151 | int i; |
157 | 152 | ||
158 | for (i = 0; i < len; i++) | 153 | for (i = 0; i < len; i++) |
159 | iowrite8(ptr[i], ioaddr + i); | 154 | iowrite8(ptr[i], ioaddr + i); |
160 | } | 155 | } |
161 | 156 | ||
162 | /* config->{get,set}_status() implementations */ | 157 | /* config->{get,set}_status() implementations */ |
163 | static u8 vp_get_status(struct virtio_device *vdev) | 158 | static u8 vp_get_status(struct virtio_device *vdev) |
164 | { | 159 | { |
165 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 160 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
166 | return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); | 161 | return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); |
167 | } | 162 | } |
168 | 163 | ||
169 | static void vp_set_status(struct virtio_device *vdev, u8 status) | 164 | static void vp_set_status(struct virtio_device *vdev, u8 status) |
170 | { | 165 | { |
171 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 166 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
172 | /* We should never be setting status to 0. */ | 167 | /* We should never be setting status to 0. */ |
173 | BUG_ON(status == 0); | 168 | BUG_ON(status == 0); |
174 | iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); | 169 | iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); |
175 | } | 170 | } |
176 | 171 | ||
177 | static void vp_reset(struct virtio_device *vdev) | 172 | static void vp_reset(struct virtio_device *vdev) |
178 | { | 173 | { |
179 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 174 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
180 | /* 0 status means a reset. */ | 175 | /* 0 status means a reset. */ |
181 | iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); | 176 | iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); |
182 | } | 177 | } |
183 | 178 | ||
184 | /* the notify function used when creating a virt queue */ | 179 | /* the notify function used when creating a virt queue */ |
185 | static void vp_notify(struct virtqueue *vq) | 180 | static void vp_notify(struct virtqueue *vq) |
186 | { | 181 | { |
187 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); | 182 | struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); |
188 | struct virtio_pci_vq_info *info = vq->priv; | 183 | struct virtio_pci_vq_info *info = vq->priv; |
189 | 184 | ||
190 | /* we write the queue's selector into the notification register to | 185 | /* we write the queue's selector into the notification register to |
191 | * signal the other end */ | 186 | * signal the other end */ |
192 | iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); | 187 | iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); |
193 | } | 188 | } |
194 | 189 | ||
195 | /* Handle a configuration change: Tell driver if it wants to know. */ | 190 | /* Handle a configuration change: Tell driver if it wants to know. */ |
196 | static irqreturn_t vp_config_changed(int irq, void *opaque) | 191 | static irqreturn_t vp_config_changed(int irq, void *opaque) |
197 | { | 192 | { |
198 | struct virtio_pci_device *vp_dev = opaque; | 193 | struct virtio_pci_device *vp_dev = opaque; |
199 | struct virtio_driver *drv; | 194 | struct virtio_driver *drv; |
200 | drv = container_of(vp_dev->vdev.dev.driver, | 195 | drv = container_of(vp_dev->vdev.dev.driver, |
201 | struct virtio_driver, driver); | 196 | struct virtio_driver, driver); |
202 | 197 | ||
203 | if (drv && drv->config_changed) | 198 | if (drv && drv->config_changed) |
204 | drv->config_changed(&vp_dev->vdev); | 199 | drv->config_changed(&vp_dev->vdev); |
205 | return IRQ_HANDLED; | 200 | return IRQ_HANDLED; |
206 | } | 201 | } |
207 | 202 | ||
208 | /* Notify all virtqueues on an interrupt. */ | 203 | /* Notify all virtqueues on an interrupt. */ |
209 | static irqreturn_t vp_vring_interrupt(int irq, void *opaque) | 204 | static irqreturn_t vp_vring_interrupt(int irq, void *opaque) |
210 | { | 205 | { |
211 | struct virtio_pci_device *vp_dev = opaque; | 206 | struct virtio_pci_device *vp_dev = opaque; |
212 | struct virtio_pci_vq_info *info; | 207 | struct virtio_pci_vq_info *info; |
213 | irqreturn_t ret = IRQ_NONE; | 208 | irqreturn_t ret = IRQ_NONE; |
214 | unsigned long flags; | 209 | unsigned long flags; |
215 | 210 | ||
216 | spin_lock_irqsave(&vp_dev->lock, flags); | 211 | spin_lock_irqsave(&vp_dev->lock, flags); |
217 | list_for_each_entry(info, &vp_dev->virtqueues, node) { | 212 | list_for_each_entry(info, &vp_dev->virtqueues, node) { |
218 | if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) | 213 | if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) |
219 | ret = IRQ_HANDLED; | 214 | ret = IRQ_HANDLED; |
220 | } | 215 | } |
221 | spin_unlock_irqrestore(&vp_dev->lock, flags); | 216 | spin_unlock_irqrestore(&vp_dev->lock, flags); |
222 | 217 | ||
223 | return ret; | 218 | return ret; |
224 | } | 219 | } |
225 | 220 | ||
226 | /* A small wrapper to also acknowledge the interrupt when it's handled. | 221 | /* A small wrapper to also acknowledge the interrupt when it's handled. |
227 | * I really need an EIO hook for the vring so I can ack the interrupt once we | 222 | * I really need an EIO hook for the vring so I can ack the interrupt once we |
228 | * know that we'll be handling the IRQ but before we invoke the callback since | 223 | * know that we'll be handling the IRQ but before we invoke the callback since |
229 | * the callback may notify the host which results in the host attempting to | 224 | * the callback may notify the host which results in the host attempting to |
230 | * raise an interrupt that we would then mask once we acknowledged the | 225 | * raise an interrupt that we would then mask once we acknowledged the |
231 | * interrupt. */ | 226 | * interrupt. */ |
232 | static irqreturn_t vp_interrupt(int irq, void *opaque) | 227 | static irqreturn_t vp_interrupt(int irq, void *opaque) |
233 | { | 228 | { |
234 | struct virtio_pci_device *vp_dev = opaque; | 229 | struct virtio_pci_device *vp_dev = opaque; |
235 | u8 isr; | 230 | u8 isr; |
236 | 231 | ||
237 | /* reading the ISR has the effect of also clearing it so it's very | 232 | /* reading the ISR has the effect of also clearing it so it's very |
238 | * important to save off the value. */ | 233 | * important to save off the value. */ |
239 | isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); | 234 | isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); |
240 | 235 | ||
241 | /* It's definitely not us if the ISR was not high */ | 236 | /* It's definitely not us if the ISR was not high */ |
242 | if (!isr) | 237 | if (!isr) |
243 | return IRQ_NONE; | 238 | return IRQ_NONE; |
244 | 239 | ||
245 | /* Configuration change? Tell driver if it wants to know. */ | 240 | /* Configuration change? Tell driver if it wants to know. */ |
246 | if (isr & VIRTIO_PCI_ISR_CONFIG) | 241 | if (isr & VIRTIO_PCI_ISR_CONFIG) |
247 | vp_config_changed(irq, opaque); | 242 | vp_config_changed(irq, opaque); |
248 | 243 | ||
249 | return vp_vring_interrupt(irq, opaque); | 244 | return vp_vring_interrupt(irq, opaque); |
250 | } | 245 | } |
251 | 246 | ||
252 | static void vp_free_vectors(struct virtio_device *vdev) | 247 | static void vp_free_vectors(struct virtio_device *vdev) |
253 | { | 248 | { |
254 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 249 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
255 | int i; | 250 | int i; |
256 | 251 | ||
257 | if (vp_dev->intx_enabled) { | 252 | if (vp_dev->intx_enabled) { |
258 | free_irq(vp_dev->pci_dev->irq, vp_dev); | 253 | free_irq(vp_dev->pci_dev->irq, vp_dev); |
259 | vp_dev->intx_enabled = 0; | 254 | vp_dev->intx_enabled = 0; |
260 | } | 255 | } |
261 | 256 | ||
262 | for (i = 0; i < vp_dev->msix_used_vectors; ++i) | 257 | for (i = 0; i < vp_dev->msix_used_vectors; ++i) |
263 | free_irq(vp_dev->msix_entries[i].vector, vp_dev); | 258 | free_irq(vp_dev->msix_entries[i].vector, vp_dev); |
264 | 259 | ||
265 | if (vp_dev->msix_enabled) { | 260 | if (vp_dev->msix_enabled) { |
266 | /* Disable the vector used for configuration */ | 261 | /* Disable the vector used for configuration */ |
267 | iowrite16(VIRTIO_MSI_NO_VECTOR, | 262 | iowrite16(VIRTIO_MSI_NO_VECTOR, |
268 | vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | 263 | vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); |
269 | /* Flush the write out to device */ | 264 | /* Flush the write out to device */ |
270 | ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | 265 | ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); |
271 | 266 | ||
272 | pci_disable_msix(vp_dev->pci_dev); | 267 | pci_disable_msix(vp_dev->pci_dev); |
273 | vp_dev->msix_enabled = 0; | 268 | vp_dev->msix_enabled = 0; |
274 | vp_dev->msix_vectors = 0; | 269 | vp_dev->msix_vectors = 0; |
275 | } | 270 | } |
276 | 271 | ||
277 | vp_dev->msix_used_vectors = 0; | 272 | vp_dev->msix_used_vectors = 0; |
278 | kfree(vp_dev->msix_names); | 273 | kfree(vp_dev->msix_names); |
279 | vp_dev->msix_names = NULL; | 274 | vp_dev->msix_names = NULL; |
280 | kfree(vp_dev->msix_entries); | 275 | kfree(vp_dev->msix_entries); |
281 | vp_dev->msix_entries = NULL; | 276 | vp_dev->msix_entries = NULL; |
282 | } | 277 | } |
283 | 278 | ||
284 | static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, | 279 | static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, |
285 | bool per_vq_vectors) | 280 | bool per_vq_vectors) |
286 | { | 281 | { |
287 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 282 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
288 | const char *name = dev_name(&vp_dev->vdev.dev); | 283 | const char *name = dev_name(&vp_dev->vdev.dev); |
289 | unsigned i, v; | 284 | unsigned i, v; |
290 | int err = -ENOMEM; | 285 | int err = -ENOMEM; |
291 | 286 | ||
292 | vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, | 287 | vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, |
293 | GFP_KERNEL); | 288 | GFP_KERNEL); |
294 | if (!vp_dev->msix_entries) | 289 | if (!vp_dev->msix_entries) |
295 | goto error; | 290 | goto error; |
296 | vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, | 291 | vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, |
297 | GFP_KERNEL); | 292 | GFP_KERNEL); |
298 | if (!vp_dev->msix_names) | 293 | if (!vp_dev->msix_names) |
299 | goto error; | 294 | goto error; |
300 | 295 | ||
301 | for (i = 0; i < nvectors; ++i) | 296 | for (i = 0; i < nvectors; ++i) |
302 | vp_dev->msix_entries[i].entry = i; | 297 | vp_dev->msix_entries[i].entry = i; |
303 | 298 | ||
304 | /* pci_enable_msix returns positive if we can't get this many. */ | 299 | /* pci_enable_msix returns positive if we can't get this many. */ |
305 | err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors); | 300 | err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors); |
306 | if (err > 0) | 301 | if (err > 0) |
307 | err = -ENOSPC; | 302 | err = -ENOSPC; |
308 | if (err) | 303 | if (err) |
309 | goto error; | 304 | goto error; |
310 | vp_dev->msix_vectors = nvectors; | 305 | vp_dev->msix_vectors = nvectors; |
311 | vp_dev->msix_enabled = 1; | 306 | vp_dev->msix_enabled = 1; |
312 | 307 | ||
313 | /* Set the vector used for configuration */ | 308 | /* Set the vector used for configuration */ |
314 | v = vp_dev->msix_used_vectors; | 309 | v = vp_dev->msix_used_vectors; |
315 | snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, | 310 | snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, |
316 | "%s-config", name); | 311 | "%s-config", name); |
317 | err = request_irq(vp_dev->msix_entries[v].vector, | 312 | err = request_irq(vp_dev->msix_entries[v].vector, |
318 | vp_config_changed, 0, vp_dev->msix_names[v], | 313 | vp_config_changed, 0, vp_dev->msix_names[v], |
319 | vp_dev); | 314 | vp_dev); |
320 | if (err) | 315 | if (err) |
321 | goto error; | 316 | goto error; |
322 | ++vp_dev->msix_used_vectors; | 317 | ++vp_dev->msix_used_vectors; |
323 | 318 | ||
324 | iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | 319 | iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); |
325 | /* Verify we had enough resources to assign the vector */ | 320 | /* Verify we had enough resources to assign the vector */ |
326 | v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); | 321 | v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); |
327 | if (v == VIRTIO_MSI_NO_VECTOR) { | 322 | if (v == VIRTIO_MSI_NO_VECTOR) { |
328 | err = -EBUSY; | 323 | err = -EBUSY; |
329 | goto error; | 324 | goto error; |
330 | } | 325 | } |
331 | 326 | ||
332 | if (!per_vq_vectors) { | 327 | if (!per_vq_vectors) { |
333 | /* Shared vector for all VQs */ | 328 | /* Shared vector for all VQs */ |
334 | v = vp_dev->msix_used_vectors; | 329 | v = vp_dev->msix_used_vectors; |
335 | snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, | 330 | snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, |
336 | "%s-virtqueues", name); | 331 | "%s-virtqueues", name); |
337 | err = request_irq(vp_dev->msix_entries[v].vector, | 332 | err = request_irq(vp_dev->msix_entries[v].vector, |
338 | vp_vring_interrupt, 0, vp_dev->msix_names[v], | 333 | vp_vring_interrupt, 0, vp_dev->msix_names[v], |
339 | vp_dev); | 334 | vp_dev); |
340 | if (err) | 335 | if (err) |
341 | goto error; | 336 | goto error; |
342 | ++vp_dev->msix_used_vectors; | 337 | ++vp_dev->msix_used_vectors; |
343 | } | 338 | } |
344 | return 0; | 339 | return 0; |
345 | error: | 340 | error: |
346 | vp_free_vectors(vdev); | 341 | vp_free_vectors(vdev); |
347 | return err; | 342 | return err; |
348 | } | 343 | } |
349 | 344 | ||
350 | static int vp_request_intx(struct virtio_device *vdev) | 345 | static int vp_request_intx(struct virtio_device *vdev) |
351 | { | 346 | { |
352 | int err; | 347 | int err; |
353 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 348 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
354 | 349 | ||
355 | err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, | 350 | err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, |
356 | IRQF_SHARED, dev_name(&vdev->dev), vp_dev); | 351 | IRQF_SHARED, dev_name(&vdev->dev), vp_dev); |
357 | if (!err) | 352 | if (!err) |
358 | vp_dev->intx_enabled = 1; | 353 | vp_dev->intx_enabled = 1; |
359 | return err; | 354 | return err; |
360 | } | 355 | } |
361 | 356 | ||
362 | static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index, | 357 | static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index, |
363 | void (*callback)(struct virtqueue *vq), | 358 | void (*callback)(struct virtqueue *vq), |
364 | const char *name, | 359 | const char *name, |
365 | u16 msix_vec) | 360 | u16 msix_vec) |
366 | { | 361 | { |
367 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 362 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
368 | struct virtio_pci_vq_info *info; | 363 | struct virtio_pci_vq_info *info; |
369 | struct virtqueue *vq; | 364 | struct virtqueue *vq; |
370 | unsigned long flags, size; | 365 | unsigned long flags, size; |
371 | u16 num; | 366 | u16 num; |
372 | int err; | 367 | int err; |
373 | 368 | ||
374 | /* Select the queue we're interested in */ | 369 | /* Select the queue we're interested in */ |
375 | iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); | 370 | iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); |
376 | 371 | ||
377 | /* Check if queue is either not available or already active. */ | 372 | /* Check if queue is either not available or already active. */ |
378 | num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM); | 373 | num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM); |
379 | if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) | 374 | if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) |
380 | return ERR_PTR(-ENOENT); | 375 | return ERR_PTR(-ENOENT); |
381 | 376 | ||
382 | /* allocate and fill out our structure the represents an active | 377 | /* allocate and fill out our structure the represents an active |
383 | * queue */ | 378 | * queue */ |
384 | info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL); | 379 | info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL); |
385 | if (!info) | 380 | if (!info) |
386 | return ERR_PTR(-ENOMEM); | 381 | return ERR_PTR(-ENOMEM); |
387 | 382 | ||
388 | info->queue_index = index; | 383 | info->queue_index = index; |
389 | info->num = num; | 384 | info->num = num; |
390 | info->msix_vector = msix_vec; | 385 | info->msix_vector = msix_vec; |
391 | 386 | ||
392 | size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); | 387 | size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); |
393 | info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); | 388 | info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); |
394 | if (info->queue == NULL) { | 389 | if (info->queue == NULL) { |
395 | err = -ENOMEM; | 390 | err = -ENOMEM; |
396 | goto out_info; | 391 | goto out_info; |
397 | } | 392 | } |
398 | 393 | ||
399 | /* activate the queue */ | 394 | /* activate the queue */ |
400 | iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, | 395 | iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, |
401 | vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); | 396 | vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); |
402 | 397 | ||
403 | /* create the vring */ | 398 | /* create the vring */ |
404 | vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, | 399 | vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, |
405 | vdev, info->queue, vp_notify, callback, name); | 400 | vdev, info->queue, vp_notify, callback, name); |
406 | if (!vq) { | 401 | if (!vq) { |
407 | err = -ENOMEM; | 402 | err = -ENOMEM; |
408 | goto out_activate_queue; | 403 | goto out_activate_queue; |
409 | } | 404 | } |
410 | 405 | ||
411 | vq->priv = info; | 406 | vq->priv = info; |
412 | info->vq = vq; | 407 | info->vq = vq; |
413 | 408 | ||
414 | if (msix_vec != VIRTIO_MSI_NO_VECTOR) { | 409 | if (msix_vec != VIRTIO_MSI_NO_VECTOR) { |
415 | iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); | 410 | iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); |
416 | msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); | 411 | msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); |
417 | if (msix_vec == VIRTIO_MSI_NO_VECTOR) { | 412 | if (msix_vec == VIRTIO_MSI_NO_VECTOR) { |
418 | err = -EBUSY; | 413 | err = -EBUSY; |
419 | goto out_assign; | 414 | goto out_assign; |
420 | } | 415 | } |
421 | } | 416 | } |
422 | 417 | ||
423 | spin_lock_irqsave(&vp_dev->lock, flags); | 418 | spin_lock_irqsave(&vp_dev->lock, flags); |
424 | list_add(&info->node, &vp_dev->virtqueues); | 419 | list_add(&info->node, &vp_dev->virtqueues); |
425 | spin_unlock_irqrestore(&vp_dev->lock, flags); | 420 | spin_unlock_irqrestore(&vp_dev->lock, flags); |
426 | 421 | ||
427 | return vq; | 422 | return vq; |
428 | 423 | ||
429 | out_assign: | 424 | out_assign: |
430 | vring_del_virtqueue(vq); | 425 | vring_del_virtqueue(vq); |
431 | out_activate_queue: | 426 | out_activate_queue: |
432 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); | 427 | iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); |
433 | free_pages_exact(info->queue, size); | 428 | free_pages_exact(info->queue, size); |
434 | out_info: | 429 | out_info: |
435 | kfree(info); | 430 | kfree(info); |
436 | return ERR_PTR(err); | 431 | return ERR_PTR(err); |
437 | } | 432 | } |
438 | 433 | ||
/*
 * Tear down a single virtqueue: unlink it from the per-device list,
 * quiesce its interrupt routing, free the vring and its backing pages.
 * Caller must ensure the device is no longer using the queue.
 */
static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vq->priv;
	unsigned long flags, size;

	/* Remove from the device's virtqueue list under the lock. */
	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	/* Select this queue so the following register writes target it. */
	iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

	if (vp_dev->msix_enabled) {
		/* Detach the MSI-X vector before the vring goes away. */
		iowrite16(VIRTIO_MSI_NO_VECTOR,
			  vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
		/* Flush the write out to device */
		ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
	}

	vring_del_virtqueue(vq);

	/* Select and deactivate the queue */
	iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

	/* Free the ring pages allocated in setup_vq(), then the bookkeeping. */
	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info);
}
467 | 462 | ||
468 | /* the config->del_vqs() implementation */ | 463 | /* the config->del_vqs() implementation */ |
469 | static void vp_del_vqs(struct virtio_device *vdev) | 464 | static void vp_del_vqs(struct virtio_device *vdev) |
470 | { | 465 | { |
471 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 466 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
472 | struct virtqueue *vq, *n; | 467 | struct virtqueue *vq, *n; |
473 | struct virtio_pci_vq_info *info; | 468 | struct virtio_pci_vq_info *info; |
474 | 469 | ||
475 | list_for_each_entry_safe(vq, n, &vdev->vqs, list) { | 470 | list_for_each_entry_safe(vq, n, &vdev->vqs, list) { |
476 | info = vq->priv; | 471 | info = vq->priv; |
477 | if (vp_dev->per_vq_vectors && | 472 | if (vp_dev->per_vq_vectors && |
478 | info->msix_vector != VIRTIO_MSI_NO_VECTOR) | 473 | info->msix_vector != VIRTIO_MSI_NO_VECTOR) |
479 | free_irq(vp_dev->msix_entries[info->msix_vector].vector, | 474 | free_irq(vp_dev->msix_entries[info->msix_vector].vector, |
480 | vq); | 475 | vq); |
481 | vp_del_vq(vq); | 476 | vp_del_vq(vq); |
482 | } | 477 | } |
483 | vp_dev->per_vq_vectors = false; | 478 | vp_dev->per_vq_vectors = false; |
484 | 479 | ||
485 | vp_free_vectors(vdev); | 480 | vp_free_vectors(vdev); |
486 | } | 481 | } |
487 | 482 | ||
/*
 * Try to set up @nvqs virtqueues with one particular interrupt strategy.
 *
 * @use_msix:        false = single legacy INTx shared by config + all vqs;
 *                   true  = use MSI-X vectors.
 * @per_vq_vectors:  with MSI-X: true = one vector per callback-bearing vq
 *                   (plus one for config changes), false = two vectors total
 *                   (config + one shared by all vqs).
 *
 * Returns 0 on success; on failure everything allocated so far is undone
 * via vp_del_vqs() and a negative errno is returned, so the caller can
 * retry with a cheaper strategy.
 */
static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
			      const char *names[],
			      bool use_msix,
			      bool per_vq_vectors)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors;

	if (!use_msix) {
		/* Old style: one normal interrupt for change and all vqs. */
		err = vp_request_intx(vdev);
		if (err)
			goto error_request;
	} else {
		if (per_vq_vectors) {
			/* Best option: one for change interrupt, one per vq. */
			nvectors = 1;
			for (i = 0; i < nvqs; ++i)
				if (callbacks[i])
					++nvectors;
		} else {
			/* Second best: one for change, shared for all vqs. */
			nvectors = 2;
		}

		err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
		if (err)
			goto error_request;
	}

	vp_dev->per_vq_vectors = per_vq_vectors;
	/* Vectors below msix_used_vectors are reserved (config change etc.). */
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		/* Pick the MSI-X vector this vq should signal, if any. */
		if (!callbacks[i] || !vp_dev->msix_enabled)
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		/* Shared-vector modes already have their handler installed. */
		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof *vp_dev->msix_names,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(vp_dev->msix_entries[msix_vec].vector,
				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err) {
			/* This vq has no irq yet: delete it by hand, then
			 * let error_find unwind the earlier ones. */
			vp_del_vq(vqs[i]);
			goto error_find;
		}
	}
	return 0;

error_find:
	vp_del_vqs(vdev);

error_request:
	return err;
}
561 | 556 | ||
562 | /* the config->find_vqs() implementation */ | 557 | /* the config->find_vqs() implementation */ |
563 | static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, | 558 | static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, |
564 | struct virtqueue *vqs[], | 559 | struct virtqueue *vqs[], |
565 | vq_callback_t *callbacks[], | 560 | vq_callback_t *callbacks[], |
566 | const char *names[]) | 561 | const char *names[]) |
567 | { | 562 | { |
568 | int err; | 563 | int err; |
569 | 564 | ||
570 | /* Try MSI-X with one vector per queue. */ | 565 | /* Try MSI-X with one vector per queue. */ |
571 | err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true); | 566 | err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true); |
572 | if (!err) | 567 | if (!err) |
573 | return 0; | 568 | return 0; |
574 | /* Fallback: MSI-X with one vector for config, one shared for queues. */ | 569 | /* Fallback: MSI-X with one vector for config, one shared for queues. */ |
575 | err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, | 570 | err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, |
576 | true, false); | 571 | true, false); |
577 | if (!err) | 572 | if (!err) |
578 | return 0; | 573 | return 0; |
579 | /* Finally fall back to regular interrupts. */ | 574 | /* Finally fall back to regular interrupts. */ |
580 | return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, | 575 | return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, |
581 | false, false); | 576 | false, false); |
582 | } | 577 | } |
583 | 578 | ||
/* virtio_config_ops implementation backed by legacy virtio-PCI I/O ports. */
static struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
};
595 | 590 | ||
/*
 * Release callback for the virtio device: runs when the last reference
 * to vdev.dev is dropped. Undoes everything virtio_pci_probe() set up,
 * in reverse order, and finally frees the containing structure.
 */
static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *dev = container_of(_d, struct virtio_device, dev);
	struct virtio_pci_device *vp_dev = to_vp_device(dev);
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	vp_del_vqs(dev);
	pci_set_drvdata(pci_dev, NULL);
	pci_iounmap(pci_dev, vp_dev->ioaddr);
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
	kfree(vp_dev);
}
609 | 604 | ||
610 | /* the PCI probing function */ | 605 | /* the PCI probing function */ |
611 | static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, | 606 | static int __devinit virtio_pci_probe(struct pci_dev *pci_dev, |
612 | const struct pci_device_id *id) | 607 | const struct pci_device_id *id) |
613 | { | 608 | { |
614 | struct virtio_pci_device *vp_dev; | 609 | struct virtio_pci_device *vp_dev; |
615 | int err; | 610 | int err; |
616 | 611 | ||
617 | /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ | 612 | /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ |
618 | if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) | 613 | if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) |
619 | return -ENODEV; | 614 | return -ENODEV; |
620 | 615 | ||
621 | if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { | 616 | if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { |
622 | printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", | 617 | printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", |
623 | VIRTIO_PCI_ABI_VERSION, pci_dev->revision); | 618 | VIRTIO_PCI_ABI_VERSION, pci_dev->revision); |
624 | return -ENODEV; | 619 | return -ENODEV; |
625 | } | 620 | } |
626 | 621 | ||
627 | /* allocate our structure and fill it out */ | 622 | /* allocate our structure and fill it out */ |
628 | vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); | 623 | vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); |
629 | if (vp_dev == NULL) | 624 | if (vp_dev == NULL) |
630 | return -ENOMEM; | 625 | return -ENOMEM; |
631 | 626 | ||
632 | vp_dev->vdev.dev.parent = virtio_pci_root; | 627 | vp_dev->vdev.dev.parent = &pci_dev->dev; |
633 | vp_dev->vdev.dev.release = virtio_pci_release_dev; | 628 | vp_dev->vdev.dev.release = virtio_pci_release_dev; |
634 | vp_dev->vdev.config = &virtio_pci_config_ops; | 629 | vp_dev->vdev.config = &virtio_pci_config_ops; |
635 | vp_dev->pci_dev = pci_dev; | 630 | vp_dev->pci_dev = pci_dev; |
636 | INIT_LIST_HEAD(&vp_dev->virtqueues); | 631 | INIT_LIST_HEAD(&vp_dev->virtqueues); |
637 | spin_lock_init(&vp_dev->lock); | 632 | spin_lock_init(&vp_dev->lock); |
638 | 633 | ||
639 | /* Disable MSI/MSIX to bring device to a known good state. */ | 634 | /* Disable MSI/MSIX to bring device to a known good state. */ |
640 | pci_msi_off(pci_dev); | 635 | pci_msi_off(pci_dev); |
641 | 636 | ||
642 | /* enable the device */ | 637 | /* enable the device */ |
643 | err = pci_enable_device(pci_dev); | 638 | err = pci_enable_device(pci_dev); |
644 | if (err) | 639 | if (err) |
645 | goto out; | 640 | goto out; |
646 | 641 | ||
647 | err = pci_request_regions(pci_dev, "virtio-pci"); | 642 | err = pci_request_regions(pci_dev, "virtio-pci"); |
648 | if (err) | 643 | if (err) |
649 | goto out_enable_device; | 644 | goto out_enable_device; |
650 | 645 | ||
651 | vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); | 646 | vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); |
652 | if (vp_dev->ioaddr == NULL) | 647 | if (vp_dev->ioaddr == NULL) |
653 | goto out_req_regions; | 648 | goto out_req_regions; |
654 | 649 | ||
655 | pci_set_drvdata(pci_dev, vp_dev); | 650 | pci_set_drvdata(pci_dev, vp_dev); |
656 | pci_set_master(pci_dev); | 651 | pci_set_master(pci_dev); |
657 | 652 | ||
658 | /* we use the subsystem vendor/device id as the virtio vendor/device | 653 | /* we use the subsystem vendor/device id as the virtio vendor/device |
659 | * id. this allows us to use the same PCI vendor/device id for all | 654 | * id. this allows us to use the same PCI vendor/device id for all |
660 | * virtio devices and to identify the particular virtio driver by | 655 | * virtio devices and to identify the particular virtio driver by |
661 | * the subsystem ids */ | 656 | * the subsystem ids */ |
662 | vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; | 657 | vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; |
663 | vp_dev->vdev.id.device = pci_dev->subsystem_device; | 658 | vp_dev->vdev.id.device = pci_dev->subsystem_device; |
664 | 659 | ||
665 | /* finally register the virtio device */ | 660 | /* finally register the virtio device */ |
666 | err = register_virtio_device(&vp_dev->vdev); | 661 | err = register_virtio_device(&vp_dev->vdev); |
667 | if (err) | 662 | if (err) |
668 | goto out_set_drvdata; | 663 | goto out_set_drvdata; |
669 | 664 | ||
670 | return 0; | 665 | return 0; |
671 | 666 | ||
672 | out_set_drvdata: | 667 | out_set_drvdata: |
673 | pci_set_drvdata(pci_dev, NULL); | 668 | pci_set_drvdata(pci_dev, NULL); |
674 | pci_iounmap(pci_dev, vp_dev->ioaddr); | 669 | pci_iounmap(pci_dev, vp_dev->ioaddr); |
675 | out_req_regions: | 670 | out_req_regions: |
676 | pci_release_regions(pci_dev); | 671 | pci_release_regions(pci_dev); |
677 | out_enable_device: | 672 | out_enable_device: |
678 | pci_disable_device(pci_dev); | 673 | pci_disable_device(pci_dev); |
679 | out: | 674 | out: |
680 | kfree(vp_dev); | 675 | kfree(vp_dev); |
681 | return err; | 676 | return err; |
682 | } | 677 | } |
683 | 678 | ||
684 | static void __devexit virtio_pci_remove(struct pci_dev *pci_dev) | 679 | static void __devexit virtio_pci_remove(struct pci_dev *pci_dev) |
685 | { | 680 | { |
686 | struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); | 681 | struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); |
687 | 682 | ||
688 | unregister_virtio_device(&vp_dev->vdev); | 683 | unregister_virtio_device(&vp_dev->vdev); |
689 | } | 684 | } |
690 | 685 | ||
#ifdef CONFIG_PM
/* Legacy PM hook: save PCI config space and drop the device to D3hot. */
static int virtio_pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
	pci_save_state(pci_dev);
	pci_set_power_state(pci_dev, PCI_D3hot);
	return 0;
}

/* Legacy PM hook: return the device to D0 and restore its config space. */
static int virtio_pci_resume(struct pci_dev *pci_dev)
{
	pci_restore_state(pci_dev);
	pci_set_power_state(pci_dev, PCI_D0);
	return 0;
}
#endif
706 | 701 | ||
/* PCI driver glue; matched devices appear under bus/pci/drivers/virtio-pci. */
static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= __devexit_p(virtio_pci_remove),
#ifdef CONFIG_PM
	.suspend	= virtio_pci_suspend,
	.resume		= virtio_pci_resume,
#endif
};
717 | 712 | ||
/* Module init: just register the PCI driver; each virtio device is
 * created under its own PCI device node, so no root device is needed. */
static int __init virtio_pci_init(void)
{
	return pci_register_driver(&virtio_pci_driver);
}

module_init(virtio_pci_init);
734 | 719 | ||
/* Module exit: unregistering the driver unbinds (and thus tears down)
 * every virtio-pci device. */
static void __exit virtio_pci_exit(void)
{
	pci_unregister_driver(&virtio_pci_driver);
}

module_exit(virtio_pci_exit);
742 | 726 |