Blame view

drivers/virtio/virtio_pci_modern.c 20.9 KB
f33f5fe25   Thomas Gleixner   treewide: Replace...
1
  // SPDX-License-Identifier: GPL-2.0-or-later
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
2
3
4
5
6
7
8
9
10
11
12
13
14
  /*
   * Virtio PCI driver - modern (virtio 1.0) device support
   *
   * This module allows virtio devices to be used over a virtual PCI device.
   * This can be used with QEMU based VMMs like KVM or Xen.
   *
   * Copyright IBM Corp. 2007
   * Copyright Red Hat, Inc. 2014
   *
   * Authors:
   *  Anthony Liguori  <aliguori@us.ibm.com>
   *  Rusty Russell <rusty@rustcorp.com.au>
   *  Michael S. Tsirkin <mst@redhat.com>
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
15
   */
05dbcb430   Michael S. Tsirkin   virtio: virtio 1....
16
  #include <linux/delay.h>
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
17
18
  #define VIRTIO_PCI_NO_LEGACY
  #include "virtio_pci_common.h"
c5d4c2c9c   Michael S. Tsirkin   virtio_pci_modern...
19
20
21
22
23
24
25
26
27
28
29
30
/*
 * Type-safe wrappers for io accesses.
 * Use these to enforce at compile time the following spec requirement:
 *
 * The driver MUST access each field using the “natural” access
 * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
 * for 16-bit fields and 8-bit accesses for 8-bit fields.
 */
/* Read an 8-bit device register. */
static inline u8 vp_ioread8(u8 __iomem *addr)
{
	return ioread8(addr);
}
61bd405f4   Gonglei   virtio_pci_modern...
31
  static inline u16 vp_ioread16 (__le16 __iomem *addr)
c5d4c2c9c   Michael S. Tsirkin   virtio_pci_modern...
32
33
34
  {
  	return ioread16(addr);
  }
/* Read a 32-bit little-endian device register. */
static inline u32 vp_ioread32(__le32 __iomem *addr)
{
	return ioread32(addr);
}
  
/* Write an 8-bit device register. */
static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
{
	iowrite8(value, addr);
}
/* Write a 16-bit little-endian device register. */
static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
{
	iowrite16(value, addr);
}
/* Write a 32-bit little-endian device register. */
static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
{
	iowrite32(value, addr);
}
a8557d32f   Michael S. Tsirkin   virtio_pci_modern...
52
53
54
55
56
57
  static void vp_iowrite64_twopart(u64 val,
  				 __le32 __iomem *lo, __le32 __iomem *hi)
  {
  	vp_iowrite32((u32)val, lo);
  	vp_iowrite32(val >> 32, hi);
  }
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
  static void __iomem *map_capability(struct pci_dev *dev, int off,
  				    size_t minlen,
  				    u32 align,
  				    u32 start, u32 size,
  				    size_t *len)
  {
  	u8 bar;
  	u32 offset, length;
  	void __iomem *p;
  
  	pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
  						 bar),
  			     &bar);
  	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
  			     &offset);
  	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
  			      &length);
  
  	if (length <= start) {
  		dev_err(&dev->dev,
  			"virtio_pci: bad capability len %u (>%u expected)
  ",
  			length, start);
  		return NULL;
  	}
  
  	if (length - start < minlen) {
  		dev_err(&dev->dev,
  			"virtio_pci: bad capability len %u (>=%zu expected)
  ",
  			length, minlen);
  		return NULL;
  	}
  
  	length -= start;
  
  	if (start + offset < offset) {
  		dev_err(&dev->dev,
  			"virtio_pci: map wrap-around %u+%u
  ",
  			start, offset);
  		return NULL;
  	}
  
  	offset += start;
  
  	if (offset & (align - 1)) {
  		dev_err(&dev->dev,
  			"virtio_pci: offset %u not aligned to %u
  ",
  			offset, align);
  		return NULL;
  	}
  
  	if (length > size)
  		length = size;
  
  	if (len)
  		*len = length;
  
  	if (minlen + offset < minlen ||
  	    minlen + offset > pci_resource_len(dev, bar)) {
  		dev_err(&dev->dev,
  			"virtio_pci: map virtio %zu@%u "
  			"out of range on bar %i length %lu
  ",
  			minlen, offset,
  			bar, (unsigned long)pci_resource_len(dev, bar));
  		return NULL;
  	}
  
  	p = pci_iomap_range(dev, bar, offset, length);
  	if (!p)
  		dev_err(&dev->dev,
  			"virtio_pci: unable to map virtio %u@%u on bar %i
  ",
  			length, offset, bar);
  	return p;
  }
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
137
138
139
140
141
  /* virtio config->get_features() implementation */
  static u64 vp_get_features(struct virtio_device *vdev)
  {
  	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
  	u64 features;
a8557d32f   Michael S. Tsirkin   virtio_pci_modern...
142
143
144
145
  	vp_iowrite32(0, &vp_dev->common->device_feature_select);
  	features = vp_ioread32(&vp_dev->common->device_feature);
  	vp_iowrite32(1, &vp_dev->common->device_feature_select);
  	features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
146
147
148
  
  	return features;
  }
cfecc2918   Tiwei Bie   virtio_pci: suppo...
149
150
151
152
153
154
155
156
157
  static void vp_transport_features(struct virtio_device *vdev, u64 features)
  {
  	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
  	struct pci_dev *pci_dev = vp_dev->pci_dev;
  
  	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
  			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
  		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
  }
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
158
159
160
161
  /* virtio config->finalize_features() implementation */
  static int vp_finalize_features(struct virtio_device *vdev)
  {
  	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
cfecc2918   Tiwei Bie   virtio_pci: suppo...
162
  	u64 features = vdev->features;
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
163
164
165
  
  	/* Give virtio_ring a chance to accept features. */
  	vring_transport_features(vdev);
cfecc2918   Tiwei Bie   virtio_pci: suppo...
166
167
  	/* Give virtio_pci a chance to accept features. */
  	vp_transport_features(vdev, features);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
168
169
170
171
172
173
  	if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
  		dev_err(&vdev->dev, "virtio: device uses modern interface "
  			"but does not have VIRTIO_F_VERSION_1
  ");
  		return -EINVAL;
  	}
a8557d32f   Michael S. Tsirkin   virtio_pci_modern...
174
175
176
177
  	vp_iowrite32(0, &vp_dev->common->guest_feature_select);
  	vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
  	vp_iowrite32(1, &vp_dev->common->guest_feature_select);
  	vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
  
  	return 0;
  }
  
  /* virtio config->get() implementation */
  static void vp_get(struct virtio_device *vdev, unsigned offset,
  		   void *buf, unsigned len)
  {
  	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
  	u8 b;
  	__le16 w;
  	__le32 l;
  
  	BUG_ON(offset + len > vp_dev->device_len);
  
  	switch (len) {
  	case 1:
  		b = ioread8(vp_dev->device + offset);
  		memcpy(buf, &b, sizeof b);
  		break;
  	case 2:
  		w = cpu_to_le16(ioread16(vp_dev->device + offset));
  		memcpy(buf, &w, sizeof w);
  		break;
  	case 4:
  		l = cpu_to_le32(ioread32(vp_dev->device + offset));
  		memcpy(buf, &l, sizeof l);
  		break;
  	case 8:
  		l = cpu_to_le32(ioread32(vp_dev->device + offset));
  		memcpy(buf, &l, sizeof l);
  		l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l));
  		memcpy(buf + sizeof l, &l, sizeof l);
  		break;
  	default:
  		BUG();
  	}
  }
  
  /* the config->set() implementation.  it's symmetric to the config->get()
   * implementation */
  static void vp_set(struct virtio_device *vdev, unsigned offset,
  		   const void *buf, unsigned len)
  {
  	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
  	u8 b;
  	__le16 w;
  	__le32 l;
  
  	BUG_ON(offset + len > vp_dev->device_len);
  
  	switch (len) {
  	case 1:
  		memcpy(&b, buf, sizeof b);
  		iowrite8(b, vp_dev->device + offset);
  		break;
  	case 2:
  		memcpy(&w, buf, sizeof w);
  		iowrite16(le16_to_cpu(w), vp_dev->device + offset);
  		break;
  	case 4:
  		memcpy(&l, buf, sizeof l);
  		iowrite32(le32_to_cpu(l), vp_dev->device + offset);
  		break;
  	case 8:
  		memcpy(&l, buf, sizeof l);
  		iowrite32(le32_to_cpu(l), vp_dev->device + offset);
  		memcpy(&l, buf + sizeof l, sizeof l);
  		iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l);
  		break;
  	default:
  		BUG();
  	}
  }
  
/* config->generation() implementation: returns the device's config
 * generation counter, which the device bumps whenever it changes the
 * config space, so callers can detect torn multi-field reads. */
static u32 vp_generation(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return vp_ioread8(&vp_dev->common->config_generation);
}
  
/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* Read the device status byte from the common config window. */
	return vp_ioread8(&vp_dev->common->device_status);
}
  
/* config->set_status() implementation: writing 0 is a reset and must go
 * through vp_reset() instead, hence the BUG_ON. */
static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	vp_iowrite8(status, &vp_dev->common->device_status);
}
  
/* config->reset() implementation: write 0 to device_status, then poll
 * until the device acknowledges by reading back 0, then flush callbacks. */
static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* 0 status means a reset. */
	vp_iowrite8(0, &vp_dev->common->device_status);
	/* After writing 0 to device_status, the driver MUST wait for a read of
	 * device_status to return 0 before reinitializing the device.
	 * This will flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any.
	 */
	while (vp_ioread8(&vp_dev->common->device_status))
		msleep(1);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}
  
/* Route config-change interrupts to MSI-X vector @vector.  Returns the
 * vector the device actually accepted (VIRTIO_MSI_NO_VECTOR if it could
 * not allocate resources for it). */
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	/* Setup the vector used for configuration events */
	vp_iowrite16(vector, &vp_dev->common->msix_config);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return vp_ioread16(&vp_dev->common->msix_config);
}
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
298
/*
 * Create and activate virtqueue @index.
 *
 * Validates the queue against the device (exists, has a size, not already
 * enabled, power-of-2 size), allocates the vring, programs the ring
 * addresses into the common config, resolves the per-queue notification
 * address and, if requested, attaches an MSI-X vector.
 *
 * Returns the new virtqueue or an ERR_PTR on failure; on failure the
 * vring and any per-queue notify mapping are torn down again.
 */
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  bool ctx,
				  u16 msix_vec)
{
	struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
	struct virtqueue *vq;
	u16 num, off;
	int err;

	/* Queue index must be below the device-advertised queue count. */
	if (index >= vp_ioread16(&cfg->num_queues))
		return ERR_PTR(-ENOENT);

	/* Select the queue we're interested in */
	vp_iowrite16(index, &cfg->queue_select);

	/* Check if queue is either not available or already active. */
	num = vp_ioread16(&cfg->queue_size);
	if (!num || vp_ioread16(&cfg->queue_enable))
		return ERR_PTR(-ENOENT);

	/* Ring sizes must be a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
		return ERR_PTR(-EINVAL);
	}

	/* get offset of notification word for this vq */
	off = vp_ioread16(&cfg->queue_notify_off);

	info->msix_vector = msix_vec;

	/* create the vring */
	vq = vring_create_virtqueue(index, num,
				    SMP_CACHE_BYTES, &vp_dev->vdev,
				    true, true, ctx,
				    vp_notify, callback, name);
	if (!vq)
		return ERR_PTR(-ENOMEM);

	/* activate the queue: program size and ring addresses. */
	vp_iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size);
	vp_iowrite64_twopart(virtqueue_get_desc_addr(vq),
			     &cfg->queue_desc_lo, &cfg->queue_desc_hi);
	vp_iowrite64_twopart(virtqueue_get_avail_addr(vq),
			     &cfg->queue_avail_lo, &cfg->queue_avail_hi);
	vp_iowrite64_twopart(virtqueue_get_used_addr(vq),
			     &cfg->queue_used_lo, &cfg->queue_used_hi);

	/* Resolve the notify address: either inside the pre-mapped notify
	 * window, or by mapping this queue's 2-byte word individually. */
	if (vp_dev->notify_base) {
		/* offset should not wrap */
		if ((u64)off * vp_dev->notify_offset_multiplier + 2
		    > vp_dev->notify_len) {
			dev_warn(&vp_dev->pci_dev->dev,
				 "bad notification offset %u (x %u) "
				 "for queue %u > %zd",
				 off, vp_dev->notify_offset_multiplier,
				 index, vp_dev->notify_len);
			err = -EINVAL;
			goto err_map_notify;
		}
		vq->priv = (void __force *)vp_dev->notify_base +
			off * vp_dev->notify_offset_multiplier;
	} else {
		vq->priv = (void __force *)map_capability(vp_dev->pci_dev,
					  vp_dev->notify_map_cap, 2, 2,
					  off * vp_dev->notify_offset_multiplier, 2,
					  NULL);
	}

	if (!vq->priv) {
		err = -ENOMEM;
		goto err_map_notify;
	}

	/* Attach the MSI-X vector and verify the device accepted it. */
	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		vp_iowrite16(msix_vec, &cfg->queue_msix_vector);
		msix_vec = vp_ioread16(&cfg->queue_msix_vector);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto err_assign_vector;
		}
	}

	return vq;

err_assign_vector:
	/* Only unmap if the notify word was mapped per-queue above. */
	if (!vp_dev->notify_base)
		pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
	vring_del_virtqueue(vq);
	return ERR_PTR(err);
}
  
  static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
f94682dde   Michael S. Tsirkin   virtio: add conte...
393
394
395
396
  			      struct virtqueue *vqs[],
  			      vq_callback_t *callbacks[],
  			      const char * const names[], const bool *ctx,
  			      struct irq_affinity *desc)
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
397
398
399
  {
  	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
  	struct virtqueue *vq;
f94682dde   Michael S. Tsirkin   virtio: add conte...
400
  	int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
401
402
403
404
405
406
407
408
  
  	if (rc)
  		return rc;
  
  	/* Select and activate all queues. Has to be done last: once we do
  	 * this, there's no way to go back except reset.
  	 */
  	list_for_each_entry(vq, &vdev->vqs, list) {
a8557d32f   Michael S. Tsirkin   virtio_pci_modern...
409
410
  		vp_iowrite16(vq->index, &vp_dev->common->queue_select);
  		vp_iowrite16(1, &vp_dev->common->queue_enable);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
411
412
413
414
  	}
  
  	return 0;
  }
0a9b3f47d   Michael S. Tsirkin   Revert "virtio_pc...
415
/* Tear down one virtqueue: detach its MSI-X vector (if MSI-X is in use),
 * unmap its per-queue notify window (only if it was mapped individually,
 * i.e. there is no shared notify_base) and free the vring. */
static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);

	vp_iowrite16(vq->index, &vp_dev->common->queue_select);

	if (vp_dev->msix_enabled) {
		vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
			     &vp_dev->common->queue_msix_vector);
		/* Flush the write out to device */
		vp_ioread16(&vp_dev->common->queue_msix_vector);
	}

	if (!vp_dev->notify_base)
		pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);

	vring_del_virtqueue(vq);
}
d3f5f0656   Michael S. Tsirkin   virtio_pci_modern...
432
433
434
435
436
437
438
439
440
441
442
443
444
/* Config ops for devices with no device-specific config window:
 * ->get/->set stay NULL so any config access is an immediate bug. */
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
	.get		= NULL,
	.set		= NULL,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
};
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
447
448
449
450
451
452
453
454
455
456
457
458
459
/* Config ops for devices that do expose a device-specific config window. */
static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
};
  
  /**
   * virtio_pci_find_capability - walk capabilities to find device info.
   * @dev: the pci device
   * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
   * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
   *
   * Returns offset of the capability, or 0.
   */
  static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
59a5b0f7b   Gerd Hoffmann   virtio-pci: alloc...
472
  					     u32 ioresource_types, int *bars)
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
  {
  	int pos;
  
  	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
  	     pos > 0;
  	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
  		u8 type, bar;
  		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
  							 cfg_type),
  				     &type);
  		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
  							 bar),
  				     &bar);
  
  		/* Ignore structures with reserved BAR values */
  		if (bar > 0x5)
  			continue;
  
  		if (type == cfg_type) {
  			if (pci_resource_len(dev, bar) &&
59a5b0f7b   Gerd Hoffmann   virtio-pci: alloc...
493
494
  			    pci_resource_flags(dev, bar) & ioresource_types) {
  				*bars |= (1 << bar);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
495
  				return pos;
59a5b0f7b   Gerd Hoffmann   virtio-pci: alloc...
496
  			}
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
497
498
499
500
  		}
  	}
  	return 0;
  }
89461c4a1   Rusty Russell   virtio_pci: macro...
501
/* This is part of the ABI.  Don't screw with it. */
/* Compile-time check that every VIRTIO_PCI_* register offset constant
 * matches the corresponding struct field layout; this function generates
 * no code. */
static inline void check_offsets(void)
{
	/* Note: disk space was harmed in compilation of this function. */
	BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
		     offsetof(struct virtio_pci_cap, cap_vndr));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
		     offsetof(struct virtio_pci_cap, cap_next));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
		     offsetof(struct virtio_pci_cap, cap_len));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
		     offsetof(struct virtio_pci_cap, cfg_type));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
		     offsetof(struct virtio_pci_cap, bar));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
		     offsetof(struct virtio_pci_cap, offset));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
		     offsetof(struct virtio_pci_cap, length));
	BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
		     offsetof(struct virtio_pci_notify_cap,
			      notify_off_multiplier));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      device_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
		     offsetof(struct virtio_pci_common_cfg, device_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      guest_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
		     offsetof(struct virtio_pci_common_cfg, guest_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, msix_config));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
		     offsetof(struct virtio_pci_common_cfg, num_queues));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
		     offsetof(struct virtio_pci_common_cfg, device_status));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
		     offsetof(struct virtio_pci_common_cfg, config_generation));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
		     offsetof(struct virtio_pci_common_cfg, queue_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
		     offsetof(struct virtio_pci_common_cfg, queue_size));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
		     offsetof(struct virtio_pci_common_cfg, queue_enable));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
		     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}
  
/* the PCI probing function */
/*
 * Probe a modern (virtio 1.0) PCI device: locate the common/ISR/notify
 * (and optional device-specific) capabilities, claim the BARs they live
 * in, map them, and wire up the transport callbacks on @vp_dev.
 * Returns 0 on success, -ENODEV to fall back to the legacy driver, or a
 * negative errno on error.
 */
int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;
	int err, common, isr, notify, device;
	u32 notify_length;
	u32 notify_offset;

	check_offsets();

	/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
		return -ENODEV;

	if (pci_dev->device < 0x1040) {
		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as legacy driver always did.
		 */
		vp_dev->vdev.id.device = pci_dev->subsystem_device;
	} else {
		/* Modern devices: simply use PCI device id, but start from 0x1040. */
		vp_dev->vdev.id.device = pci_dev->device - 0x1040;
	}
	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;

	/* check for a common config: if not, use legacy mode (bar 0). */
	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);
	if (!common) {
		dev_info(&pci_dev->dev,
			 "virtio_pci: leaving for legacy driver\n");
		return -ENODEV;
	}

	/* If common is there, these should be too... */
	isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
					 IORESOURCE_IO | IORESOURCE_MEM,
					 &vp_dev->modern_bars);
	notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);
	if (!isr || !notify) {
		dev_err(&pci_dev->dev,
			"virtio_pci: missing capabilities %i/%i/%i\n",
			common, isr, notify);
		return -EINVAL;
	}

	/* Prefer 64-bit DMA, fall back to 32-bit; a failure here is only a
	 * warning — the device may still work within the default mask. */
	err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pci_dev->dev,
						DMA_BIT_MASK(32));
	if (err)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");

	/* Device capability is only mandatory for devices that have
	 * device-specific configuration.
	 */
	device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &vp_dev->modern_bars);

	err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars,
					   "virtio-pci-modern");
	if (err)
		return err;

	err = -EINVAL;
	vp_dev->common = map_capability(pci_dev, common,
					sizeof(struct virtio_pci_common_cfg), 4,
					0, sizeof(struct virtio_pci_common_cfg),
					NULL);
	if (!vp_dev->common)
		goto err_map_common;
	vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
				     0, 1,
				     NULL);
	if (!vp_dev->isr)
		goto err_map_isr;

	/* Read notify_off_multiplier from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						notify_off_multiplier),
			      &vp_dev->notify_offset_multiplier);
	/* Read notify length and offset from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.length),
			      &notify_length);
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.offset),
			      &notify_offset);

	/* We don't know how many VQs we'll map, ahead of the time.
	 * If notify length is small, map it all now.
	 * Otherwise, map each VQ individually later.
	 */
	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
		vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2,
						     0, notify_length,
						     &vp_dev->notify_len);
		if (!vp_dev->notify_base)
			goto err_map_notify;
	} else {
		vp_dev->notify_map_cap = notify;
	}

	/* Again, we don't know how much we should map, but PAGE_SIZE
	 * is more than enough for all existing devices.
	 */
	if (device) {
		vp_dev->device = map_capability(pci_dev, device, 0, 4,
						0, PAGE_SIZE,
						&vp_dev->device_len);
		if (!vp_dev->device)
			goto err_map_device;

		vp_dev->vdev.config = &virtio_pci_config_ops;
	} else {
		vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
	}

	vp_dev->config_vector = vp_config_vector;
	vp_dev->setup_vq = setup_vq;
	vp_dev->del_vq = del_vq;

	return 0;

err_map_device:
	if (vp_dev->notify_base)
		pci_iounmap(pci_dev, vp_dev->notify_base);
err_map_notify:
	pci_iounmap(pci_dev, vp_dev->isr);
err_map_isr:
	pci_iounmap(pci_dev, vp_dev->common);
err_map_common:
	return err;
}
  
/* Undo virtio_pci_modern_probe(): unmap every window that was mapped
 * (device and notify_base are optional) and release the claimed BARs. */
void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
{
	struct pci_dev *pci_dev = vp_dev->pci_dev;

	if (vp_dev->device)
		pci_iounmap(pci_dev, vp_dev->device);
	if (vp_dev->notify_base)
		pci_iounmap(pci_dev, vp_dev->notify_base);
	pci_iounmap(pci_dev, vp_dev->isr);
	pci_iounmap(pci_dev, vp_dev->common);
	pci_release_selected_regions(pci_dev, vp_dev->modern_bars);
}