Blame view

drivers/virtio/virtio_pci_modern.c 23.5 KB
f33f5fe25   Thomas Gleixner   treewide: Replace...
1
  // SPDX-License-Identifier: GPL-2.0-or-later
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
2
3
4
5
6
7
8
9
10
11
12
13
14
  /*
   * Virtio PCI driver - modern (virtio 1.0) device support
   *
   * This module allows virtio devices to be used over a virtual PCI device.
   * This can be used with QEMU based VMMs like KVM or Xen.
   *
   * Copyright IBM Corp. 2007
   * Copyright Red Hat, Inc. 2014
   *
   * Authors:
   *  Anthony Liguori  <aliguori@us.ibm.com>
   *  Rusty Russell <rusty@rustcorp.com.au>
   *  Michael S. Tsirkin <mst@redhat.com>
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
15
   */
05dbcb430   Michael S. Tsirkin   virtio: virtio 1....
16
  #include <linux/delay.h>
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
17
  #define VIRTIO_PCI_NO_LEGACY
e7c8cc35a   Matej Genci   virtio: add VIRTI...
18
  #define VIRTIO_RING_NO_LEGACY
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
19
  #include "virtio_pci_common.h"
c5d4c2c9c   Michael S. Tsirkin   virtio_pci_modern...
20
21
22
23
24
25
26
27
  /*
   * Type-safe wrappers for io accesses.
   * Use these to enforce at compile time the following spec requirement:
   *
   * The driver MUST access each field using the “natural” access
   * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses
   * for 16-bit fields and 8-bit accesses for 8-bit fields.
   */
fe0580ac5   Krzysztof Kozlowski   virtio: pci: cons...
28
  static inline u8 vp_ioread8(const u8 __iomem *addr)
c5d4c2c9c   Michael S. Tsirkin   virtio_pci_modern...
29
30
31
  {
  	return ioread8(addr);
  }
fe0580ac5   Krzysztof Kozlowski   virtio: pci: cons...
32
  static inline u16 vp_ioread16 (const __le16 __iomem *addr)
c5d4c2c9c   Michael S. Tsirkin   virtio_pci_modern...
33
34
35
  {
  	return ioread16(addr);
  }
fe0580ac5   Krzysztof Kozlowski   virtio: pci: cons...
36
  static inline u32 vp_ioread32(const __le32 __iomem *addr)
c5d4c2c9c   Michael S. Tsirkin   virtio_pci_modern...
37
38
39
40
41
42
43
44
  {
  	return ioread32(addr);
  }
  
  static inline void vp_iowrite8(u8 value, u8 __iomem *addr)
  {
  	iowrite8(value, addr);
  }
61bd405f4   Gonglei   virtio_pci_modern...
45
  static inline void vp_iowrite16(u16 value, __le16 __iomem *addr)
c5d4c2c9c   Michael S. Tsirkin   virtio_pci_modern...
46
47
48
  {
  	iowrite16(value, addr);
  }
61bd405f4   Gonglei   virtio_pci_modern...
49
  static inline void vp_iowrite32(u32 value, __le32 __iomem *addr)
c5d4c2c9c   Michael S. Tsirkin   virtio_pci_modern...
50
51
52
  {
  	iowrite32(value, addr);
  }
a8557d32f   Michael S. Tsirkin   virtio_pci_modern...
53
54
55
56
57
58
/*
 * Write a 64-bit value as two 32-bit halves.  The common config layout
 * has no 64-bit registers, so 64-bit quantities are split across a
 * lo/hi register pair; the low half is written first.
 */
static void vp_iowrite64_twopart(u64 val,
				 __le32 __iomem *lo, __le32 __iomem *hi)
{
	vp_iowrite32((u32)val, lo);
	vp_iowrite32(val >> 32, hi);
}
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
  static void __iomem *map_capability(struct pci_dev *dev, int off,
  				    size_t minlen,
  				    u32 align,
  				    u32 start, u32 size,
  				    size_t *len)
  {
  	u8 bar;
  	u32 offset, length;
  	void __iomem *p;
  
  	pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
  						 bar),
  			     &bar);
  	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
  			     &offset);
  	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
  			      &length);
  
  	if (length <= start) {
  		dev_err(&dev->dev,
  			"virtio_pci: bad capability len %u (>%u expected)
  ",
  			length, start);
  		return NULL;
  	}
  
  	if (length - start < minlen) {
  		dev_err(&dev->dev,
  			"virtio_pci: bad capability len %u (>=%zu expected)
  ",
  			length, minlen);
  		return NULL;
  	}
  
  	length -= start;
  
  	if (start + offset < offset) {
  		dev_err(&dev->dev,
  			"virtio_pci: map wrap-around %u+%u
  ",
  			start, offset);
  		return NULL;
  	}
  
  	offset += start;
  
  	if (offset & (align - 1)) {
  		dev_err(&dev->dev,
  			"virtio_pci: offset %u not aligned to %u
  ",
  			offset, align);
  		return NULL;
  	}
  
  	if (length > size)
  		length = size;
  
  	if (len)
  		*len = length;
  
  	if (minlen + offset < minlen ||
  	    minlen + offset > pci_resource_len(dev, bar)) {
  		dev_err(&dev->dev,
  			"virtio_pci: map virtio %zu@%u "
  			"out of range on bar %i length %lu
  ",
  			minlen, offset,
  			bar, (unsigned long)pci_resource_len(dev, bar));
  		return NULL;
  	}
  
  	p = pci_iomap_range(dev, bar, offset, length);
  	if (!p)
  		dev_err(&dev->dev,
  			"virtio_pci: unable to map virtio %u@%u on bar %i
  ",
  			length, offset, bar);
  	return p;
  }
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
138
139
140
141
142
/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u64 features;

	/* Feature bits are exposed 32 at a time through a select/read
	 * register pair: window 0 holds bits 0-31, window 1 bits 32-63.
	 * Each select write must precede the corresponding read.
	 */
	vp_iowrite32(0, &vp_dev->common->device_feature_select);
	features = vp_ioread32(&vp_dev->common->device_feature);
	vp_iowrite32(1, &vp_dev->common->device_feature_select);
	features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32);

	return features;
}
cfecc2918   Tiwei Bie   virtio_pci: suppo...
150
151
152
153
154
155
156
157
158
  static void vp_transport_features(struct virtio_device *vdev, u64 features)
  {
  	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
  	struct pci_dev *pci_dev = vp_dev->pci_dev;
  
  	if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
  			pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
  		__virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
  }
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
159
160
161
162
  /* virtio config->finalize_features() implementation */
  static int vp_finalize_features(struct virtio_device *vdev)
  {
  	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
cfecc2918   Tiwei Bie   virtio_pci: suppo...
163
  	u64 features = vdev->features;
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
164
165
166
  
  	/* Give virtio_ring a chance to accept features. */
  	vring_transport_features(vdev);
cfecc2918   Tiwei Bie   virtio_pci: suppo...
167
168
  	/* Give virtio_pci a chance to accept features. */
  	vp_transport_features(vdev, features);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
169
170
171
172
173
174
  	if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
  		dev_err(&vdev->dev, "virtio: device uses modern interface "
  			"but does not have VIRTIO_F_VERSION_1
  ");
  		return -EINVAL;
  	}
a8557d32f   Michael S. Tsirkin   virtio_pci_modern...
175
176
177
178
  	vp_iowrite32(0, &vp_dev->common->guest_feature_select);
  	vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature);
  	vp_iowrite32(1, &vp_dev->common->guest_feature_select);
  	vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
  
  	return 0;
  }
  
/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
		   void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u8 b;
	__le16 w;
	__le32 l;

	/* Device config must be read with the field's natural width, so
	 * read into a correctly-sized temporary and then copy into the
	 * (possibly unaligned) caller buffer.
	 */
	BUG_ON(offset + len > vp_dev->device_len);

	switch (len) {
	case 1:
		b = ioread8(vp_dev->device + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(ioread16(vp_dev->device + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(ioread32(vp_dev->device + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		/* 64-bit fields are fetched as two 32-bit reads, low first. */
		l = cpu_to_le32(ioread32(vp_dev->device + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}
  
/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u8 b;
	__le16 w;
	__le32 l;

	/* Mirror of vp_get(): copy from the caller buffer into a
	 * correctly-sized temporary, then write with the natural width.
	 */
	BUG_ON(offset + len > vp_dev->device_len);

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		iowrite8(b, vp_dev->device + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		iowrite16(le16_to_cpu(w), vp_dev->device + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset);
		break;
	case 8:
		/* 64-bit fields are stored as two 32-bit writes, low first. */
		memcpy(&l, buf, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l);
		break;
	default:
		BUG();
	}
}
  
  static u32 vp_generation(struct virtio_device *vdev)
  {
  	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
a8557d32f   Michael S. Tsirkin   virtio_pci_modern...
257
  	return vp_ioread8(&vp_dev->common->config_generation);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
258
259
260
261
262
263
  }
  
  /* config->{get,set}_status() implementations */
  static u8 vp_get_status(struct virtio_device *vdev)
  {
  	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
a8557d32f   Michael S. Tsirkin   virtio_pci_modern...
264
  	return vp_ioread8(&vp_dev->common->device_status);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
265
266
267
268
269
270
271
  }
  
/* Write the device status register; writing 0 is a reset and must go
 * through vp_reset() instead, hence the BUG_ON.
 */
static void vp_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* We should never be setting status to 0. */
	BUG_ON(status == 0);
	vp_iowrite8(status, &vp_dev->common->device_status);
}
  
/* Reset the device by writing 0 to device_status, then wait until the
 * device acknowledges the reset before returning.
 */
static void vp_reset(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	/* 0 status means a reset. */
	vp_iowrite8(0, &vp_dev->common->device_status);
	/* After writing 0 to device_status, the driver MUST wait for a read of
	 * device_status to return 0 before reinitializing the device.
	 * This will flush out the status write, and flush in device writes,
	 * including MSI-X interrupts, if any.
	 */
	while (vp_ioread8(&vp_dev->common->device_status))
		msleep(1);
	/* Flush pending VQ/configuration callbacks. */
	vp_synchronize_vectors(vdev);
}
  
/* Set the MSI-X vector used for configuration change notifications and
 * return the value the device actually accepted (VIRTIO_MSI_NO_VECTOR on
 * failure).
 */
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
	/* Setup the vector used for configuration events */
	vp_iowrite16(vector, &vp_dev->common->msix_config);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return vp_ioread16(&vp_dev->common->msix_config);
}
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
299
/*
 * Create and program virtqueue @index on a modern device: validate the
 * queue, allocate the vring, write the ring addresses into the common
 * config, map the notification address, and (optionally) bind an MSI-X
 * vector.  Returns the virtqueue or an ERR_PTR.
 */
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
				  struct virtio_pci_vq_info *info,
				  unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name,
				  bool ctx,
				  u16 msix_vec)
{
	struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common;
	struct virtqueue *vq;
	u16 num, off;
	int err;

	if (index >= vp_ioread16(&cfg->num_queues))
		return ERR_PTR(-ENOENT);

	/* Select the queue we're interested in */
	vp_iowrite16(index, &cfg->queue_select);

	/* Check if queue is either not available or already active. */
	num = vp_ioread16(&cfg->queue_size);
	if (!num || vp_ioread16(&cfg->queue_enable))
		return ERR_PTR(-ENOENT);

	/* Ring sizes must be a power of two. */
	if (num & (num - 1)) {
		dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
		return ERR_PTR(-EINVAL);
	}

	/* get offset of notification word for this vq */
	off = vp_ioread16(&cfg->queue_notify_off);

	info->msix_vector = msix_vec;

	/* create the vring */
	vq = vring_create_virtqueue(index, num,
				    SMP_CACHE_BYTES, &vp_dev->vdev,
				    true, true, ctx,
				    vp_notify, callback, name);
	if (!vq)
		return ERR_PTR(-ENOMEM);

	/* activate the queue: program size and the three ring addresses */
	vp_iowrite16(virtqueue_get_vring_size(vq), &cfg->queue_size);
	vp_iowrite64_twopart(virtqueue_get_desc_addr(vq),
			     &cfg->queue_desc_lo, &cfg->queue_desc_hi);
	vp_iowrite64_twopart(virtqueue_get_avail_addr(vq),
			     &cfg->queue_avail_lo, &cfg->queue_avail_hi);
	vp_iowrite64_twopart(virtqueue_get_used_addr(vq),
			     &cfg->queue_used_lo, &cfg->queue_used_hi);

	/* Resolve the notification address: either inside the premapped
	 * notify region, or by mapping the capability on demand.
	 */
	if (vp_dev->notify_base) {
		/* offset should not wrap */
		if ((u64)off * vp_dev->notify_offset_multiplier + 2
		    > vp_dev->notify_len) {
			dev_warn(&vp_dev->pci_dev->dev,
				 "bad notification offset %u (x %u) "
				 "for queue %u > %zd",
				 off, vp_dev->notify_offset_multiplier,
				 index, vp_dev->notify_len);
			err = -EINVAL;
			goto err_map_notify;
		}
		vq->priv = (void __force *)vp_dev->notify_base +
			off * vp_dev->notify_offset_multiplier;
	} else {
		vq->priv = (void __force *)map_capability(vp_dev->pci_dev,
					  vp_dev->notify_map_cap, 2, 2,
					  off * vp_dev->notify_offset_multiplier, 2,
					  NULL);
	}

	if (!vq->priv) {
		err = -ENOMEM;
		goto err_map_notify;
	}

	if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
		/* Read back to check the device accepted the vector. */
		vp_iowrite16(msix_vec, &cfg->queue_msix_vector);
		msix_vec = vp_ioread16(&cfg->queue_msix_vector);
		if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
			err = -EBUSY;
			goto err_assign_vector;
		}
	}

	return vq;

err_assign_vector:
	/* Only unmap what we mapped ourselves above. */
	if (!vp_dev->notify_base)
		pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv);
err_map_notify:
	vring_del_virtqueue(vq);
	return ERR_PTR(err);
}
  
/* Modern find_vqs: set up all queues via the common path, then enable
 * each one.  Enabling is irreversible short of a device reset.
 */
static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			      struct virtqueue *vqs[],
			      vq_callback_t *callbacks[],
			      const char * const names[], const bool *ctx,
			      struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq;
	int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names, ctx, desc);

	if (rc)
		return rc;

	/* Select and activate all queues. Has to be done last: once we do
	 * this, there's no way to go back except reset.
	 */
	list_for_each_entry(vq, &vdev->vqs, list) {
		vp_iowrite16(vq->index, &vp_dev->common->queue_select);
		vp_iowrite16(1, &vp_dev->common->queue_enable);
	}

	return 0;
}
0a9b3f47d   Michael S. Tsirkin   Revert "virtio_pc...
416
/* Tear down one virtqueue: detach its MSI-X vector (if any), unmap the
 * per-queue notification mapping, and free the vring.
 */
static void del_vq(struct virtio_pci_vq_info *info)
{
	struct virtqueue *vq = info->vq;
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	vp_iowrite16(vq->index, &vp_dev->common->queue_select);

	if (vp_dev->msix_enabled) {
		vp_iowrite16(VIRTIO_MSI_NO_VECTOR,
			     &vp_dev->common->queue_msix_vector);
		/* Flush the write out to device */
		vp_ioread16(&vp_dev->common->queue_msix_vector);
	}

	/* vq->priv was mapped per-queue only when there is no premapped
	 * notify region; see setup_vq().
	 */
	if (!vp_dev->notify_base)
		pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv);

	vring_del_virtqueue(vq);
}
0dd4ff93f   Sebastien Boeuf   virtio: Implement...
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
  static int virtio_pci_find_shm_cap(struct pci_dev *dev, u8 required_id,
  				   u8 *bar, u64 *offset, u64 *len)
  {
  	int pos;
  
  	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); pos > 0;
  	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
  		u8 type, cap_len, id;
  		u32 tmp32;
  		u64 res_offset, res_length;
  
  		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
  							 cfg_type), &type);
  		if (type != VIRTIO_PCI_CAP_SHARED_MEMORY_CFG)
  			continue;
  
  		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
  							 cap_len), &cap_len);
  		if (cap_len != sizeof(struct virtio_pci_cap64)) {
  			dev_err(&dev->dev, "%s: shm cap with bad size offset:"
  				" %d size: %d
  ", __func__, pos, cap_len);
  			continue;
  		}
  
  		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
  							 id), &id);
  		if (id != required_id)
  			continue;
  
  		/* Type, and ID match, looks good */
  		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
  							 bar), bar);
  
  		/* Read the lower 32bit of length and offset */
  		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
  							  offset), &tmp32);
  		res_offset = tmp32;
  		pci_read_config_dword(dev, pos + offsetof(struct virtio_pci_cap,
  							  length), &tmp32);
  		res_length = tmp32;
  
  		/* and now the top half */
  		pci_read_config_dword(dev,
  				      pos + offsetof(struct virtio_pci_cap64,
  						     offset_hi), &tmp32);
  		res_offset |= ((u64)tmp32) << 32;
  		pci_read_config_dword(dev,
  				      pos + offsetof(struct virtio_pci_cap64,
  						     length_hi), &tmp32);
  		res_length |= ((u64)tmp32) << 32;
  
  		*offset = res_offset;
  		*len = res_length;
  
  		return pos;
  	}
  	return 0;
  }
  
  static bool vp_get_shm_region(struct virtio_device *vdev,
  			      struct virtio_shm_region *region, u8 id)
  {
  	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
  	struct pci_dev *pci_dev = vp_dev->pci_dev;
  	u8 bar;
  	u64 offset, len;
  	phys_addr_t phys_addr;
  	size_t bar_len;
  
  	if (!virtio_pci_find_shm_cap(pci_dev, id, &bar, &offset, &len))
  		return false;
  
  	phys_addr = pci_resource_start(pci_dev, bar);
  	bar_len = pci_resource_len(pci_dev, bar);
  
  	if ((offset + len) < offset) {
  		dev_err(&pci_dev->dev, "%s: cap offset+len overflow detected
  ",
  			__func__);
  		return false;
  	}
  
  	if (offset + len > bar_len) {
  		dev_err(&pci_dev->dev, "%s: bar shorter than cap offset+len
  ",
  			__func__);
  		return false;
  	}
  
  	region->len = len;
  	region->addr = (u64) phys_addr + offset;
  
  	return true;
  }
d3f5f0656   Michael S. Tsirkin   virtio_pci_modern...
528
529
530
531
532
533
534
535
536
537
538
539
540
/* Config ops for modern devices that have no device-specific config
 * area: ->get/->set are NULL because there is nothing to access.
 */
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
	.get		= NULL,
	.set		= NULL,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region  = vp_get_shm_region,
};
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
544
545
546
547
548
549
550
551
552
553
554
555
556
/* Config ops for modern devices with a device-specific config area. */
static const struct virtio_config_ops virtio_pci_config_ops = {
	.get		= vp_get,
	.set		= vp_set,
	.generation	= vp_generation,
	.get_status	= vp_get_status,
	.set_status	= vp_set_status,
	.reset		= vp_reset,
	.find_vqs	= vp_modern_find_vqs,
	.del_vqs	= vp_del_vqs,
	.get_features	= vp_get_features,
	.finalize_features = vp_finalize_features,
	.bus_name	= vp_bus_name,
	.set_vq_affinity = vp_set_vq_affinity,
	.get_vq_affinity = vp_get_vq_affinity,
	.get_shm_region  = vp_get_shm_region,
};
  
/**
 * virtio_pci_find_capability - walk capabilities to find device info.
 * @dev: the pci device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
 * @bars: the bitmask of BARs
 *
 * Returns offset of the capability, or 0.
 */
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
					     u32 ioresource_types, int *bars)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, bar;
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type),
				     &type);
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar),
				     &bar);

		/* Ignore structures with reserved BAR values */
		if (bar > 0x5)
			continue;

		if (type == cfg_type) {
			/* Only accept BARs that actually exist and match the
			 * requested resource type(s); record them in *bars.
			 */
			if (pci_resource_len(dev, bar) &&
			    pci_resource_flags(dev, bar) & ioresource_types) {
				*bars |= (1 << bar);
				return pos;
			}
		}
	}
	return 0;
}
89461c4a1   Rusty Russell   virtio_pci: macro...
600
/* This is part of the ABI.  Don't screw with it. */
static inline void check_offsets(void)
{
	/* Compile-time verification that the uapi register-offset macros
	 * match the struct layouts; a mismatch fails the build.
	 */
	/* Note: disk space was harmed in compilation of this function. */
	BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
		     offsetof(struct virtio_pci_cap, cap_vndr));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
		     offsetof(struct virtio_pci_cap, cap_next));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
		     offsetof(struct virtio_pci_cap, cap_len));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
		     offsetof(struct virtio_pci_cap, cfg_type));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
		     offsetof(struct virtio_pci_cap, bar));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
		     offsetof(struct virtio_pci_cap, offset));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
		     offsetof(struct virtio_pci_cap, length));
	BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
		     offsetof(struct virtio_pci_notify_cap,
			      notify_off_multiplier));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      device_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
		     offsetof(struct virtio_pci_common_cfg, device_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      guest_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
		     offsetof(struct virtio_pci_common_cfg, guest_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, msix_config));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
		     offsetof(struct virtio_pci_common_cfg, num_queues));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
		     offsetof(struct virtio_pci_common_cfg, device_status));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
		     offsetof(struct virtio_pci_common_cfg, config_generation));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
		     offsetof(struct virtio_pci_common_cfg, queue_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
		     offsetof(struct virtio_pci_common_cfg, queue_size));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
		     offsetof(struct virtio_pci_common_cfg, queue_enable));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
		     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}
  
  /* the PCI probing function */
  int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
  {
  	struct pci_dev *pci_dev = vp_dev->pci_dev;
  	int err, common, isr, notify, device;
  	u32 notify_length;
3909213cf   Michael S. Tsirkin   virtio_pci_modern...
669
  	u32 notify_offset;
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
  
  	check_offsets();
  
  	/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
  	if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
  		return -ENODEV;
  
  	if (pci_dev->device < 0x1040) {
  		/* Transitional devices: use the PCI subsystem device id as
  		 * virtio device id, same as legacy driver always did.
  		 */
  		vp_dev->vdev.id.device = pci_dev->subsystem_device;
  	} else {
  		/* Modern devices: simply use PCI device id, but start from 0x1040. */
  		vp_dev->vdev.id.device = pci_dev->device - 0x1040;
  	}
  	vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
687
688
  	/* check for a common config: if not, use legacy mode (bar 0). */
  	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
59a5b0f7b   Gerd Hoffmann   virtio-pci: alloc...
689
690
  					    IORESOURCE_IO | IORESOURCE_MEM,
  					    &vp_dev->modern_bars);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
691
692
693
694
695
696
697
698
699
  	if (!common) {
  		dev_info(&pci_dev->dev,
  			 "virtio_pci: leaving for legacy driver
  ");
  		return -ENODEV;
  	}
  
  	/* If common is there, these should be too... */
  	isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
59a5b0f7b   Gerd Hoffmann   virtio-pci: alloc...
700
701
  					 IORESOURCE_IO | IORESOURCE_MEM,
  					 &vp_dev->modern_bars);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
702
  	notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
59a5b0f7b   Gerd Hoffmann   virtio-pci: alloc...
703
704
  					    IORESOURCE_IO | IORESOURCE_MEM,
  					    &vp_dev->modern_bars);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
705
706
707
708
709
710
711
  	if (!isr || !notify) {
  		dev_err(&pci_dev->dev,
  			"virtio_pci: missing capabilities %i/%i/%i
  ",
  			common, isr, notify);
  		return -EINVAL;
  	}
7a5589b24   Andy Lutomirski   virtio_pci: Use t...
712
713
714
715
716
717
718
  	err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
  	if (err)
  		err = dma_set_mask_and_coherent(&pci_dev->dev,
  						DMA_BIT_MASK(32));
  	if (err)
  		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.
  ");
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
719
720
721
722
  	/* Device capability is only mandatory for devices that have
  	 * device-specific configuration.
  	 */
  	device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
59a5b0f7b   Gerd Hoffmann   virtio-pci: alloc...
723
724
725
726
727
728
729
  					    IORESOURCE_IO | IORESOURCE_MEM,
  					    &vp_dev->modern_bars);
  
  	err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars,
  					   "virtio-pci-modern");
  	if (err)
  		return err;
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
  
  	err = -EINVAL;
  	vp_dev->common = map_capability(pci_dev, common,
  					sizeof(struct virtio_pci_common_cfg), 4,
  					0, sizeof(struct virtio_pci_common_cfg),
  					NULL);
  	if (!vp_dev->common)
  		goto err_map_common;
  	vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1,
  				     0, 1,
  				     NULL);
  	if (!vp_dev->isr)
  		goto err_map_isr;
  
  	/* Read notify_off_multiplier from config space. */
  	pci_read_config_dword(pci_dev,
  			      notify + offsetof(struct virtio_pci_notify_cap,
  						notify_off_multiplier),
  			      &vp_dev->notify_offset_multiplier);
3909213cf   Michael S. Tsirkin   virtio_pci_modern...
749
  	/* Read notify length and offset from config space. */
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
750
751
752
753
  	pci_read_config_dword(pci_dev,
  			      notify + offsetof(struct virtio_pci_notify_cap,
  						cap.length),
  			      &notify_length);
3909213cf   Michael S. Tsirkin   virtio_pci_modern...
754
755
  	pci_read_config_dword(pci_dev,
  			      notify + offsetof(struct virtio_pci_notify_cap,
4e94ebdd0   Ladi Prosek   virtio-pci: read ...
756
  						cap.offset),
3909213cf   Michael S. Tsirkin   virtio_pci_modern...
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
  			      &notify_offset);
  
  	/* We don't know how many VQs we'll map, ahead of the time.
  	 * If notify length is small, map it all now.
  	 * Otherwise, map each VQ individually later.
  	 */
  	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
  		vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2,
  						     0, notify_length,
  						     &vp_dev->notify_len);
  		if (!vp_dev->notify_base)
  			goto err_map_notify;
  	} else {
  		vp_dev->notify_map_cap = notify;
  	}
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
772
773
774
775
776
777
778
779
780
781
  
  	/* Again, we don't know how much we should map, but PAGE_SIZE
  	 * is more than enough for all existing devices.
  	 */
  	if (device) {
  		vp_dev->device = map_capability(pci_dev, device, 0, 4,
  						0, PAGE_SIZE,
  						&vp_dev->device_len);
  		if (!vp_dev->device)
  			goto err_map_device;
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
782

d3f5f0656   Michael S. Tsirkin   virtio_pci_modern...
783
784
785
786
  		vp_dev->vdev.config = &virtio_pci_config_ops;
  	} else {
  		vp_dev->vdev.config = &virtio_pci_config_nodev_ops;
  	}
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
787
788
789
790
791
792
793
794
  
  	vp_dev->config_vector = vp_config_vector;
  	vp_dev->setup_vq = setup_vq;
  	vp_dev->del_vq = del_vq;
  
  	return 0;
  
  err_map_device:
3909213cf   Michael S. Tsirkin   virtio_pci_modern...
795
796
797
  	if (vp_dev->notify_base)
  		pci_iounmap(pci_dev, vp_dev->notify_base);
  err_map_notify:
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
798
799
800
801
802
803
804
805
806
807
808
809
810
  	pci_iounmap(pci_dev, vp_dev->isr);
  err_map_isr:
  	pci_iounmap(pci_dev, vp_dev->common);
  err_map_common:
  	return err;
  }
  
  void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
  {
  	struct pci_dev *pci_dev = vp_dev->pci_dev;
  
  	if (vp_dev->device)
  		pci_iounmap(pci_dev, vp_dev->device);
3909213cf   Michael S. Tsirkin   virtio_pci_modern...
811
812
  	if (vp_dev->notify_base)
  		pci_iounmap(pci_dev, vp_dev->notify_base);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
813
814
  	pci_iounmap(pci_dev, vp_dev->isr);
  	pci_iounmap(pci_dev, vp_dev->common);
59a5b0f7b   Gerd Hoffmann   virtio-pci: alloc...
815
  	pci_release_selected_regions(pci_dev, vp_dev->modern_bars);
1fcf0512c   Michael S. Tsirkin   virtio_pci: moder...
816
  }