drivers/virtio/virtio_ring.c
  /* Virtio ring implementation.
   *
   *  Copyright 2007 Rusty Russell IBM Corporation
   *
   *  This program is free software; you can redistribute it and/or modify
   *  it under the terms of the GNU General Public License as published by
   *  the Free Software Foundation; either version 2 of the License, or
   *  (at your option) any later version.
   *
   *  This program is distributed in the hope that it will be useful,
   *  but WITHOUT ANY WARRANTY; without even the implied warranty of
   *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   *  GNU General Public License for more details.
   *
   *  You should have received a copy of the GNU General Public License
   *  along with this program; if not, write to the Free Software
   *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
   */
  #include <linux/virtio.h>
  #include <linux/virtio_ring.h>
  #include <linux/virtio_config.h>
  #include <linux/device.h>
  #include <linux/slab.h>
  #include <linux/module.h>
  #include <linux/hrtimer.h>
  #include <linux/kmemleak.h>
  #include <linux/dma-mapping.h>
  #include <xen/xen.h>
  
  #ifdef DEBUG
  /* For development, we want to crash whenever the ring is screwed. */
  #define BAD_RING(_vq, fmt, args...)				\
  	do {							\
  		dev_err(&(_vq)->vq.vdev->dev,			\
  			"%s:"fmt, (_vq)->vq.name, ##args);	\
  		BUG();						\
  	} while (0)
  /* Caller is supposed to guarantee no reentry. */
  #define START_USE(_vq)						\
  	do {							\
  		if ((_vq)->in_use)				\
  			panic("%s:in_use = %i
  ",		\
  			      (_vq)->vq.name, (_vq)->in_use);	\
  		(_vq)->in_use = __LINE__;			\
  	} while (0)
  #define END_USE(_vq) \
  	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
  #else
  #define BAD_RING(_vq, fmt, args...)				\
  	do {							\
  		dev_err(&_vq->vq.vdev->dev,			\
  			"%s:"fmt, (_vq)->vq.name, ##args);	\
  		(_vq)->broken = true;				\
  	} while (0)
  #define START_USE(vq)
  #define END_USE(vq)
  #endif
  struct vring_desc_state {
  	void *data;			/* Data for callback. */
  	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
  };
  struct vring_virtqueue {
  	struct virtqueue vq;
  
  	/* Actual memory layout for this queue */
  	struct vring vring;
  	/* Can we use weak barriers? */
  	bool weak_barriers;
  	/* Other side has made a mess, don't try any more. */
  	bool broken;
  	/* Host supports indirect buffers */
  	bool indirect;
  	/* Host publishes avail event idx */
  	bool event;
  	/* Head of free buffer list. */
  	unsigned int free_head;
  	/* Number we've added since last sync. */
  	unsigned int num_added;
  
  	/* Last used index we've seen. */
  	u16 last_used_idx;

  	/* Last written value to avail->flags */
  	u16 avail_flags_shadow;
  
  	/* Last written value to avail->idx in guest byte order */
  	u16 avail_idx_shadow;
  	/* How to notify other side. FIXME: commonalize hcalls! */
  	bool (*notify)(struct virtqueue *vq);

  	/* DMA, allocation, and size information */
  	bool we_own_ring;
  	size_t queue_size_in_bytes;
  	dma_addr_t queue_dma_addr;
  #ifdef DEBUG
  	/* They're supposed to lock for us. */
  	unsigned int in_use;
  
  	/* Figure out if their kicks are too delayed. */
  	bool last_add_time_valid;
  	ktime_t last_add_time;
  #endif
  	/* Per-descriptor state. */
  	struct vring_desc_state desc_state[];
  };
  
  #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
  /*
   * Modern virtio devices have feature bits to specify whether they need a
   * quirk and bypass the IOMMU. If not there, just use the DMA API.
   *
   * If there, the interaction between virtio and DMA API is messy.
   *
   * On most systems with virtio, physical addresses match bus addresses,
   * and it doesn't particularly matter whether we use the DMA API.
   *
   * On some systems, including Xen and any system with a physical device
   * that speaks virtio behind a physical IOMMU, we must use the DMA API
   * for virtio DMA to work at all.
   *
   * On other systems, including SPARC and PPC64, virtio-pci devices are
   * enumerated as though they are behind an IOMMU, but the virtio host
   * ignores the IOMMU, so we must either pretend that the IOMMU isn't
   * there or somehow map everything as the identity.
   *
   * For the time being, we preserve historic behavior and bypass the DMA
   * API.
   *
   * TODO: install a per-device DMA ops structure that does the right thing
   * taking into account all the above quirks, and use the DMA API
   * unconditionally on data path.
   */
  
  static bool vring_use_dma_api(struct virtio_device *vdev)
  {
  	if (!virtio_has_iommu_quirk(vdev))
  		return true;
  
  	/* Otherwise, we are left to guess. */
  	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
  	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
  	 * such a configuration, virtio has never worked and will
  	 * not work without an even larger kludge.  Instead, enable
  	 * the DMA API if we're a Xen guest, which at least allows
  	 * all of the sensible Xen configurations to work correctly.
  	 */
  	if (xen_domain())
  		return true;
  	return false;
  }
  /*
   * The DMA ops on various arches are rather gnarly right now, and
   * making all of the arch DMA ops work on the vring device itself
   * is a mess.  For now, we use the parent device for DMA ops.
   */
  static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
  {
  	return vq->vq.vdev->dev.parent;
  }
  
  /* Map one sg entry. */
  static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
  				   struct scatterlist *sg,
  				   enum dma_data_direction direction)
  {
  	if (!vring_use_dma_api(vq->vq.vdev))
  		return (dma_addr_t)sg_phys(sg);
  
  	/*
  	 * We can't use dma_map_sg, because we don't use scatterlists in
  	 * the way it expects (we don't guarantee that the scatterlist
  	 * will exist for the lifetime of the mapping).
  	 */
  	return dma_map_page(vring_dma_dev(vq),
  			    sg_page(sg), sg->offset, sg->length,
  			    direction);
  }
  
  static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
  				   void *cpu_addr, size_t size,
  				   enum dma_data_direction direction)
  {
  	if (!vring_use_dma_api(vq->vq.vdev))
  		return (dma_addr_t)virt_to_phys(cpu_addr);
  
  	return dma_map_single(vring_dma_dev(vq),
  			      cpu_addr, size, direction);
  }
  
  static void vring_unmap_one(const struct vring_virtqueue *vq,
  			    struct vring_desc *desc)
  {
  	u16 flags;
  
  	if (!vring_use_dma_api(vq->vq.vdev))
  		return;
  
  	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);
  
  	if (flags & VRING_DESC_F_INDIRECT) {
  		dma_unmap_single(vring_dma_dev(vq),
  				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
  				 virtio32_to_cpu(vq->vq.vdev, desc->len),
  				 (flags & VRING_DESC_F_WRITE) ?
  				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
  	} else {
  		dma_unmap_page(vring_dma_dev(vq),
  			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
  			       virtio32_to_cpu(vq->vq.vdev, desc->len),
  			       (flags & VRING_DESC_F_WRITE) ?
  			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
  	}
  }
  
  static int vring_mapping_error(const struct vring_virtqueue *vq,
  			       dma_addr_t addr)
  {
  	if (!vring_use_dma_api(vq->vq.vdev))
  		return 0;
  
  	return dma_mapping_error(vring_dma_dev(vq), addr);
  }
  static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
  					 unsigned int total_sg, gfp_t gfp)
  {
  	struct vring_desc *desc;
  	unsigned int i;

  	/*
  	 * We require lowmem mappings for the descriptors because
  	 * otherwise virt_to_phys will give us bogus addresses in the
  	 * virtqueue.
  	 */
  	gfp &= ~__GFP_HIGHMEM;

  	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
  	if (!desc)
  		return NULL;

  	for (i = 0; i < total_sg; i++)
  		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
  	return desc;
  }
  static inline int virtqueue_add(struct virtqueue *_vq,
  				struct scatterlist *sgs[],
  				unsigned int total_sg,
  				unsigned int out_sgs,
  				unsigned int in_sgs,
  				void *data,
  				gfp_t gfp)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	struct scatterlist *sg;
  	struct vring_desc *desc;
  	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
  	int head;
  	bool indirect;

  	START_USE(vq);
  	BUG_ON(data == NULL);

  	if (unlikely(vq->broken)) {
  		END_USE(vq);
  		return -EIO;
  	}
  #ifdef DEBUG
  	{
  		ktime_t now = ktime_get();
  
  		/* No kick or get, with .1 second between?  Warn. */
  		if (vq->last_add_time_valid)
  			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
  					    > 100);
  		vq->last_add_time = now;
  		vq->last_add_time_valid = true;
  	}
  #endif
  	BUG_ON(total_sg > vq->vring.num);
  	BUG_ON(total_sg == 0);
  
  	head = vq->free_head;
  	/* If the host supports indirect descriptor tables, and we have multiple
  	 * buffers, then go indirect. FIXME: tune this threshold */
  	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
  		desc = alloc_indirect(_vq, total_sg, gfp);
  	else
  		desc = NULL;
  
  	if (desc) {
  		/* Use a single buffer which doesn't continue */
  		indirect = true;
  		/* Set up rest to use this indirect table. */
  		i = 0;
  		descs_used = 1;
  	} else {
  		indirect = false;
  		desc = vq->vring.desc;
  		i = head;
  		descs_used = total_sg;
  	}
  	if (vq->vq.num_free < descs_used) {
  		pr_debug("Can't add buf len %i - avail = %i
  ",
  			 descs_used, vq->vq.num_free);
  		/* FIXME: for historical reasons, we force a notify here if
  		 * there are outgoing parts to the buffer.  Presumably the
  		 * host should service the ring ASAP. */
  		if (out_sgs)
  			vq->notify(&vq->vq);
  		if (indirect)
  			kfree(desc);
  		END_USE(vq);
  		return -ENOSPC;
  	}
  	for (n = 0; n < out_sgs; n++) {
  		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
  			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
  			if (vring_mapping_error(vq, addr))
  				goto unmap_release;
  			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
  			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
  			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
  			prev = i;
  			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
  		}
  	}
  	for (; n < (out_sgs + in_sgs); n++) {
  		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
  			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
  			if (vring_mapping_error(vq, addr))
  				goto unmap_release;
  			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
  			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
  			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
  			prev = i;
  			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
  		}
  	}
  	/* Last one doesn't continue. */
  	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

  	if (indirect) {
  		/* Now that the indirect table is filled in, map it. */
  		dma_addr_t addr = vring_map_single(
  			vq, desc, total_sg * sizeof(struct vring_desc),
  			DMA_TO_DEVICE);
  		if (vring_mapping_error(vq, addr))
  			goto unmap_release;
  
  		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
  		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);
  
  		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
  	}
  
  	/* We're using some buffers from the free list. */
  	vq->vq.num_free -= descs_used;
  	/* Update free pointer */
  	if (indirect)
  		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
  	else
  		vq->free_head = i;

  	/* Store token and indirect buffer state. */
  	vq->desc_state[head].data = data;
  	if (indirect)
  		vq->desc_state[head].indir_desc = desc;
  
  	/* Put entry in available array (but don't update avail->idx until they
  	 * do sync). */
  	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
  	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

  	/* Descriptors and available array need to be set before we expose the
  	 * new available array entries. */
  	virtio_wmb(vq->weak_barriers);
  	vq->avail_idx_shadow++;
  	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
  	vq->num_added++;
  	pr_debug("Added buffer head %i to %p
  ", head, vq);
  	END_USE(vq);
  	/* This is very unlikely, but theoretically possible.  Kick
  	 * just in case. */
  	if (unlikely(vq->num_added == (1 << 16) - 1))
  		virtqueue_kick(_vq);
  	return 0;
  
  unmap_release:
  	err_idx = i;
  	i = head;
  
  	for (n = 0; n < total_sg; n++) {
  		if (i == err_idx)
  			break;
  		vring_unmap_one(vq, &desc[i]);
  		i = vq->vring.desc[i].next;
  	}
  
  	vq->vq.num_free += total_sg;
  
  	if (indirect)
  		kfree(desc);
  	END_USE(vq);
  	return -EIO;
  }
  
  /**
   * virtqueue_add_sgs - expose buffers to other end
   * @vq: the struct virtqueue we're talking about.
   * @sgs: array of terminated scatterlists.
   * @out_num: the number of scatterlists readable by other side
   * @in_num: the number of scatterlists which are writable (after readable ones)
   * @data: the token identifying the buffer.
   * @gfp: how to do memory allocations (if necessary).
   *
   * Caller must ensure we don't call this with other virtqueue operations
   * at the same time (except where noted).
   *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
   */
  int virtqueue_add_sgs(struct virtqueue *_vq,
  		      struct scatterlist *sgs[],
  		      unsigned int out_sgs,
  		      unsigned int in_sgs,
  		      void *data,
  		      gfp_t gfp)
  {
  	unsigned int i, total_sg = 0;
  
  	/* Count them first. */
  	for (i = 0; i < out_sgs + in_sgs; i++) {
  		struct scatterlist *sg;
  		for (sg = sgs[i]; sg; sg = sg_next(sg))
  			total_sg++;
  	}
  	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
  }
  EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
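
/*
 * Editor's illustration (not part of the original file): a minimal sketch of
 * how a driver might feed virtqueue_add_sgs().  "struct my_req" and
 * my_queue_req() are hypothetical; only the scatterlist and virtqueue calls
 * are real APIs from this file and <linux/scatterlist.h>.
 */
#if 0	/* illustrative only */
struct my_req {
	__le32 cmd;	/* read by the device */
	u8 status;	/* written by the device */
};

static int my_queue_req(struct virtqueue *vq, struct my_req *req)
{
	struct scatterlist cmd, status, *sgs[2];

	sg_init_one(&cmd, &req->cmd, sizeof(req->cmd));
	sg_init_one(&status, &req->status, sizeof(req->status));
	sgs[0] = &cmd;		/* out_sgs: readable by the other side */
	sgs[1] = &status;	/* in_sgs: writable by the other side */

	/* req doubles as the token later returned by virtqueue_get_buf(). */
	return virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
}
#endif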
  
  /**
   * virtqueue_add_outbuf - expose output buffers to other end
   * @vq: the struct virtqueue we're talking about.
   * @sg: scatterlist (must be well-formed and terminated!)
   * @num: the number of entries in @sg readable by other side
   * @data: the token identifying the buffer.
   * @gfp: how to do memory allocations (if necessary).
   *
   * Caller must ensure we don't call this with other virtqueue operations
   * at the same time (except where noted).
   *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
   */
  int virtqueue_add_outbuf(struct virtqueue *vq,
  			 struct scatterlist *sg, unsigned int num,
  			 void *data,
  			 gfp_t gfp)
  {
  	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
  }
  EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
  
  /**
   * virtqueue_add_inbuf - expose input buffers to other end
   * @vq: the struct virtqueue we're talking about.
   * @sg: scatterlist (must be well-formed and terminated!)
   * @num: the number of entries in @sg writable by other side
   * @data: the token identifying the buffer.
   * @gfp: how to do memory allocations (if necessary).
   *
   * Caller must ensure we don't call this with other virtqueue operations
   * at the same time (except where noted).
   *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
   */
  int virtqueue_add_inbuf(struct virtqueue *vq,
  			struct scatterlist *sg, unsigned int num,
  			void *data,
  			gfp_t gfp)
  {
  	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
  }
  EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
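
/*
 * Editor's illustration (not part of the original file): the single-direction
 * helpers above suit buffers that are only read or only written by the other
 * side.  A receive path might refill its queue roughly like this;
 * MY_RX_BUF_SIZE and the error policy are hypothetical.  A caller would
 * normally add a batch of buffers and then issue one virtqueue_kick().
 */
#if 0	/* illustrative only */
static int my_refill_rx(struct virtqueue *vq)
{
	struct scatterlist sg;
	void *buf = kmalloc(MY_RX_BUF_SIZE, GFP_ATOMIC);
	int err;

	if (!buf)
		return -ENOMEM;

	sg_init_one(&sg, buf, MY_RX_BUF_SIZE);
	/* Device-writable buffer; buf is also the completion token. */
	err = virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
	if (err)
		kfree(buf);
	return err;
}
#endif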
  
  /**
   * virtqueue_kick_prepare - first half of split virtqueue_kick call.
   * @vq: the struct virtqueue
   *
   * Instead of virtqueue_kick(), you can do:
   *	if (virtqueue_kick_prepare(vq))
   *		virtqueue_notify(vq);
   *
   * This is sometimes useful because the virtqueue_kick_prepare() needs
   * to be serialized, but the actual virtqueue_notify() call does not.
   */
  bool virtqueue_kick_prepare(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	u16 new, old;
  	bool needs_kick;
  	START_USE(vq);
  	/* We need to expose available array entries before checking avail
  	 * event. */
  	virtio_mb(vq->weak_barriers);

  	old = vq->avail_idx_shadow - vq->num_added;
  	new = vq->avail_idx_shadow;
  	vq->num_added = 0;
  #ifdef DEBUG
  	if (vq->last_add_time_valid) {
  		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
  					      vq->last_add_time)) > 100);
  	}
  	vq->last_add_time_valid = false;
  #endif
  	if (vq->event) {
  		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
  					      new, old);
  	} else {
  		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
  	}
  	END_USE(vq);
  	return needs_kick;
  }
  EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
  
  /**
   * virtqueue_notify - second half of split virtqueue_kick call.
   * @vq: the struct virtqueue
   *
   * This does not need to be serialized.
   *
   * Returns false if host notify failed or queue is broken, otherwise true.
   */
  bool virtqueue_notify(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	if (unlikely(vq->broken))
  		return false;
  	/* Prod other side to tell it about changes. */
  	if (!vq->notify(_vq)) {
  		vq->broken = true;
  		return false;
  	}
  	return true;
  }
  EXPORT_SYMBOL_GPL(virtqueue_notify);
  
  /**
   * virtqueue_kick - update after add_buf
   * @vq: the struct virtqueue
   *
   * After one or more virtqueue_add_* calls, invoke this to kick
   * the other side.
   *
   * Caller must ensure we don't call this with other virtqueue
   * operations at the same time (except where noted).
   *
   * Returns false if kick failed, otherwise true.
   */
  bool virtqueue_kick(struct virtqueue *vq)
  {
  	if (virtqueue_kick_prepare(vq))
  		return virtqueue_notify(vq);
  	return true;
  }
  EXPORT_SYMBOL_GPL(virtqueue_kick);
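
/*
 * Editor's illustration (not part of the original file): the split
 * prepare/notify form above lets a driver drop its queue lock before the
 * potentially slow notification (often a VM exit).  The lock and helper name
 * are hypothetical; the serialization requirement is as documented above.
 */
#if 0	/* illustrative only */
static int my_submit(struct virtqueue *vq, struct scatterlist *sg, void *buf,
		     spinlock_t *lock)
{
	unsigned long flags;
	bool kick;
	int err;

	spin_lock_irqsave(lock, flags);
	err = virtqueue_add_outbuf(vq, sg, 1, buf, GFP_ATOMIC);
	kick = virtqueue_kick_prepare(vq);	/* must be serialized */
	spin_unlock_irqrestore(lock, flags);

	if (kick)
		virtqueue_notify(vq);		/* may run unlocked */
	return err;
}
#endif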
  
  static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
  {
  	unsigned int i, j;
  	u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);
  
  	/* Clear data ptr. */
  	vq->desc_state[head].data = NULL;

  	/* Put back on free list: unmap first-level descriptors and find end */
  	i = head;

  	while (vq->vring.desc[i].flags & nextflag) {
  		vring_unmap_one(vq, &vq->vring.desc[i]);
  		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
  		vq->vq.num_free++;
  	}
  	vring_unmap_one(vq, &vq->vring.desc[i]);
  	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
  	vq->free_head = head;

  	/* Plus final descriptor */
  	vq->vq.num_free++;
  
  	/* Free the indirect table, if any, now that it's unmapped. */
  	if (vq->desc_state[head].indir_desc) {
  		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
  		u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);
  
  		BUG_ON(!(vq->vring.desc[head].flags &
  			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
  		BUG_ON(len == 0 || len % sizeof(struct vring_desc));
  
  		for (j = 0; j < len / sizeof(struct vring_desc); j++)
  			vring_unmap_one(vq, &indir_desc[j]);
  
  		kfree(vq->desc_state[head].indir_desc);
  		vq->desc_state[head].indir_desc = NULL;
  	}
  }
  static inline bool more_used(const struct vring_virtqueue *vq)
  {
  	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
  }
  /**
   * virtqueue_get_buf - get the next used buffer
   * @vq: the struct virtqueue we're talking about.
   * @len: the length written into the buffer
   *
 * If the device wrote data into the buffer, @len will be set to the
   * amount written.  This means you don't need to clear the buffer
   * beforehand to ensure there's no data leakage in the case of short
   * writes.
   *
   * Caller must ensure we don't call this with other virtqueue
   * operations at the same time (except where noted).
   *
   * Returns NULL if there are no used buffers, or the "data" token
   * handed to virtqueue_add_*().
   */
  void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	void *ret;
  	unsigned int i;
  	u16 last_used;
  
  	START_USE(vq);
  	if (unlikely(vq->broken)) {
  		END_USE(vq);
  		return NULL;
  	}
  	if (!more_used(vq)) {
  		pr_debug("No more buffers in queue
  ");
  		END_USE(vq);
  		return NULL;
  	}
  	/* Only get used array entries after they have been exposed by host. */
  	virtio_rmb(vq->weak_barriers);

  	last_used = (vq->last_used_idx & (vq->vring.num - 1));
  	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
  	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);
  
  	if (unlikely(i >= vq->vring.num)) {
  		BAD_RING(vq, "id %u out of range
  ", i);
  		return NULL;
  	}
  	if (unlikely(!vq->desc_state[i].data)) {
  		BAD_RING(vq, "id %u is not a head!
  ", i);
  		return NULL;
  	}
  
  	/* detach_buf clears data, so grab it now. */
  	ret = vq->desc_state[i].data;
  	detach_buf(vq, i);
  	vq->last_used_idx++;
  	/* If we expect an interrupt for the next entry, tell host
  	 * by writing event index and flush out the write before
  	 * the read in the next get_buf call. */
  	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
  		virtio_store_mb(vq->weak_barriers,
  				&vring_used_event(&vq->vring),
  				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

  #ifdef DEBUG
  	vq->last_add_time_valid = false;
  #endif
  	END_USE(vq);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(virtqueue_get_buf);
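
/*
 * Editor's illustration (not part of the original file): used buffers are
 * typically drained in a loop until virtqueue_get_buf() returns NULL.  The
 * my_complete_req() callback is hypothetical; the token is whatever was
 * passed as @data to virtqueue_add_*().
 */
#if 0	/* illustrative only */
static void my_drain_completions(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		my_complete_req(token, len);	/* len = bytes the device wrote */
}
#endif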

  /**
   * virtqueue_disable_cb - disable callbacks
   * @vq: the struct virtqueue we're talking about.
   *
   * Note that this is not necessarily synchronous, hence unreliable and only
   * useful as an optimization.
   *
   * Unlike other operations, this need not be serialized.
   */
  void virtqueue_disable_cb(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
  		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
  		if (!vq->event)
  			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
  	}
  }
  EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

  /**
   * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
   * @vq: the struct virtqueue we're talking about.
   *
   * This re-enables callbacks; it returns current queue state
   * in an opaque unsigned value. This value should be later tested by
   * virtqueue_poll, to detect a possible race between the driver checking for
   * more work, and enabling callbacks.
   *
   * Caller must ensure we don't call this with other virtqueue
   * operations at the same time (except where noted).
   */
  unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	u16 last_used_idx;
  
  	START_USE(vq);
  
  	/* We optimistically turn back on interrupts, then check if there was
  	 * more to do. */
  	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
  	 * either clear the flags bit or point the event index at the next
  	 * entry. Always do both to keep code simple. */
  	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
  		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
  		if (!vq->event)
  			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
  	}
  	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
  	END_USE(vq);
  	return last_used_idx;
  }
  EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
  
  /**
   * virtqueue_poll - query pending used buffers
   * @vq: the struct virtqueue we're talking about.
   * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
   *
   * Returns "true" if there are pending used buffers in the queue.
   *
   * This does not need to be serialized.
   */
  bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	virtio_mb(vq->weak_barriers);
  	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
  }
  EXPORT_SYMBOL_GPL(virtqueue_poll);

  /**
   * virtqueue_enable_cb - restart callbacks after disable_cb.
   * @vq: the struct virtqueue we're talking about.
   *
   * This re-enables callbacks; it returns "false" if there are pending
   * buffers in the queue, to detect a possible race between the driver
   * checking for more work, and enabling callbacks.
   *
   * Caller must ensure we don't call this with other virtqueue
   * operations at the same time (except where noted).
   */
  bool virtqueue_enable_cb(struct virtqueue *_vq)
  {
  	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
  	return !virtqueue_poll(_vq, last_used_idx);
  }
  EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
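
/*
 * Editor's illustration (not part of the original file): a typical virtqueue
 * callback disables further callbacks, drains the ring, then re-enables and
 * re-checks to close the race described above.  my_drain_completions() is the
 * hypothetical helper sketched after virtqueue_get_buf().
 */
#if 0	/* illustrative only */
static void my_vq_callback(struct virtqueue *vq)
{
	do {
		virtqueue_disable_cb(vq);	/* best effort, see above */
		my_drain_completions(vq);
		/* enable_cb() returns false if buffers arrived meanwhile */
	} while (!virtqueue_enable_cb(vq));
}
#endif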

  /**
   * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
   * @vq: the struct virtqueue we're talking about.
   *
   * This re-enables callbacks but hints to the other side to delay
   * interrupts until most of the available buffers have been processed;
   * it returns "false" if there are many pending buffers in the queue,
   * to detect a possible race between the driver checking for more work,
   * and enabling callbacks.
   *
   * Caller must ensure we don't call this with other virtqueue
   * operations at the same time (except where noted).
   */
  bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	u16 bufs;
  
  	START_USE(vq);
  
  	/* We optimistically turn back on interrupts, then check if there was
  	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
  	 * either clear the flags bit or point the event index at the next
  	 * entry. Always update the event index to keep code simple. */
  	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
  		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
  		if (!vq->event)
  			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
  	}
  	/* TODO: tune this threshold */
  	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
  
  	virtio_store_mb(vq->weak_barriers,
  			&vring_used_event(&vq->vring),
  			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
  	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
  		END_USE(vq);
  		return false;
  	}
  
  	END_USE(vq);
  	return true;
  }
  EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
  /**
   * virtqueue_detach_unused_buf - detach first unused buffer
   * @vq: the struct virtqueue we're talking about.
   *
   * Returns NULL or the "data" token handed to virtqueue_add_*().
   * This is not valid on an active queue; it is useful only for device
   * shutdown.
   */
  void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	unsigned int i;
  	void *buf;
  
  	START_USE(vq);
  
  	for (i = 0; i < vq->vring.num; i++) {
  		if (!vq->desc_state[i].data)
  			continue;
  		/* detach_buf clears data, so grab it now. */
  		buf = vq->desc_state[i].data;
  		detach_buf(vq, i);
  		vq->avail_idx_shadow--;
  		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
  		END_USE(vq);
  		return buf;
  	}
  	/* That should have freed everything. */
  	BUG_ON(vq->vq.num_free != vq->vring.num);
  
  	END_USE(vq);
  	return NULL;
  }
  EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

  irqreturn_t vring_interrupt(int irq, void *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  
  	if (!more_used(vq)) {
  		pr_debug("virtqueue interrupt with no work for %p
  ", vq);
  		return IRQ_NONE;
  	}
  
  	if (unlikely(vq->broken))
  		return IRQ_HANDLED;
  
  	pr_debug("virtqueue callback for %p (%p)
  ", vq, vq->vq.callback);
  	if (vq->vq.callback)
  		vq->vq.callback(&vq->vq);
  
  	return IRQ_HANDLED;
  }
  EXPORT_SYMBOL_GPL(vring_interrupt);
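
/*
 * Editor's illustration (not part of the original file): a transport (not a
 * device driver) would normally wire its interrupt source to
 * vring_interrupt(), passing the virtqueue as the cookie.  The irq number and
 * name string are hypothetical.
 */
#if 0	/* illustrative only */
static int my_transport_setup_irq(unsigned int irq, struct virtqueue *vq)
{
	/* vring_interrupt() returns IRQ_NONE when the ring has no work. */
	return request_irq(irq, vring_interrupt, IRQF_SHARED,
			   "my-virtio-transport", vq);
}
#endif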

  struct virtqueue *__vring_new_virtqueue(unsigned int index,
  					struct vring vring,
  					struct virtio_device *vdev,
  					bool weak_barriers,
  					bool (*notify)(struct virtqueue *),
  					void (*callback)(struct virtqueue *),
  					const char *name)
  {
  	unsigned int i;
  	struct vring_virtqueue *vq;

  	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
  		     GFP_KERNEL);
  	if (!vq)
  		return NULL;
  	vq->vring = vring;
  	vq->vq.callback = callback;
  	vq->vq.vdev = vdev;
  	vq->vq.name = name;
  	vq->vq.num_free = vring.num;
  	vq->vq.index = index;
  	vq->we_own_ring = false;
  	vq->queue_dma_addr = 0;
  	vq->queue_size_in_bytes = 0;
  	vq->notify = notify;
  	vq->weak_barriers = weak_barriers;
  	vq->broken = false;
  	vq->last_used_idx = 0;
  	vq->avail_flags_shadow = 0;
  	vq->avail_idx_shadow = 0;
  	vq->num_added = 0;
  	list_add_tail(&vq->vq.list, &vdev->vqs);
  #ifdef DEBUG
  	vq->in_use = false;
  	vq->last_add_time_valid = false;
  #endif
  	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
  	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

  	/* No callback?  Tell other side not to bother us. */
  	if (!callback) {
  		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
  		if (!vq->event)
  			vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
  	}
  
  	/* Put everything in free lists. */
  	vq->free_head = 0;
  	for (i = 0; i < vring.num-1; i++)
  		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
  	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));
  
  	return &vq->vq;
  }
  EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
  
  static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
  			      dma_addr_t *dma_handle, gfp_t flag)
  {
  	if (vring_use_dma_api(vdev)) {
  		return dma_alloc_coherent(vdev->dev.parent, size,
  					  dma_handle, flag);
  	} else {
  		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
  		if (queue) {
  			phys_addr_t phys_addr = virt_to_phys(queue);
  			*dma_handle = (dma_addr_t)phys_addr;
  
  			/*
			 * Sanity check: make sure we didn't truncate
  			 * the address.  The only arches I can find that
  			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
  			 * are certain non-highmem MIPS and x86
  			 * configurations, but these configurations
  			 * should never allocate physical pages above 32
  			 * bits, so this is fine.  Just in case, throw a
  			 * warning and abort if we end up with an
  			 * unrepresentable address.
  			 */
  			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
  				free_pages_exact(queue, PAGE_ALIGN(size));
  				return NULL;
  			}
  		}
  		return queue;
  	}
  }
  
  static void vring_free_queue(struct virtio_device *vdev, size_t size,
  			     void *queue, dma_addr_t dma_handle)
  {
  	if (vring_use_dma_api(vdev)) {
  		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
  	} else {
  		free_pages_exact(queue, PAGE_ALIGN(size));
  	}
  }
  
  struct virtqueue *vring_create_virtqueue(
  	unsigned int index,
  	unsigned int num,
  	unsigned int vring_align,
  	struct virtio_device *vdev,
  	bool weak_barriers,
  	bool may_reduce_num,
  	bool (*notify)(struct virtqueue *),
  	void (*callback)(struct virtqueue *),
  	const char *name)
  {
  	struct virtqueue *vq;
  	void *queue = NULL;
  	dma_addr_t dma_addr;
  	size_t queue_size_in_bytes;
  	struct vring vring;
  
  	/* We assume num is a power of 2. */
  	if (num & (num - 1)) {
  		dev_warn(&vdev->dev, "Bad virtqueue length %u
  ", num);
  		return NULL;
  	}
  
  	/* TODO: allocate each queue chunk individually */
  	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
  		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
  					  &dma_addr,
  					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
  		if (queue)
  			break;
  	}
  
  	if (!num)
  		return NULL;
  
  	if (!queue) {
  		/* Try to get a single page. You are my only hope! */
  		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
  					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
  	}
  	if (!queue)
  		return NULL;
  
  	queue_size_in_bytes = vring_size(num, vring_align);
  	vring_init(&vring, num, queue, vring_align);
  
  	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers,
  				   notify, callback, name);
  	if (!vq) {
  		vring_free_queue(vdev, queue_size_in_bytes, queue,
  				 dma_addr);
  		return NULL;
  	}
  
  	to_vvq(vq)->queue_dma_addr = dma_addr;
  	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
  	to_vvq(vq)->we_own_ring = true;
  
  	return vq;
  }
  EXPORT_SYMBOL_GPL(vring_create_virtqueue);
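
/*
 * Editor's illustration (not part of the original file): a transport would
 * typically let vring_create_virtqueue() pick the largest ring that fits and
 * then program the resulting addresses into device registers.  my_notify(),
 * my_callback() and my_write_queue_addr() are hypothetical.
 */
#if 0	/* illustrative only */
static struct virtqueue *my_setup_vq(struct virtio_device *vdev,
				     unsigned int index)
{
	struct virtqueue *vq;

	vq = vring_create_virtqueue(index, 256, SMP_CACHE_BYTES, vdev,
				    true /* weak_barriers */,
				    true /* may_reduce_num */,
				    my_notify, my_callback, "requests");
	if (!vq)
		return NULL;

	/* Tell the device where the ring lives. */
	my_write_queue_addr(virtqueue_get_desc_addr(vq),
			    virtqueue_get_avail_addr(vq),
			    virtqueue_get_used_addr(vq));
	return vq;
}
#endif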
  
  struct virtqueue *vring_new_virtqueue(unsigned int index,
  				      unsigned int num,
  				      unsigned int vring_align,
  				      struct virtio_device *vdev,
  				      bool weak_barriers,
  				      void *pages,
  				      bool (*notify)(struct virtqueue *vq),
  				      void (*callback)(struct virtqueue *vq),
  				      const char *name)
  {
  	struct vring vring;
  	vring_init(&vring, num, pages, vring_align);
  	return __vring_new_virtqueue(index, vring, vdev, weak_barriers,
  				     notify, callback, name);
  }
  EXPORT_SYMBOL_GPL(vring_new_virtqueue);

  void vring_del_virtqueue(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  
  	if (vq->we_own_ring) {
  		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
  				 vq->vring.desc, vq->queue_dma_addr);
  	}
  	list_del(&_vq->list);
  	kfree(vq);
  }
  EXPORT_SYMBOL_GPL(vring_del_virtqueue);

  /* Manipulates transport-specific feature bits. */
  void vring_transport_features(struct virtio_device *vdev)
  {
  	unsigned int i;
  
  	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
  		switch (i) {
  		case VIRTIO_RING_F_INDIRECT_DESC:
  			break;
  		case VIRTIO_RING_F_EVENT_IDX:
  			break;
  		case VIRTIO_F_VERSION_1:
  			break;
  		case VIRTIO_F_IOMMU_PLATFORM:
  			break;
  		default:
  			/* We don't understand this bit. */
  			__virtio_clear_bit(vdev, i);
  		}
  	}
  }
  EXPORT_SYMBOL_GPL(vring_transport_features);
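
/*
 * Editor's illustration (not part of the original file): a transport calls
 * vring_transport_features() from its finalize_features hook so that only
 * ring features handled by this file survive negotiation.  The function name
 * and the rest of the hook are hypothetical boilerplate.
 */
#if 0	/* illustrative only */
static int my_finalize_features(struct virtio_device *vdev)
{
	/* Drop transport feature bits this file does not understand. */
	vring_transport_features(vdev);

	/* ...then write the accepted feature bits to the device here. */
	return 0;
}
#endif
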
  /**
   * virtqueue_get_vring_size - return the size of the virtqueue's vring
   * @vq: the struct virtqueue containing the vring of interest.
   *
   * Returns the size of the vring.  This is mainly used for boasting to
   * userspace.  Unlike other operations, this need not be serialized.
   */
  unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
  {
  
  	struct vring_virtqueue *vq = to_vvq(_vq);
  
  	return vq->vring.num;
  }
  EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
  bool virtqueue_is_broken(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  
  	return vq->broken;
  }
  EXPORT_SYMBOL_GPL(virtqueue_is_broken);
  /*
   * This should prevent the device from being used, allowing drivers to
   * recover.  You may need to grab appropriate locks to flush.
   */
  void virtio_break_device(struct virtio_device *dev)
  {
  	struct virtqueue *_vq;
  
  	list_for_each_entry(_vq, &dev->vqs, list) {
  		struct vring_virtqueue *vq = to_vvq(_vq);
  		vq->broken = true;
  	}
  }
  EXPORT_SYMBOL_GPL(virtio_break_device);
  dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	BUG_ON(!vq->we_own_ring);
  
  	return vq->queue_dma_addr;
  }
  EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

  dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	BUG_ON(!vq->we_own_ring);
  
  	return vq->queue_dma_addr +
  		((char *)vq->vring.avail - (char *)vq->vring.desc);
  }
  EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);
  
  dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  
  	BUG_ON(!vq->we_own_ring);
  
  	return vq->queue_dma_addr +
  		((char *)vq->vring.used - (char *)vq->vring.desc);
  }
  EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);
  
  const struct vring *virtqueue_get_vring(struct virtqueue *vq)
  {
  	return &to_vvq(vq)->vring;
  }
  EXPORT_SYMBOL_GPL(virtqueue_get_vring);

  MODULE_LICENSE("GPL");