drivers/virtio/virtio_ring.c

  /* Virtio ring implementation.
   *
   *  Copyright 2007 Rusty Russell IBM Corporation
   *
   *  This program is free software; you can redistribute it and/or modify
   *  it under the terms of the GNU General Public License as published by
   *  the Free Software Foundation; either version 2 of the License, or
   *  (at your option) any later version.
   *
   *  This program is distributed in the hope that it will be useful,
   *  but WITHOUT ANY WARRANTY; without even the implied warranty of
   *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   *  GNU General Public License for more details.
   *
   *  You should have received a copy of the GNU General Public License
   *  along with this program; if not, write to the Free Software
   *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
   */
  #include <linux/virtio.h>
  #include <linux/virtio_ring.h>
  #include <linux/virtio_config.h>
  #include <linux/device.h>
  #include <linux/slab.h>
  #include <linux/module.h>
  #include <linux/hrtimer.h>

  /* virtio guest is communicating with a virtual "device" that actually runs on
   * a host processor.  Memory barriers are used to control SMP effects. */
  #ifdef CONFIG_SMP
  /* Where possible, use SMP barriers which are more lightweight than mandatory
   * barriers, because mandatory barriers control MMIO effects on accesses
   * through relaxed memory I/O windows (which virtio-pci does not use). */
  #define virtio_mb(vq) \
  	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
  #define virtio_rmb(vq) \
  	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
  #define virtio_wmb(vq) \
	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
  #else
  /* We must force memory ordering even if guest is UP since host could be
   * running on another CPU, but SMP barriers are defined to barrier() in that
   * configuration. So fall back to mandatory barriers instead. */
  #define virtio_mb(vq) mb()
  #define virtio_rmb(vq) rmb()
  #define virtio_wmb(vq) wmb()
  #endif
  #ifdef DEBUG
  /* For development, we want to crash whenever the ring is screwed. */
  #define BAD_RING(_vq, fmt, args...)				\
  	do {							\
  		dev_err(&(_vq)->vq.vdev->dev,			\
  			"%s:"fmt, (_vq)->vq.name, ##args);	\
  		BUG();						\
  	} while (0)
  /* Caller is supposed to guarantee no reentry. */
  #define START_USE(_vq)						\
  	do {							\
  		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
  			      (_vq)->vq.name, (_vq)->in_use);	\
  		(_vq)->in_use = __LINE__;			\
  	} while (0)
  #define END_USE(_vq) \
  	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
  #else
  #define BAD_RING(_vq, fmt, args...)				\
  	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
  			"%s:"fmt, (_vq)->vq.name, ##args);	\
  		(_vq)->broken = true;				\
  	} while (0)
  #define START_USE(vq)
  #define END_USE(vq)
  #endif
  
  struct vring_virtqueue
  {
  	struct virtqueue vq;
  
  	/* Actual memory layout for this queue */
  	struct vring vring;
  	/* Can we use weak barriers? */
  	bool weak_barriers;
  	/* Other side has made a mess, don't try any more. */
  	bool broken;
  	/* Host supports indirect buffers */
  	bool indirect;
  	/* Host publishes avail event idx */
  	bool event;
  	/* Number of free buffers */
  	unsigned int num_free;
  	/* Head of free buffer list. */
  	unsigned int free_head;
  	/* Number we've added since last sync. */
  	unsigned int num_added;
  
  	/* Last used index we've seen. */
  	u16 last_used_idx;
  
  	/* How to notify other side. FIXME: commonalize hcalls! */
  	void (*notify)(struct virtqueue *vq);
  
  #ifdef DEBUG
  	/* They're supposed to lock for us. */
  	unsigned int in_use;
  
  	/* Figure out if their kicks are too delayed. */
  	bool last_add_time_valid;
  	ktime_t last_add_time;
  #endif
  
  	/* Tokens for callbacks. */
  	void *data[];
  };
  
  #define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
  /* Set up an indirect table of descriptors and add it to the queue. */
  static int vring_add_indirect(struct vring_virtqueue *vq,
  			      struct scatterlist sg[],
  			      unsigned int out,
  			      unsigned int in,
  			      gfp_t gfp)
  {
  	struct vring_desc *desc;
  	unsigned head;
  	int i;
  	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
  	if (!desc)
  		return -ENOMEM;
  
  	/* Transfer entries from the sg list into the indirect page */
  	for (i = 0; i < out; i++) {
  		desc[i].flags = VRING_DESC_F_NEXT;
  		desc[i].addr = sg_phys(sg);
  		desc[i].len = sg->length;
  		desc[i].next = i+1;
  		sg++;
  	}
  	for (; i < (out + in); i++) {
  		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
  		desc[i].addr = sg_phys(sg);
  		desc[i].len = sg->length;
  		desc[i].next = i+1;
  		sg++;
  	}
  
  	/* Last one doesn't continue. */
  	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
  	desc[i-1].next = 0;
  
  	/* We're about to use a buffer */
  	vq->num_free--;
  
  	/* Use a single buffer which doesn't continue */
  	head = vq->free_head;
  	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
  	vq->vring.desc[head].addr = virt_to_phys(desc);
  	vq->vring.desc[head].len = i * sizeof(struct vring_desc);
  
  	/* Update free pointer */
  	vq->free_head = vq->vring.desc[head].next;
  
  	return head;
  }
  /**
   * virtqueue_add_buf - expose buffer to other end
   * @vq: the struct virtqueue we're talking about.
   * @sg: the description of the buffer(s).
   * @out_num: the number of sg readable by other side
   * @in_num: the number of sg which are writable (after readable ones)
   * @data: the token identifying the buffer.
   * @gfp: how to do memory allocations (if necessary).
   *
   * Caller must ensure we don't call this with other virtqueue operations
   * at the same time (except where noted).
   *
   * Returns remaining capacity of queue or a negative error
 * (i.e. -ENOSPC).  Note that it only really makes sense to treat all
   * positive return values as "available": indirect buffers mean that
   * we can put an entire sg[] array inside a single queue entry.
   */
  int virtqueue_add_buf(struct virtqueue *_vq,
  		      struct scatterlist sg[],
  		      unsigned int out,
  		      unsigned int in,
  		      void *data,
  		      gfp_t gfp)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	unsigned int i, avail, uninitialized_var(prev);
  	int head;

  	START_USE(vq);
  	BUG_ON(data == NULL);

  #ifdef DEBUG
  	{
  		ktime_t now = ktime_get();
  
  		/* No kick or get, with .1 second between?  Warn. */
  		if (vq->last_add_time_valid)
  			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
  					    > 100);
  		vq->last_add_time = now;
  		vq->last_add_time_valid = true;
  	}
  #endif
  	/* If the host supports indirect descriptor tables, and we have multiple
  	 * buffers, then go indirect. FIXME: tune this threshold */
  	if (vq->indirect && (out + in) > 1 && vq->num_free) {
  		head = vring_add_indirect(vq, sg, out, in, gfp);
  		if (likely(head >= 0))
  			goto add_head;
  	}
  	BUG_ON(out + in > vq->vring.num);
  	BUG_ON(out + in == 0);
  	if (vq->num_free < out + in) {
  		pr_debug("Can't add buf len %i - avail = %i
  ",
  			 out + in, vq->num_free);
  		/* FIXME: for historical reasons, we force a notify here if
  		 * there are outgoing parts to the buffer.  Presumably the
  		 * host should service the ring ASAP. */
  		if (out)
  			vq->notify(&vq->vq);
  		END_USE(vq);
  		return -ENOSPC;
  	}
  
  	/* We're about to use some buffers from the free list. */
  	vq->num_free -= out + in;
  
  	head = vq->free_head;
  	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
  		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
  		vq->vring.desc[i].addr = sg_phys(sg);
  		vq->vring.desc[i].len = sg->length;
  		prev = i;
  		sg++;
  	}
  	for (; in; i = vq->vring.desc[i].next, in--) {
  		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
  		vq->vring.desc[i].addr = sg_phys(sg);
  		vq->vring.desc[i].len = sg->length;
  		prev = i;
  		sg++;
  	}
  	/* Last one doesn't continue. */
  	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;
  
  	/* Update free pointer */
  	vq->free_head = i;
  add_head:
  	/* Set token. */
  	vq->data[head] = data;
  
  	/* Put entry in available array (but don't update avail->idx until they
  	 * do sync). */
  	avail = (vq->vring.avail->idx & (vq->vring.num-1));
  	vq->vring.avail->ring[avail] = head;
  	/* Descriptors and available array need to be set before we expose the
  	 * new available array entries. */
  	virtio_wmb(vq);
  	vq->vring.avail->idx++;
  	vq->num_added++;
  
  	/* This is very unlikely, but theoretically possible.  Kick
  	 * just in case. */
  	if (unlikely(vq->num_added == (1 << 16) - 1))
  		virtqueue_kick(_vq);
  	pr_debug("Added buffer head %i to %p
  ", head, vq);
  	END_USE(vq);

  	return vq->num_free;
  }
  EXPORT_SYMBOL_GPL(virtqueue_add_buf);
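
/*
 * Example usage (an illustrative sketch, not part of this file): queueing
 * a request the device reads followed by a status byte it writes back.
 * "my_vq", "req" and "status" are hypothetical driver-owned objects; the
 * buffers must be DMA-capable (i.e. not on the stack).
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], req, sizeof(*req));		// readable by device
 *	sg_set_buf(&sg[1], status, sizeof(*status));	// writable by device
 *	if (virtqueue_add_buf(my_vq, sg, 1, 1, req, GFP_ATOMIC) < 0)
 *		return -ENOSPC;		// ring full, retry later
 *	virtqueue_kick(my_vq);
 */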

  /**
   * virtqueue_kick_prepare - first half of split virtqueue_kick call.
   * @vq: the struct virtqueue
   *
   * Instead of virtqueue_kick(), you can do:
   *	if (virtqueue_kick_prepare(vq))
   *		virtqueue_notify(vq);
   *
 * This is sometimes useful because virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
   */
  bool virtqueue_kick_prepare(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	u16 new, old;
  	bool needs_kick;
  	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq);

  	old = vq->vring.avail->idx - vq->num_added;
  	new = vq->vring.avail->idx;
  	vq->num_added = 0;
  #ifdef DEBUG
  	if (vq->last_add_time_valid) {
  		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
  					      vq->last_add_time)) > 100);
  	}
  	vq->last_add_time_valid = false;
  #endif
  	if (vq->event) {
  		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
  					      new, old);
  	} else {
  		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
  	}
  	END_USE(vq);
  	return needs_kick;
  }
  EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
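
/*
 * For reference, vring_need_event() (from <linux/virtio_ring.h>) checks
 * whether the index the other side asked to be notified at falls within
 * the batch we just published:
 *
 *	(u16)(new_idx - event_idx - 1) < (u16)(new_idx - old)
 *
 * i.e. kick only if event_idx lies in the half-open window [old, new),
 * with all arithmetic wrapping modulo 2^16.
 */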
  
  /**
   * virtqueue_notify - second half of split virtqueue_kick call.
   * @vq: the struct virtqueue
   *
   * This does not need to be serialized.
   */
  void virtqueue_notify(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  
  	/* Prod other side to tell it about changes. */
  	vq->notify(_vq);
  }
  EXPORT_SYMBOL_GPL(virtqueue_notify);
  
  /**
   * virtqueue_kick - update after add_buf
   * @vq: the struct virtqueue
   *
   * After one or more virtqueue_add_buf calls, invoke this to kick
   * the other side.
   *
   * Caller must ensure we don't call this with other virtqueue
   * operations at the same time (except where noted).
   */
  void virtqueue_kick(struct virtqueue *vq)
  {
  	if (virtqueue_kick_prepare(vq))
  		virtqueue_notify(vq);
  }
  EXPORT_SYMBOL_GPL(virtqueue_kick);
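
/*
 * Sketch of the split-kick pattern described above: a driver holding a
 * lock across ring updates can drop it before the (potentially expensive)
 * notification.  "my_lock" and "my_vq" are hypothetical.
 *
 *	bool kick;
 *
 *	spin_lock_irqsave(&my_lock, flags);
 *	virtqueue_add_buf(my_vq, sg, out, in, data, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(my_vq);
 *	spin_unlock_irqrestore(&my_lock, flags);
 *	if (kick)
 *		virtqueue_notify(my_vq);
 */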
  
  static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
  {
  	unsigned int i;
  
  	/* Clear data ptr. */
  	vq->data[head] = NULL;
  
  	/* Put back on free list: find end */
  	i = head;
  
  	/* Free the indirect table */
  	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
  		kfree(phys_to_virt(vq->vring.desc[i].addr));
  	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
  		i = vq->vring.desc[i].next;
  		vq->num_free++;
  	}
  
  	vq->vring.desc[i].next = vq->free_head;
  	vq->free_head = head;
  	/* Plus final descriptor */
  	vq->num_free++;
  }
  static inline bool more_used(const struct vring_virtqueue *vq)
  {
  	return vq->last_used_idx != vq->vring.used->idx;
  }
  /**
   * virtqueue_get_buf - get the next used buffer
   * @vq: the struct virtqueue we're talking about.
   * @len: the length written into the buffer
   *
 * If the device wrote data into the buffer, @len will be set to the
   * amount written.  This means you don't need to clear the buffer
   * beforehand to ensure there's no data leakage in the case of short
   * writes.
   *
   * Caller must ensure we don't call this with other virtqueue
   * operations at the same time (except where noted).
   *
   * Returns NULL if there are no used buffers, or the "data" token
   * handed to virtqueue_add_buf().
   */
  void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	void *ret;
  	unsigned int i;
  	u16 last_used;
  
  	START_USE(vq);
  	if (unlikely(vq->broken)) {
  		END_USE(vq);
  		return NULL;
  	}
  	if (!more_used(vq)) {
  		pr_debug("No more buffers in queue
  ");
  		END_USE(vq);
  		return NULL;
  	}
  	/* Only get used array entries after they have been exposed by host. */
  	virtio_rmb(vq);

  	last_used = (vq->last_used_idx & (vq->vring.num - 1));
  	i = vq->vring.used->ring[last_used].id;
  	*len = vq->vring.used->ring[last_used].len;
  
  	if (unlikely(i >= vq->vring.num)) {
  		BAD_RING(vq, "id %u out of range
  ", i);
  		return NULL;
  	}
  	if (unlikely(!vq->data[i])) {
  		BAD_RING(vq, "id %u is not a head!
  ", i);
  		return NULL;
  	}
  
  	/* detach_buf clears data, so grab it now. */
  	ret = vq->data[i];
  	detach_buf(vq, i);
  	vq->last_used_idx++;
  	/* If we expect an interrupt for the next entry, tell host
  	 * by writing event index and flush out the write before
  	 * the read in the next get_buf call. */
  	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
  		vring_used_event(&vq->vring) = vq->last_used_idx;
  		virtio_mb(vq);
  	}
  #ifdef DEBUG
  	vq->last_add_time_valid = false;
  #endif
  	END_USE(vq);
  	return ret;
  }
  EXPORT_SYMBOL_GPL(virtqueue_get_buf);
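
/*
 * Sketch of the usual completion loop, typically run from the virtqueue
 * callback; "my_vq" and complete_request() are hypothetical.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(my_vq, &len)) != NULL)
 *		complete_request(buf, len);
 */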

  /**
   * virtqueue_disable_cb - disable callbacks
   * @vq: the struct virtqueue we're talking about.
   *
   * Note that this is not necessarily synchronous, hence unreliable and only
   * useful as an optimization.
   *
   * Unlike other operations, this need not be serialized.
   */
  void virtqueue_disable_cb(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
  }
  EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

  /**
   * virtqueue_enable_cb - restart callbacks after disable_cb.
   * @vq: the struct virtqueue we're talking about.
   *
   * This re-enables callbacks; it returns "false" if there are pending
   * buffers in the queue, to detect a possible race between the driver
   * checking for more work, and enabling callbacks.
   *
   * Caller must ensure we don't call this with other virtqueue
   * operations at the same time (except where noted).
   */
  bool virtqueue_enable_cb(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  
  	START_USE(vq);
  
  	/* We optimistically turn back on interrupts, then check if there was
  	 * more to do. */
  	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
  	 * either clear the flags bit or point the event index at the next
  	 * entry. Always do both to keep code simple. */
  	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
  	vring_used_event(&vq->vring) = vq->last_used_idx;
  	virtio_mb(vq);
  	if (unlikely(more_used(vq))) {
  		END_USE(vq);
  		return false;
  	}
  
  	END_USE(vq);
  	return true;
  }
  EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
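
/*
 * Sketch of the canonical loop that uses the return value to close the
 * race described above (names are hypothetical):
 *
 *	virtqueue_disable_cb(my_vq);
 *	do {
 *		while ((buf = virtqueue_get_buf(my_vq, &len)) != NULL)
 *			complete_request(buf, len);
 *	} while (!virtqueue_enable_cb(my_vq));
 */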

  /**
   * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
   * @vq: the struct virtqueue we're talking about.
   *
   * This re-enables callbacks but hints to the other side to delay
   * interrupts until most of the available buffers have been processed;
   * it returns "false" if there are many pending buffers in the queue,
   * to detect a possible race between the driver checking for more work,
   * and enabling callbacks.
   *
   * Caller must ensure we don't call this with other virtqueue
   * operations at the same time (except where noted).
   */
  bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	u16 bufs;
  
  	START_USE(vq);
  
  	/* We optimistically turn back on interrupts, then check if there was
  	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
  	 * either clear the flags bit or point the event index at the next
  	 * entry. Always do both to keep code simple. */
  	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
  	/* TODO: tune this threshold */
  	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
  	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
  	virtio_mb(vq);
  	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
  		END_USE(vq);
  		return false;
  	}
  
  	END_USE(vq);
  	return true;
  }
  EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
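
/*
 * A transmit path that reclaims used buffers in batches might prefer this
 * over virtqueue_enable_cb() so the device interrupts roughly once per
 * three-quarters of a ring rather than once per buffer; the surrounding
 * loop looks the same as the virtqueue_enable_cb() sketch above.
 */
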
  /**
   * virtqueue_detach_unused_buf - detach first unused buffer
   * @vq: the struct virtqueue we're talking about.
   *
   * Returns NULL or the "data" token handed to virtqueue_add_buf().
   * This is not valid on an active queue; it is useful only for device
   * shutdown.
   */
  void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  	unsigned int i;
  	void *buf;
  
  	START_USE(vq);
  
  	for (i = 0; i < vq->vring.num; i++) {
  		if (!vq->data[i])
  			continue;
  		/* detach_buf clears data, so grab it now. */
  		buf = vq->data[i];
  		detach_buf(vq, i);
  		vq->vring.avail->idx--;
  		END_USE(vq);
  		return buf;
  	}
  	/* That should have freed everything. */
  	BUG_ON(vq->num_free != vq->vring.num);
  
  	END_USE(vq);
  	return NULL;
  }
  EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
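
/*
 * Sketch of teardown use (names hypothetical): after the device is
 * stopped, reclaim any tokens it never consumed.
 *
 *	while ((buf = virtqueue_detach_unused_buf(my_vq)) != NULL)
 *		kfree(buf);
 */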

  irqreturn_t vring_interrupt(int irq, void *_vq)
  {
  	struct vring_virtqueue *vq = to_vvq(_vq);
  
  	if (!more_used(vq)) {
  		pr_debug("virtqueue interrupt with no work for %p
  ", vq);
  		return IRQ_NONE;
  	}
  
  	if (unlikely(vq->broken))
  		return IRQ_HANDLED;
  
  	pr_debug("virtqueue callback for %p (%p)
  ", vq, vq->vq.callback);
  	if (vq->vq.callback)
  		vq->vq.callback(&vq->vq);
  
  	return IRQ_HANDLED;
  }
  EXPORT_SYMBOL_GPL(vring_interrupt);
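
/*
 * A transport typically wires this up as an ordinary interrupt handler,
 * passing the struct virtqueue as the dev_id cookie, e.g. (sketch):
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED, name, vq);
 */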

  struct virtqueue *vring_new_virtqueue(unsigned int num,
  				      unsigned int vring_align,
  				      struct virtio_device *vdev,
  				      bool weak_barriers,
  				      void *pages,
  				      void (*notify)(struct virtqueue *),
  				      void (*callback)(struct virtqueue *),
  				      const char *name)
  {
  	struct vring_virtqueue *vq;
  	unsigned int i;
  	/* We assume num is a power of 2. */
  	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
  		return NULL;
  	}
  	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
  	if (!vq)
  		return NULL;
  	vring_init(&vq->vring, num, pages, vring_align);
  	vq->vq.callback = callback;
  	vq->vq.vdev = vdev;
  	vq->vq.name = name;
  	vq->notify = notify;
  	vq->weak_barriers = weak_barriers;
  	vq->broken = false;
  	vq->last_used_idx = 0;
  	vq->num_added = 0;
  	list_add_tail(&vq->vq.list, &vdev->vqs);
  #ifdef DEBUG
  	vq->in_use = false;
  	vq->last_add_time_valid = false;
  #endif
  	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
  	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

  	/* No callback?  Tell other side not to bother us. */
  	if (!callback)
  		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
  
  	/* Put everything in free lists. */
  	vq->num_free = num;
  	vq->free_head = 0;
  	for (i = 0; i < num-1; i++) {
  		vq->vring.desc[i].next = i+1;
  		vq->data[i] = NULL;
  	}
  	vq->data[i] = NULL;
  
  	return &vq->vq;
  }
  EXPORT_SYMBOL_GPL(vring_new_virtqueue);
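
/*
 * Sketch of how a transport might create a queue ("my_notify" and
 * "my_callback" are hypothetical; num must be a power of 2):
 *
 *	size_t size = vring_size(num, PAGE_SIZE);
 *	void *pages = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
 *	struct virtqueue *vq = vring_new_virtqueue(num, PAGE_SIZE, vdev,
 *						   true, pages, my_notify,
 *						   my_callback, "my-vq");
 *	if (!vq)
 *		free_pages_exact(pages, size);
 */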
  
  void vring_del_virtqueue(struct virtqueue *vq)
  {
  	list_del(&vq->list);
  	kfree(to_vvq(vq));
  }
  EXPORT_SYMBOL_GPL(vring_del_virtqueue);

  /* Manipulates transport-specific feature bits. */
  void vring_transport_features(struct virtio_device *vdev)
  {
  	unsigned int i;
  
  	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
  		switch (i) {
  		case VIRTIO_RING_F_INDIRECT_DESC:
  			break;
  		case VIRTIO_RING_F_EVENT_IDX:
  			break;
  		default:
  			/* We don't understand this bit. */
  			clear_bit(i, vdev->features);
  		}
  	}
  }
  EXPORT_SYMBOL_GPL(vring_transport_features);
  /**
   * virtqueue_get_vring_size - return the size of the virtqueue's vring
   * @vq: the struct virtqueue containing the vring of interest.
   *
   * Returns the size of the vring.  This is mainly used for boasting to
   * userspace.  Unlike other operations, this need not be serialized.
   */
  unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
  	struct vring_virtqueue *vq = to_vvq(_vq);
  
  	return vq->vring.num;
  }
  EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);
  MODULE_LICENSE("GPL");