drivers/vhost/net.c

  /* Copyright (C) 2009 Red Hat, Inc.
   * Author: Michael S. Tsirkin <mst@redhat.com>
   *
   * This work is licensed under the terms of the GNU GPL, version 2.
   *
   * virtio-net server in host kernel.
   */
  
  #include <linux/compat.h>
  #include <linux/eventfd.h>
  #include <linux/vhost.h>
  #include <linux/virtio_net.h>
  #include <linux/miscdevice.h>
  #include <linux/module.h>
  #include <linux/moduleparam.h>
  #include <linux/mutex.h>
  #include <linux/workqueue.h>
  #include <linux/rcupdate.h>
  #include <linux/file.h>
  #include <linux/slab.h>
  
  #include <linux/net.h>
  #include <linux/if_packet.h>
  #include <linux/if_arp.h>
  #include <linux/if_tun.h>
  #include <linux/if_macvlan.h>
  
  #include <net/sock.h>
  
  #include "vhost.h"
  static int experimental_zcopytx;
  module_param(experimental_zcopytx, int, 0444);
  MODULE_PARM_DESC(experimental_zcopytx, "Enable Experimental Zero Copy TX");
  /* Max number of bytes transferred before requeueing the job.
   * Using this limit prevents one virtqueue from starving others. */
  #define VHOST_NET_WEIGHT 0x80000
/* Max number of TX used buffers held for outstanding zerocopy DMAs */
  #define VHOST_MAX_PEND 128
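/* Packets shorter than this are copied rather than sent zerocopy, since
 * the zerocopy completion bookkeeping outweighs the copy for small packets. */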
  #define VHOST_GOODCOPY_LEN 256
  enum {
  	VHOST_NET_VQ_RX = 0,
  	VHOST_NET_VQ_TX = 1,
  	VHOST_NET_VQ_MAX = 2,
  };
  
  enum vhost_net_poll_state {
  	VHOST_NET_POLL_DISABLED = 0,
  	VHOST_NET_POLL_STARTED = 1,
  	VHOST_NET_POLL_STOPPED = 2,
  };
  
  struct vhost_net {
  	struct vhost_dev dev;
  	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
  	struct vhost_poll poll[VHOST_NET_VQ_MAX];
  	/* Tells us whether we are polling a socket for TX.
  	 * We only do this when socket buffer fills up.
  	 * Protected by tx vq lock. */
  	enum vhost_net_poll_state tx_poll_state;
  };
  static bool vhost_sock_zcopy(struct socket *sock)
  {
  	return unlikely(experimental_zcopytx) &&
  		sock_flag(sock->sk, SOCK_ZEROCOPY);
  }
  /* Pop first len bytes from iovec. Return number of segments used. */
  static int move_iovec_hdr(struct iovec *from, struct iovec *to,
  			  size_t len, int iov_count)
  {
  	int seg = 0;
  	size_t size;

  	while (len && seg < iov_count) {
  		size = min(from->iov_len, len);
  		to->iov_base = from->iov_base;
  		to->iov_len = size;
  		from->iov_len -= size;
  		from->iov_base += size;
  		len -= size;
  		++from;
  		++to;
  		++seg;
  	}
  	return seg;
  }
/* Copy iovec entries for len bytes from iovec. Unlike move_iovec_hdr(),
 * the source iovec is left unmodified. */
  static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
  			   size_t len, int iovcount)
  {
  	int seg = 0;
  	size_t size;

  	while (len && seg < iovcount) {
  		size = min(from->iov_len, len);
  		to->iov_base = from->iov_base;
  		to->iov_len = size;
  		len -= size;
  		++from;
  		++to;
  		++seg;
  	}
  }
  
  /* Caller must have TX VQ lock */
  static void tx_poll_stop(struct vhost_net *net)
  {
  	if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
  		return;
  	vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
  	net->tx_poll_state = VHOST_NET_POLL_STOPPED;
  }
  
  /* Caller must have TX VQ lock */
  static void tx_poll_start(struct vhost_net *net, struct socket *sock)
  {
  	if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
  		return;
  	vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
  	net->tx_poll_state = VHOST_NET_POLL_STARTED;
  }
  
  /* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
  static void handle_tx(struct vhost_net *net)
  {
  	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
  	unsigned out, in, s;
  	int head;
  	struct msghdr msg = {
  		.msg_name = NULL,
  		.msg_namelen = 0,
  		.msg_control = NULL,
  		.msg_controllen = 0,
  		.msg_iov = vq->iov,
  		.msg_flags = MSG_DONTWAIT,
  	};
  	size_t len, total_len = 0;
  	int err, wmem;
  	size_t hdr_size;
  	struct socket *sock;
  	struct vhost_ubuf_ref *uninitialized_var(ubufs);
  	bool zcopy;

  	/* TODO: check that we are running from vhost_worker? */
  	sock = rcu_dereference_check(vq->private_data, 1);
  	if (!sock)
  		return;
  
  	wmem = atomic_read(&sock->sk->sk_wmem_alloc);
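	/* Socket send buffer already full: arm TX polling so we are woken
	 * when space frees up, and retry then. */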
  	if (wmem >= sock->sk->sk_sndbuf) {
  		mutex_lock(&vq->mutex);
  		tx_poll_start(net, sock);
  		mutex_unlock(&vq->mutex);
  		return;
  	}

  	mutex_lock(&vq->mutex);
  	vhost_disable_notify(&net->dev, vq);

  	if (wmem < sock->sk->sk_sndbuf / 2)
  		tx_poll_stop(net);
  	hdr_size = vq->vhost_hlen;
  	zcopy = vhost_sock_zcopy(sock);
  
  	for (;;) {
  		/* Release DMAs done buffers first */
  		if (zcopy)
  			vhost_zerocopy_signal_used(vq);
  		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
  					 ARRAY_SIZE(vq->iov),
  					 &out, &in,
  					 NULL, NULL);
  		/* On error, stop handling until the next kick. */
  		if (unlikely(head < 0))
  			break;
  		/* Nothing new?  Wait for eventfd to tell us they refilled. */
  		if (head == vq->num) {
  			int num_pends;
  			wmem = atomic_read(&sock->sk->sk_wmem_alloc);
  			if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
  				tx_poll_start(net, sock);
  				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
  				break;
  			}
  			/* If more outstanding DMAs, queue the work.
  			 * Handle upend_idx wrap around
  			 */
  			num_pends = likely(vq->upend_idx >= vq->done_idx) ?
  				    (vq->upend_idx - vq->done_idx) :
  				    (vq->upend_idx + UIO_MAXIOV - vq->done_idx);
  			if (unlikely(num_pends > VHOST_MAX_PEND)) {
  				tx_poll_start(net, sock);
  				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
  				break;
  			}
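			/* Ring really is empty: re-enable guest notification
			 * and recheck, to avoid missing a kick that raced
			 * with us. */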
  			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
  				vhost_disable_notify(&net->dev, vq);
  				continue;
  			}
  			break;
  		}
  		if (in) {
  			vq_err(vq, "Unexpected descriptor format for TX: "
  			       "out %d, int %d
  ", out, in);
  			break;
  		}
  		/* Skip header. TODO: support TSO. */
  		s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
  		msg.msg_iovlen = out;
  		len = iov_length(vq->iov, out);
  		/* Sanity check */
  		if (!len) {
  			vq_err(vq, "Unexpected header len for TX: "
  			       "%zd expected %zd
  ",
  			       iov_length(vq->hdr, s), hdr_size);
  			break;
  		}
  		/* use msg_control to pass vhost zerocopy ubuf info to skb */
  		if (zcopy) {
  			vq->heads[vq->upend_idx].id = head;
  			if (len < VHOST_GOODCOPY_LEN) {
  				/* copy don't need to wait for DMA done */
  				vq->heads[vq->upend_idx].len =
  							VHOST_DMA_DONE_LEN;
  				msg.msg_control = NULL;
  				msg.msg_controllen = 0;
  				ubufs = NULL;
  			} else {
  				struct ubuf_info *ubuf = &vq->ubuf_info[head];
  
  				vq->heads[vq->upend_idx].len = len;
  				ubuf->callback = vhost_zerocopy_callback;
  				ubuf->arg = vq->ubufs;
  				ubuf->desc = vq->upend_idx;
  				msg.msg_control = ubuf;
  				msg.msg_controllen = sizeof(ubuf);
  				ubufs = vq->ubufs;
  				kref_get(&ubufs->kref);
  			}
  			vq->upend_idx = (vq->upend_idx + 1) % UIO_MAXIOV;
  		}
  		/* TODO: Check specific error and bomb out unless ENOBUFS? */
  		err = sock->ops->sendmsg(NULL, sock, &msg, len);
  		if (unlikely(err < 0)) {
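			/* Undo the zerocopy slot reserved above: this packet
			 * never reached the device, so drop our ubufs ref
			 * and rewind upend_idx. */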
  			if (zcopy) {
  				if (ubufs)
  					vhost_ubuf_put(ubufs);
  				vq->upend_idx = ((unsigned)vq->upend_idx - 1) %
  					UIO_MAXIOV;
  			}
  			vhost_discard_vq_desc(vq, 1);
  			tx_poll_start(net, sock);
  			break;
  		}
  		if (err != len)
  			pr_debug("Truncated TX packet: "
  				 " len %d != %zd
  ", err, len);
  		if (!zcopy)
  			vhost_add_used_and_signal(&net->dev, vq, head, 0);
  		total_len += len;
  		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
  			vhost_poll_queue(&vq->poll);
  			break;
  		}
  	}
  
  	mutex_unlock(&vq->mutex);
  }
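/* Peek at the length of the first packet queued on the socket without
 * dequeueing it; returns 0 if the receive queue is empty. */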
  static int peek_head_len(struct sock *sk)
  {
  	struct sk_buff *head;
  	int len = 0;
  	unsigned long flags;

  	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
  	head = skb_peek(&sk->sk_receive_queue);
  	if (likely(head))
  		len = head->len;
  	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
  	return len;
  }
  
/* This is a multi-buffer version of vhost_get_vq_desc(), which works if
 *	vq has read descriptors only.
   * @vq		- the relevant virtqueue
   * @datalen	- data length we'll be reading
   * @iovcount	- returned count of io vectors we fill
   * @log		- vhost log
   * @log_num	- log offset
   * @quota       - headcount quota, 1 for big buffer
   *	returns number of buffer heads allocated, negative on error
   */
  static int get_rx_bufs(struct vhost_virtqueue *vq,
  		       struct vring_used_elem *heads,
  		       int datalen,
  		       unsigned *iovcount,
  		       struct vhost_log *log,
  		       unsigned *log_num,
  		       unsigned int quota)
  {
  	unsigned int out, in;
  	int seg = 0;
  	int headcount = 0;
  	unsigned d;
  	int r, nlogs = 0;
  	while (datalen > 0 && headcount < quota) {
  		if (unlikely(seg >= UIO_MAXIOV)) {
  			r = -ENOBUFS;
  			goto err;
  		}
  		d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
  				      ARRAY_SIZE(vq->iov) - seg, &out,
  				      &in, log, log_num);
  		if (d == vq->num) {
  			r = 0;
  			goto err;
  		}
  		if (unlikely(out || in <= 0)) {
  			vq_err(vq, "unexpected descriptor format for RX: "
  				"out %d, in %d
  ", out, in);
  			r = -EINVAL;
  			goto err;
  		}
  		if (unlikely(log)) {
  			nlogs += *log_num;
  			log += *log_num;
  		}
  		heads[headcount].id = d;
  		heads[headcount].len = iov_length(vq->iov + seg, in);
  		datalen -= heads[headcount].len;
  		++headcount;
  		seg += in;
  	}
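	/* datalen is <= 0 here when the whole packet fit: shrink the last
	 * buffer's recorded length so the total matches the data exactly. */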
  	heads[headcount - 1].len += datalen;
  	*iovcount = seg;
  	if (unlikely(log))
  		*log_num = nlogs;
  	return headcount;
  err:
  	vhost_discard_vq_desc(vq, headcount);
  	return r;
  }
  /* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
  static void handle_rx(struct vhost_net *net)
  {
  	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
  	unsigned uninitialized_var(in), log;
  	struct vhost_log *vq_log;
  	struct msghdr msg = {
  		.msg_name = NULL,
  		.msg_namelen = 0,
  		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
  		.msg_controllen = 0,
  		.msg_iov = vq->iov,
  		.msg_flags = MSG_DONTWAIT,
  	};
  	struct virtio_net_hdr_mrg_rxbuf hdr = {
  		.hdr.flags = 0,
  		.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
  	};
  	size_t total_len = 0;
  	int err, headcount, mergeable;
  	size_t vhost_hlen, sock_hlen;
  	size_t vhost_len, sock_len;
  	/* TODO: check that we are running from vhost_worker? */
  	struct socket *sock = rcu_dereference_check(vq->private_data, 1);

  	if (!sock)
  		return;
  	mutex_lock(&vq->mutex);
  	vhost_disable_notify(&net->dev, vq);
  	vhost_hlen = vq->vhost_hlen;
  	sock_hlen = vq->sock_hlen;
  
  	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
  		vq->log : NULL;
  	mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);
  
  	while ((sock_len = peek_head_len(sock->sk))) {
  		sock_len += sock_hlen;
  		vhost_len = sock_len + vhost_hlen;
  		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
  					&in, vq_log, &log,
  					likely(mergeable) ? UIO_MAXIOV : 1);
  		/* On error, stop handling until the next kick. */
  		if (unlikely(headcount < 0))
  			break;
  		/* OK, now we need to know about added descriptors. */
  		if (!headcount) {
  			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
  				/* They have slipped one in as we were
  				 * doing that: check again. */
  				vhost_disable_notify(&net->dev, vq);
  				continue;
  			}
  			/* Nothing new?  Wait for eventfd to tell us
  			 * they refilled. */
  			break;
  		}
  		/* We don't need to be notified again. */
		if (unlikely(vhost_hlen))
  			/* Skip header. TODO: support TSO. */
  			move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
  		else
  			/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
  			 * needed because recvmsg can modify msg_iov. */
  			copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
  		msg.msg_iovlen = in;
  		err = sock->ops->recvmsg(NULL, sock, &msg,
  					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
  		/* Userspace might have consumed the packet meanwhile:
  		 * it's not supposed to do this usually, but might be hard
  		 * to prevent. Discard data we got (if any) and keep going. */
  		if (unlikely(err != sock_len)) {
  			pr_debug("Discarded rx packet: "
  				 " len %d, expected %zd
  ", err, sock_len);
  			vhost_discard_vq_desc(vq, headcount);
  			continue;
  		}
  		if (unlikely(vhost_hlen) &&
  		    memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0,
  				      vhost_hlen)) {
  			vq_err(vq, "Unable to write vnet_hdr at addr %p
  ",
  			       vq->iov->iov_base);
  			break;
  		}
  		/* TODO: Should check and handle checksum. */
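		/* For mergeable buffers, tell the guest how many descriptor
		 * chains this packet was spread across. */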
  		if (likely(mergeable) &&
  		    memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
  				      offsetof(typeof(hdr), num_buffers),
  				      sizeof hdr.num_buffers)) {
  			vq_err(vq, "Failed num_buffers write");
  			vhost_discard_vq_desc(vq, headcount);
  			break;
  		}
  		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
  					    headcount);
  		if (unlikely(vq_log))
  			vhost_log_write(vq, vq_log, log, vhost_len);
  		total_len += vhost_len;
  		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
  			vhost_poll_queue(&vq->poll);
  			break;
  		}
  	}
  
  	mutex_unlock(&vq->mutex);
  }
  static void handle_tx_kick(struct vhost_work *work)
  {
  	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
  						  poll.work);
  	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
  	handle_tx(net);
  }
  static void handle_rx_kick(struct vhost_work *work)
  {
  	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
  						  poll.work);
  	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
  	handle_rx(net);
  }
  static void handle_tx_net(struct vhost_work *work)
  {
  	struct vhost_net *net = container_of(work, struct vhost_net,
  					     poll[VHOST_NET_VQ_TX].work);
  	handle_tx(net);
  }
  static void handle_rx_net(struct vhost_work *work)
  {
  	struct vhost_net *net = container_of(work, struct vhost_net,
  					     poll[VHOST_NET_VQ_RX].work);
  	handle_rx(net);
  }
  
  static int vhost_net_open(struct inode *inode, struct file *f)
  {
  	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
  	struct vhost_dev *dev;
  	int r;

  	if (!n)
  		return -ENOMEM;
  
  	dev = &n->dev;
  	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
  	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
  	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
  	if (r < 0) {
  		kfree(n);
  		return r;
  	}
  	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
  	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
  	n->tx_poll_state = VHOST_NET_POLL_DISABLED;
  
  	f->private_data = n;
  
  	return 0;
  }
  
  static void vhost_net_disable_vq(struct vhost_net *n,
  				 struct vhost_virtqueue *vq)
  {
  	if (!vq->private_data)
  		return;
  	if (vq == n->vqs + VHOST_NET_VQ_TX) {
  		tx_poll_stop(n);
  		n->tx_poll_state = VHOST_NET_POLL_DISABLED;
  	} else
  		vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
  }
  
  static void vhost_net_enable_vq(struct vhost_net *n,
  				struct vhost_virtqueue *vq)
  {
  	struct socket *sock;
  
  	sock = rcu_dereference_protected(vq->private_data,
  					 lockdep_is_held(&vq->mutex));
  	if (!sock)
  		return;
  	if (vq == n->vqs + VHOST_NET_VQ_TX) {
  		n->tx_poll_state = VHOST_NET_POLL_STOPPED;
  		tx_poll_start(n, sock);
  	} else
  		vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
  }
  
  static struct socket *vhost_net_stop_vq(struct vhost_net *n,
  					struct vhost_virtqueue *vq)
  {
  	struct socket *sock;
  
  	mutex_lock(&vq->mutex);
  	sock = rcu_dereference_protected(vq->private_data,
  					 lockdep_is_held(&vq->mutex));
  	vhost_net_disable_vq(n, vq);
  	rcu_assign_pointer(vq->private_data, NULL);
  	mutex_unlock(&vq->mutex);
  	return sock;
  }
  
  static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
  			   struct socket **rx_sock)
  {
  	*tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
  	*rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
  }
  
  static void vhost_net_flush_vq(struct vhost_net *n, int index)
  {
  	vhost_poll_flush(n->poll + index);
  	vhost_poll_flush(&n->dev.vqs[index].poll);
  }
  
  static void vhost_net_flush(struct vhost_net *n)
  {
  	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
  	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
  }
  
  static int vhost_net_release(struct inode *inode, struct file *f)
  {
  	struct vhost_net *n = f->private_data;
  	struct socket *tx_sock;
  	struct socket *rx_sock;
  
  	vhost_net_stop(n, &tx_sock, &rx_sock);
  	vhost_net_flush(n);
  	vhost_dev_cleanup(&n->dev);
  	if (tx_sock)
  		fput(tx_sock->file);
  	if (rx_sock)
  		fput(rx_sock->file);
  	/* We do an extra flush before freeing memory,
  	 * since jobs can re-queue themselves. */
  	vhost_net_flush(n);
  	kfree(n);
  	return 0;
  }
  
  static struct socket *get_raw_socket(int fd)
  {
  	struct {
  		struct sockaddr_ll sa;
  		char  buf[MAX_ADDR_LEN];
  	} uaddr;
  	int uaddr_len = sizeof uaddr, r;
  	struct socket *sock = sockfd_lookup(fd, &r);

  	if (!sock)
  		return ERR_PTR(-ENOTSOCK);
  
  	/* Parameter checking */
  	if (sock->sk->sk_type != SOCK_RAW) {
  		r = -ESOCKTNOSUPPORT;
  		goto err;
  	}
  
  	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
  			       &uaddr_len, 0);
  	if (r)
  		goto err;
  
  	if (uaddr.sa.sll_family != AF_PACKET) {
  		r = -EPFNOSUPPORT;
  		goto err;
  	}
  	return sock;
  err:
  	fput(sock->file);
  	return ERR_PTR(r);
  }
  static struct socket *get_tap_socket(int fd)
  {
  	struct file *file = fget(fd);
  	struct socket *sock;

  	if (!file)
  		return ERR_PTR(-EBADF);
  	sock = tun_get_socket(file);
  	if (!IS_ERR(sock))
  		return sock;
  	sock = macvtap_get_socket(file);
  	if (IS_ERR(sock))
  		fput(file);
  	return sock;
  }
  
  static struct socket *get_socket(int fd)
  {
  	struct socket *sock;

  	/* special case to disable backend */
  	if (fd == -1)
  		return NULL;
  	sock = get_raw_socket(fd);
  	if (!IS_ERR(sock))
  		return sock;
  	sock = get_tap_socket(fd);
  	if (!IS_ERR(sock))
  		return sock;
  	return ERR_PTR(-ENOTSOCK);
  }
  
  static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
  {
  	struct socket *sock, *oldsock;
  	struct vhost_virtqueue *vq;
  	struct vhost_ubuf_ref *ubufs, *oldubufs = NULL;
  	int r;
  
  	mutex_lock(&n->dev.mutex);
  	r = vhost_dev_check_owner(&n->dev);
  	if (r)
  		goto err;
  
  	if (index >= VHOST_NET_VQ_MAX) {
  		r = -ENOBUFS;
  		goto err;
  	}
  	vq = n->vqs + index;
  	mutex_lock(&vq->mutex);
  
  	/* Verify that ring has been setup correctly. */
  	if (!vhost_vq_access_ok(vq)) {
  		r = -EFAULT;
  		goto err_vq;
  	}
  	sock = get_socket(fd);
  	if (IS_ERR(sock)) {
  		r = PTR_ERR(sock);
  		goto err_vq;
  	}
  
  	/* start polling new socket */
  	oldsock = rcu_dereference_protected(vq->private_data,
  					    lockdep_is_held(&vq->mutex));
  	if (sock != oldsock) {
  		ubufs = vhost_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock));
  		if (IS_ERR(ubufs)) {
  			r = PTR_ERR(ubufs);
  			goto err_ubufs;
  		}
  		oldubufs = vq->ubufs;
  		vq->ubufs = ubufs;
  		vhost_net_disable_vq(n, vq);
  		rcu_assign_pointer(vq->private_data, sock);
  		vhost_net_enable_vq(n, vq);
  
  		r = vhost_init_used(vq);
  		if (r)
  			goto err_vq;
  	}

  	mutex_unlock(&vq->mutex);
  	if (oldubufs) {
  		vhost_ubuf_put_and_wait(oldubufs);
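		/* All outstanding zerocopy sends have now completed; report
		 * their used buffers back to the guest under the vq lock. */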
  		mutex_lock(&vq->mutex);
  		vhost_zerocopy_signal_used(vq);
  		mutex_unlock(&vq->mutex);
  	}

  	if (oldsock) {
  		vhost_net_flush_vq(n, index);
  		fput(oldsock->file);
  	}

  	mutex_unlock(&n->dev.mutex);
  	return 0;
  err_ubufs:
  	fput(sock->file);
  err_vq:
  	mutex_unlock(&vq->mutex);
  err:
  	mutex_unlock(&n->dev.mutex);
  	return r;
  }
  
  static long vhost_net_reset_owner(struct vhost_net *n)
  {
  	struct socket *tx_sock = NULL;
  	struct socket *rx_sock = NULL;
  	long err;

  	mutex_lock(&n->dev.mutex);
  	err = vhost_dev_check_owner(&n->dev);
  	if (err)
  		goto done;
  	vhost_net_stop(n, &tx_sock, &rx_sock);
  	vhost_net_flush(n);
  	err = vhost_dev_reset_owner(&n->dev);
  done:
  	mutex_unlock(&n->dev.mutex);
  	if (tx_sock)
  		fput(tx_sock->file);
  	if (rx_sock)
  		fput(rx_sock->file);
  	return err;
  }
  
  static int vhost_net_set_features(struct vhost_net *n, u64 features)
  {
  	size_t vhost_hlen, sock_hlen, hdr_len;
  	int i;
  
  	hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
  			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
  			sizeof(struct virtio_net_hdr);
  	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
  		/* vhost provides vnet_hdr */
  		vhost_hlen = hdr_len;
  		sock_hlen = 0;
  	} else {
  		/* socket provides vnet_hdr */
  		vhost_hlen = 0;
  		sock_hlen = hdr_len;
  	}
  	mutex_lock(&n->dev.mutex);
  	if ((features & (1 << VHOST_F_LOG_ALL)) &&
  	    !vhost_log_access_ok(&n->dev)) {
  		mutex_unlock(&n->dev.mutex);
  		return -EFAULT;
  	}
  	n->dev.acked_features = features;
  	smp_wmb();
  	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
  		mutex_lock(&n->vqs[i].mutex);
  		n->vqs[i].vhost_hlen = vhost_hlen;
  		n->vqs[i].sock_hlen = sock_hlen;
  		mutex_unlock(&n->vqs[i].mutex);
  	}
  	vhost_net_flush(n);
  	mutex_unlock(&n->dev.mutex);
  	return 0;
  }
  
  static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
  			    unsigned long arg)
  {
  	struct vhost_net *n = f->private_data;
  	void __user *argp = (void __user *)arg;
  	u64 __user *featurep = argp;
  	struct vhost_vring_file backend;
  	u64 features;
  	int r;

  	switch (ioctl) {
  	case VHOST_NET_SET_BACKEND:
  		if (copy_from_user(&backend, argp, sizeof backend))
  			return -EFAULT;
  		return vhost_net_set_backend(n, backend.index, backend.fd);
  	case VHOST_GET_FEATURES:
  		features = VHOST_FEATURES;
  		if (copy_to_user(featurep, &features, sizeof features))
  			return -EFAULT;
  		return 0;
  	case VHOST_SET_FEATURES:
  		if (copy_from_user(&features, featurep, sizeof features))
  			return -EFAULT;
  		if (features & ~VHOST_FEATURES)
  			return -EOPNOTSUPP;
  		return vhost_net_set_features(n, features);
  	case VHOST_RESET_OWNER:
  		return vhost_net_reset_owner(n);
  	default:
  		mutex_lock(&n->dev.mutex);
  		r = vhost_dev_ioctl(&n->dev, ioctl, arg);
  		vhost_net_flush(n);
  		mutex_unlock(&n->dev.mutex);
  		return r;
  	}
  }
  
  #ifdef CONFIG_COMPAT
  static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
  				   unsigned long arg)
  {
  	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
  }
  #endif
  static const struct file_operations vhost_net_fops = {
  	.owner          = THIS_MODULE,
  	.release        = vhost_net_release,
  	.unlocked_ioctl = vhost_net_ioctl,
  #ifdef CONFIG_COMPAT
  	.compat_ioctl   = vhost_net_compat_ioctl,
  #endif
  	.open           = vhost_net_open,
  	.llseek		= noop_llseek,
  };
  
  static struct miscdevice vhost_net_misc = {
  	MISC_DYNAMIC_MINOR,
  	"vhost-net",
  	&vhost_net_fops,
  };
  static int vhost_net_init(void)
  {
  	if (experimental_zcopytx)
  		vhost_enable_zcopy(VHOST_NET_VQ_TX);
  	return misc_register(&vhost_net_misc);
  }
  module_init(vhost_net_init);
  static void vhost_net_exit(void)
  {
  	misc_deregister(&vhost_net_misc);
  }
  module_exit(vhost_net_exit);
  
  MODULE_VERSION("0.0.1");
  MODULE_LICENSE("GPL v2");
  MODULE_AUTHOR("Michael S. Tsirkin");
  MODULE_DESCRIPTION("Host kernel accelerator for virtio net");