Commit 94249369e9930276e30087da205349a55478cbb5
Committed by
Michael S. Tsirkin
1 parent
cfbdab9513
Exists in
master
and in
20 other branches
vhost-net: Unify the code of mergeable and big buffer handling
Code duplication was found between the handling of mergeable and big buffers, so this patch tries to unify them. This could be easily done by adding a quota to the get_rx_bufs() which is used to limit the number of buffers it returns (for mergeable buffer, the quota is simply UIO_MAXIOV, for big buffers, the quota is just 1), and then the previous handle_rx_mergeable() could be reused also for big buffers. Signed-off-by: Jason Wang <jasowang@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Showing 1 changed file with 7 additions and 121 deletions Side-by-side Diff
drivers/vhost/net.c
... | ... | @@ -229,6 +229,7 @@ |
229 | 229 | * @iovcount - returned count of io vectors we fill |
230 | 230 | * @log - vhost log |
231 | 231 | * @log_num - log offset |
232 | + * @quota - headcount quota, 1 for big buffer | |
232 | 233 | * returns number of buffer heads allocated, negative on error |
233 | 234 | */ |
234 | 235 | static int get_rx_bufs(struct vhost_virtqueue *vq, |
... | ... | @@ -236,7 +237,8 @@ |
236 | 237 | int datalen, |
237 | 238 | unsigned *iovcount, |
238 | 239 | struct vhost_log *log, |
239 | - unsigned *log_num) | |
240 | + unsigned *log_num, | |
241 | + unsigned int quota) | |
240 | 242 | { |
241 | 243 | unsigned int out, in; |
242 | 244 | int seg = 0; |
... | ... | @@ -244,7 +246,7 @@ |
244 | 246 | unsigned d; |
245 | 247 | int r, nlogs = 0; |
246 | 248 | |
247 | - while (datalen > 0) { | |
249 | + while (datalen > 0 && headcount < quota) { | |
248 | 250 | if (unlikely(seg >= UIO_MAXIOV)) { |
249 | 251 | r = -ENOBUFS; |
250 | 252 | goto err; |
251 | 253 | |
... | ... | @@ -284,118 +286,9 @@ |
284 | 286 | |
285 | 287 | /* Expects to be always run from workqueue - which acts as |
286 | 288 | * read-size critical section for our kind of RCU. */ |
287 | -static void handle_rx_big(struct vhost_net *net) | |
289 | +static void handle_rx(struct vhost_net *net) | |
288 | 290 | { |
289 | 291 | struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX]; |
290 | - unsigned out, in, log, s; | |
291 | - int head; | |
292 | - struct vhost_log *vq_log; | |
293 | - struct msghdr msg = { | |
294 | - .msg_name = NULL, | |
295 | - .msg_namelen = 0, | |
296 | - .msg_control = NULL, /* FIXME: get and handle RX aux data. */ | |
297 | - .msg_controllen = 0, | |
298 | - .msg_iov = vq->iov, | |
299 | - .msg_flags = MSG_DONTWAIT, | |
300 | - }; | |
301 | - struct virtio_net_hdr hdr = { | |
302 | - .flags = 0, | |
303 | - .gso_type = VIRTIO_NET_HDR_GSO_NONE | |
304 | - }; | |
305 | - size_t len, total_len = 0; | |
306 | - int err; | |
307 | - size_t hdr_size; | |
308 | - /* TODO: check that we are running from vhost_worker? */ | |
309 | - struct socket *sock = rcu_dereference_check(vq->private_data, 1); | |
310 | - | |
311 | - if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue)) | |
312 | - return; | |
313 | - | |
314 | - mutex_lock(&vq->mutex); | |
315 | - vhost_disable_notify(vq); | |
316 | - hdr_size = vq->vhost_hlen; | |
317 | - | |
318 | - vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ? | |
319 | - vq->log : NULL; | |
320 | - | |
321 | - for (;;) { | |
322 | - head = vhost_get_vq_desc(&net->dev, vq, vq->iov, | |
323 | - ARRAY_SIZE(vq->iov), | |
324 | - &out, &in, | |
325 | - vq_log, &log); | |
326 | - /* On error, stop handling until the next kick. */ | |
327 | - if (unlikely(head < 0)) | |
328 | - break; | |
329 | - /* OK, now we need to know about added descriptors. */ | |
330 | - if (head == vq->num) { | |
331 | - if (unlikely(vhost_enable_notify(vq))) { | |
332 | - /* They have slipped one in as we were | |
333 | - * doing that: check again. */ | |
334 | - vhost_disable_notify(vq); | |
335 | - continue; | |
336 | - } | |
337 | - /* Nothing new? Wait for eventfd to tell us | |
338 | - * they refilled. */ | |
339 | - break; | |
340 | - } | |
341 | - /* We don't need to be notified again. */ | |
342 | - if (out) { | |
343 | - vq_err(vq, "Unexpected descriptor format for RX: " | |
344 | - "out %d, int %d\n", | |
345 | - out, in); | |
346 | - break; | |
347 | - } | |
348 | - /* Skip header. TODO: support TSO/mergeable rx buffers. */ | |
349 | - s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in); | |
350 | - msg.msg_iovlen = in; | |
351 | - len = iov_length(vq->iov, in); | |
352 | - /* Sanity check */ | |
353 | - if (!len) { | |
354 | - vq_err(vq, "Unexpected header len for RX: " | |
355 | - "%zd expected %zd\n", | |
356 | - iov_length(vq->hdr, s), hdr_size); | |
357 | - break; | |
358 | - } | |
359 | - err = sock->ops->recvmsg(NULL, sock, &msg, | |
360 | - len, MSG_DONTWAIT | MSG_TRUNC); | |
361 | - /* TODO: Check specific error and bomb out unless EAGAIN? */ | |
362 | - if (err < 0) { | |
363 | - vhost_discard_vq_desc(vq, 1); | |
364 | - break; | |
365 | - } | |
366 | - /* TODO: Should check and handle checksum. */ | |
367 | - if (err > len) { | |
368 | - pr_debug("Discarded truncated rx packet: " | |
369 | - " len %d > %zd\n", err, len); | |
370 | - vhost_discard_vq_desc(vq, 1); | |
371 | - continue; | |
372 | - } | |
373 | - len = err; | |
374 | - err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size); | |
375 | - if (err) { | |
376 | - vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n", | |
377 | - vq->iov->iov_base, err); | |
378 | - break; | |
379 | - } | |
380 | - len += hdr_size; | |
381 | - vhost_add_used_and_signal(&net->dev, vq, head, len); | |
382 | - if (unlikely(vq_log)) | |
383 | - vhost_log_write(vq, vq_log, log, len); | |
384 | - total_len += len; | |
385 | - if (unlikely(total_len >= VHOST_NET_WEIGHT)) { | |
386 | - vhost_poll_queue(&vq->poll); | |
387 | - break; | |
388 | - } | |
389 | - } | |
390 | - | |
391 | - mutex_unlock(&vq->mutex); | |
392 | -} | |
393 | - | |
394 | -/* Expects to be always run from workqueue - which acts as | |
395 | - * read-size critical section for our kind of RCU. */ | |
396 | -static void handle_rx_mergeable(struct vhost_net *net) | |
397 | -{ | |
398 | - struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX]; | |
399 | 292 | unsigned uninitialized_var(in), log; |
400 | 293 | struct vhost_log *vq_log; |
401 | 294 | struct msghdr msg = { |
... | ... | @@ -433,7 +326,8 @@ |
433 | 326 | sock_len += sock_hlen; |
434 | 327 | vhost_len = sock_len + vhost_hlen; |
435 | 328 | headcount = get_rx_bufs(vq, vq->heads, vhost_len, |
436 | - &in, vq_log, &log); | |
329 | + &in, vq_log, &log, | |
330 | + likely(mergeable) ? UIO_MAXIOV : 1); | |
437 | 331 | /* On error, stop handling until the next kick. */ |
438 | 332 | if (unlikely(headcount < 0)) |
439 | 333 | break; |
... | ... | @@ -497,14 +391,6 @@ |
497 | 391 | } |
498 | 392 | |
499 | 393 | mutex_unlock(&vq->mutex); |
500 | -} | |
501 | - | |
502 | -static void handle_rx(struct vhost_net *net) | |
503 | -{ | |
504 | - if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF)) | |
505 | - handle_rx_mergeable(net); | |
506 | - else | |
507 | - handle_rx_big(net); | |
508 | 394 | } |
509 | 395 | |
510 | 396 | static void handle_tx_kick(struct vhost_work *work) |