Commit d47effe1be0c4fc983306a9c704632e3a087eed8

Authored by Krishna Kumar
Committed by Michael S. Tsirkin
1 parent 1fc050a134

vhost: Cleanup vhost.c and net.c

Minor cleanup of vhost.c and net.c to match coding style.

Signed-off-by: Krishna Kumar <krkumar2@in.ibm.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
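
Most of the patch applies the kernel coding-style rule that a blank line separates a function's local variable declarations from its first statement; the rest re-indents misaligned blocks and wraps lines that ran past 80 columns, with no behavioral change. A minimal standalone sketch of the declarations rule, using a hypothetical iov_total() helper rather than code from the patch:

	#include <stdio.h>

	/* Hypothetical helper, for illustration only: note the blank
	 * line between the local declarations and the first statement,
	 * the same fix repeated throughout the hunks below. */
	static int iov_total(const size_t *lens, int count)
	{
		int i;
		size_t total = 0;

		for (i = 0; i < count; i++)
			total += lens[i];
		return (int)total;
	}

	int main(void)
	{
		size_t lens[] = { 16, 32, 64 };

		printf("total = %d\n", iov_total(lens, 3));
		return 0;
	}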

Showing 2 changed files with 49 additions and 23 deletions

drivers/vhost/net.c
... ... @@ -60,6 +60,7 @@
60 60 {
61 61 int seg = 0;
62 62 size_t size;
  63 +
63 64 while (len && seg < iov_count) {
64 65 size = min(from->iov_len, len);
65 66 to->iov_base = from->iov_base;
... ... @@ -79,6 +80,7 @@
79 80 {
80 81 int seg = 0;
81 82 size_t size;
  83 +
82 84 while (len && seg < iovcount) {
83 85 size = min(from->iov_len, len);
84 86 to->iov_base = from->iov_base;
... ... @@ -296,17 +298,16 @@
296 298 .msg_iov = vq->iov,
297 299 .msg_flags = MSG_DONTWAIT,
298 300 };
299   -
300 301 struct virtio_net_hdr hdr = {
301 302 .flags = 0,
302 303 .gso_type = VIRTIO_NET_HDR_GSO_NONE
303 304 };
304   -
305 305 size_t len, total_len = 0;
306 306 int err;
307 307 size_t hdr_size;
308 308 /* TODO: check that we are running from vhost_worker? */
309 309 struct socket *sock = rcu_dereference_check(vq->private_data, 1);
  310 +
310 311 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
311 312 return;
312 313  
... ... @@ -405,18 +406,17 @@
405 406 .msg_iov = vq->iov,
406 407 .msg_flags = MSG_DONTWAIT,
407 408 };
408   -
409 409 struct virtio_net_hdr_mrg_rxbuf hdr = {
410 410 .hdr.flags = 0,
411 411 .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
412 412 };
413   -
414 413 size_t total_len = 0;
415 414 int err, headcount;
416 415 size_t vhost_hlen, sock_hlen;
417 416 size_t vhost_len, sock_len;
418 417 /* TODO: check that we are running from vhost_worker? */
419 418 struct socket *sock = rcu_dereference_check(vq->private_data, 1);
  419 +
420 420 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
421 421 return;
422 422  
... ... @@ -654,6 +654,7 @@
654 654 } uaddr;
655 655 int uaddr_len = sizeof uaddr, r;
656 656 struct socket *sock = sockfd_lookup(fd, &r);
  657 +
657 658 if (!sock)
658 659 return ERR_PTR(-ENOTSOCK);
659 660  
... ... @@ -682,6 +683,7 @@
682 683 {
683 684 struct file *file = fget(fd);
684 685 struct socket *sock;
  686 +
685 687 if (!file)
686 688 return ERR_PTR(-EBADF);
687 689 sock = tun_get_socket(file);
... ... @@ -696,6 +698,7 @@
696 698 static struct socket *get_socket(int fd)
697 699 {
698 700 struct socket *sock;
  701 +
699 702 /* special case to disable backend */
700 703 if (fd == -1)
701 704 return NULL;
... ... @@ -741,9 +744,9 @@
741 744 oldsock = rcu_dereference_protected(vq->private_data,
742 745 lockdep_is_held(&vq->mutex));
743 746 if (sock != oldsock) {
744   - vhost_net_disable_vq(n, vq);
745   - rcu_assign_pointer(vq->private_data, sock);
746   - vhost_net_enable_vq(n, vq);
  747 + vhost_net_disable_vq(n, vq);
  748 + rcu_assign_pointer(vq->private_data, sock);
  749 + vhost_net_enable_vq(n, vq);
747 750 }
748 751  
749 752 mutex_unlock(&vq->mutex);
... ... @@ -768,6 +771,7 @@
768 771 struct socket *tx_sock = NULL;
769 772 struct socket *rx_sock = NULL;
770 773 long err;
  774 +
771 775 mutex_lock(&n->dev.mutex);
772 776 err = vhost_dev_check_owner(&n->dev);
773 777 if (err)
... ... @@ -829,6 +833,7 @@
829 833 struct vhost_vring_file backend;
830 834 u64 features;
831 835 int r;
  836 +
832 837 switch (ioctl) {
833 838 case VHOST_NET_SET_BACKEND:
834 839 if (copy_from_user(&backend, argp, sizeof backend))
drivers/vhost/vhost.c
... ... @@ -41,8 +41,8 @@
41 41 poll_table *pt)
42 42 {
43 43 struct vhost_poll *poll;
44   - poll = container_of(pt, struct vhost_poll, table);
45 44  
  45 + poll = container_of(pt, struct vhost_poll, table);
46 46 poll->wqh = wqh;
47 47 add_wait_queue(wqh, &poll->wait);
48 48 }
... ... @@ -85,6 +85,7 @@
85 85 void vhost_poll_start(struct vhost_poll *poll, struct file *file)
86 86 {
87 87 unsigned long mask;
  88 +
88 89 mask = file->f_op->poll(file, &poll->table);
89 90 if (mask)
90 91 vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
... ... @@ -101,6 +102,7 @@
101 102 unsigned seq)
102 103 {
103 104 int left;
  105 +
104 106 spin_lock_irq(&dev->work_lock);
105 107 left = seq - work->done_seq;
106 108 spin_unlock_irq(&dev->work_lock);
... ... @@ -222,6 +224,7 @@
222 224 static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
223 225 {
224 226 int i;
  227 +
225 228 for (i = 0; i < dev->nvqs; ++i) {
226 229 dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
227 230 UIO_MAXIOV, GFP_KERNEL);
... ... @@ -235,6 +238,7 @@
235 238 goto err_nomem;
236 239 }
237 240 return 0;
  241 +
238 242 err_nomem:
239 243 for (; i >= 0; --i) {
240 244 kfree(dev->vqs[i].indirect);
... ... @@ -247,6 +251,7 @@
247 251 static void vhost_dev_free_iovecs(struct vhost_dev *dev)
248 252 {
249 253 int i;
  254 +
250 255 for (i = 0; i < dev->nvqs; ++i) {
251 256 kfree(dev->vqs[i].indirect);
252 257 dev->vqs[i].indirect = NULL;
... ... @@ -296,26 +301,28 @@
296 301 }
297 302  
298 303 struct vhost_attach_cgroups_struct {
299   - struct vhost_work work;
300   - struct task_struct *owner;
301   - int ret;
  304 + struct vhost_work work;
  305 + struct task_struct *owner;
  306 + int ret;
302 307 };
303 308  
304 309 static void vhost_attach_cgroups_work(struct vhost_work *work)
305 310 {
306   - struct vhost_attach_cgroups_struct *s;
307   - s = container_of(work, struct vhost_attach_cgroups_struct, work);
308   - s->ret = cgroup_attach_task_all(s->owner, current);
  311 + struct vhost_attach_cgroups_struct *s;
  312 +
  313 + s = container_of(work, struct vhost_attach_cgroups_struct, work);
  314 + s->ret = cgroup_attach_task_all(s->owner, current);
309 315 }
310 316  
311 317 static int vhost_attach_cgroups(struct vhost_dev *dev)
312 318 {
313   - struct vhost_attach_cgroups_struct attach;
314   - attach.owner = current;
315   - vhost_work_init(&attach.work, vhost_attach_cgroups_work);
316   - vhost_work_queue(dev, &attach.work);
317   - vhost_work_flush(dev, &attach.work);
318   - return attach.ret;
  319 + struct vhost_attach_cgroups_struct attach;
  320 +
  321 + attach.owner = current;
  322 + vhost_work_init(&attach.work, vhost_attach_cgroups_work);
  323 + vhost_work_queue(dev, &attach.work);
  324 + vhost_work_flush(dev, &attach.work);
  325 + return attach.ret;
319 326 }
320 327  
321 328 /* Caller should have device mutex */
... ... @@ -323,11 +330,13 @@
323 330 {
324 331 struct task_struct *worker;
325 332 int err;
  333 +
326 334 /* Is there an owner already? */
327 335 if (dev->mm) {
328 336 err = -EBUSY;
329 337 goto err_mm;
330 338 }
  339 +
331 340 /* No owner, become one */
332 341 dev->mm = get_task_mm(current);
333 342 worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
... ... @@ -380,6 +389,7 @@
380 389 void vhost_dev_cleanup(struct vhost_dev *dev)
381 390 {
382 391 int i;
  392 +
383 393 for (i = 0; i < dev->nvqs; ++i) {
384 394 if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
385 395 vhost_poll_stop(&dev->vqs[i].poll);
... ... @@ -421,6 +431,7 @@
421 431 static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
422 432 {
423 433 u64 a = addr / VHOST_PAGE_SIZE / 8;
  434 +
424 435 /* Make sure 64 bit math will not overflow. */
425 436 if (a > ULONG_MAX - (unsigned long)log_base ||
426 437 a + (unsigned long)log_base > ULONG_MAX)
... ... @@ -461,6 +472,7 @@
461 472 int log_all)
462 473 {
463 474 int i;
  475 +
464 476 for (i = 0; i < d->nvqs; ++i) {
465 477 int ok;
466 478 mutex_lock(&d->vqs[i].mutex);
... ... @@ -527,6 +539,7 @@
527 539 {
528 540 struct vhost_memory mem, *newmem, *oldmem;
529 541 unsigned long size = offsetof(struct vhost_memory, regions);
  542 +
530 543 if (copy_from_user(&mem, m, size))
531 544 return -EFAULT;
532 545 if (mem.padding)
... ... @@ -544,7 +557,8 @@
544 557 return -EFAULT;
545 558 }
546 559  
547   - if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) {
  560 + if (!memory_access_ok(d, newmem,
  561 + vhost_has_feature(d, VHOST_F_LOG_ALL))) {
548 562 kfree(newmem);
549 563 return -EFAULT;
550 564 }
... ... @@ -560,6 +574,7 @@
560 574 struct vring_used __user *used)
561 575 {
562 576 int r = put_user(vq->used_flags, &used->flags);
  577 +
563 578 if (r)
564 579 return r;
565 580 return get_user(vq->last_used_idx, &used->idx);
... ... @@ -849,6 +864,7 @@
849 864 {
850 865 struct vhost_memory_region *reg;
851 866 int i;
  867 +
852 868 /* linear search is not brilliant, but we really have on the order of 6
853 869 * regions in practice */
854 870 for (i = 0; i < mem->nregions; ++i) {
... ... @@ -871,6 +887,7 @@
871 887 void *base;
872 888 int bit = nr + (log % PAGE_SIZE) * 8;
873 889 int r;
  890 +
874 891 r = get_user_pages_fast(log, 1, 1, &page);
875 892 if (r < 0)
876 893 return r;
... ... @@ -888,6 +905,7 @@
888 905 {
889 906 u64 write_page = write_address / VHOST_PAGE_SIZE;
890 907 int r;
  908 +
891 909 if (!write_length)
892 910 return 0;
893 911 write_length += write_address % VHOST_PAGE_SIZE;
... ... @@ -1037,8 +1055,8 @@
1037 1055 i, count);
1038 1056 return -EINVAL;
1039 1057 }
1040   - if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
1041   - sizeof desc))) {
  1058 + if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
  1059 + vq->indirect, sizeof desc))) {
1042 1060 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1043 1061 i, (size_t)indirect->addr + i * sizeof desc);
1044 1062 return -EINVAL;
... ... @@ -1317,6 +1335,7 @@
1317 1335 void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1318 1336 {
1319 1337 __u16 flags;
  1338 +
1320 1339 /* Flush out used index updates. This is paired
1321 1340 * with the barrier that the Guest executes when enabling
1322 1341 * interrupts. */
... ... @@ -1361,6 +1380,7 @@
1361 1380 {
1362 1381 u16 avail_idx;
1363 1382 int r;
  1383 +
1364 1384 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1365 1385 return false;
1366 1386 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
... ... @@ -1387,6 +1407,7 @@
1387 1407 void vhost_disable_notify(struct vhost_virtqueue *vq)
1388 1408 {
1389 1409 int r;
  1410 +
1390 1411 if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1391 1412 return;
1392 1413 vq->used_flags |= VRING_USED_F_NO_NOTIFY;