Commit 7b3384fc30633738ae4eaf8e1bc6ce70470ced80

Author: Michael S. Tsirkin
Parent: d5675bd204

vhost: add unlikely annotations to error path

The patch 'break out of polling loop on error' caused
a minor performance regression on my machine; recover
that performance by adding a bunch of unlikely annotations
in the error handling.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
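
For context, unlikely() in the kernel expands to __builtin_expect(!!(x), 0), a GCC/Clang builtin that tells the compiler the branch is cold, so the hot path keeps its straight-line, fall-through layout. The minimal sketch below shows the pattern this commit applies; fetch_descriptor() is a hypothetical stand-in for vhost_get_vq_desc() and is not part of the patch.

#include <stdio.h>

#ifndef unlikely
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif

/* Hypothetical stand-in for vhost_get_vq_desc(): errors are rare. */
static int fetch_descriptor(int i)
{
	return (i % 1000 == 999) ? -1 : i;
}

int main(void)
{
	long handled = 0;
	int i;

	for (i = 0; i < 100000; i++) {
		int head = fetch_descriptor(i);

		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		handled++;
	}
	printf("handled %ld descriptors\n", handled);
	return 0;
}

With the hint, the compiler tends to emit the error handling out of line and predict the branch not-taken, which is where the recovered performance comes from.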

Showing 2 changed files with 29 additions and 28 deletions

drivers/vhost/net.c
... ... @@ -137,7 +137,7 @@
137 137 &out, &in,
138 138 NULL, NULL);
139 139 /* On error, stop handling until the next kick. */
140   - if (head < 0)
  140 + if (unlikely(head < 0))
141 141 break;
142 142 /* Nothing new? Wait for eventfd to tell us they refilled. */
143 143 if (head == vq->num) {
... ... @@ -234,7 +234,7 @@
234 234 &out, &in,
235 235 vq_log, &log);
236 236 /* On error, stop handling until the next kick. */
237   - if (head < 0)
  237 + if (unlikely(head < 0))
238 238 break;
239 239 /* OK, now we need to know about added descriptors. */
240 240 if (head == vq->num) {
drivers/vhost/vhost.c
... ... @@ -736,12 +736,12 @@
736 736 mem = rcu_dereference(dev->memory);
737 737 while ((u64)len > s) {
738 738 u64 size;
739   - if (ret >= iov_size) {
  739 + if (unlikely(ret >= iov_size)) {
740 740 ret = -ENOBUFS;
741 741 break;
742 742 }
743 743 reg = find_region(mem, addr, len);
744   - if (!reg) {
  744 + if (unlikely(!reg)) {
745 745 ret = -EFAULT;
746 746 break;
747 747 }
748 748  
... ... @@ -780,18 +780,18 @@
780 780 return next;
781 781 }
782 782  
783   -static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
784   - struct iovec iov[], unsigned int iov_size,
785   - unsigned int *out_num, unsigned int *in_num,
786   - struct vhost_log *log, unsigned int *log_num,
787   - struct vring_desc *indirect)
  783 +static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
  784 + struct iovec iov[], unsigned int iov_size,
  785 + unsigned int *out_num, unsigned int *in_num,
  786 + struct vhost_log *log, unsigned int *log_num,
  787 + struct vring_desc *indirect)
788 788 {
789 789 struct vring_desc desc;
790 790 unsigned int i = 0, count, found = 0;
791 791 int ret;
792 792  
793 793 /* Sanity check */
794   - if (indirect->len % sizeof desc) {
  794 + if (unlikely(indirect->len % sizeof desc)) {
795 795 vq_err(vq, "Invalid length in indirect descriptor: "
796 796 "len 0x%llx not multiple of 0x%zx\n",
797 797 (unsigned long long)indirect->len,
... ... @@ -801,7 +801,7 @@
801 801  
802 802 ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
803 803 ARRAY_SIZE(vq->indirect));
804   - if (ret < 0) {
  804 + if (unlikely(ret < 0)) {
805 805 vq_err(vq, "Translation failure %d in indirect.\n", ret);
806 806 return ret;
807 807 }
... ... @@ -813,7 +813,7 @@
813 813 count = indirect->len / sizeof desc;
814 814 /* Buffers are chained via a 16 bit next field, so
815 815 * we can have at most 2^16 of these. */
816   - if (count > USHRT_MAX + 1) {
  816 + if (unlikely(count > USHRT_MAX + 1)) {
817 817 vq_err(vq, "Indirect buffer length too big: %d\n",
818 818 indirect->len);
819 819 return -E2BIG;
820 820  
821 821  
... ... @@ -821,19 +821,19 @@
821 821  
822 822 do {
823 823 unsigned iov_count = *in_num + *out_num;
824   - if (++found > count) {
  824 + if (unlikely(++found > count)) {
825 825 vq_err(vq, "Loop detected: last one at %u "
826 826 "indirect size %u\n",
827 827 i, count);
828 828 return -EINVAL;
829 829 }
830   - if (memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
831   - sizeof desc)) {
  830 + if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
  831 + sizeof desc))) {
832 832 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
833 833 i, (size_t)indirect->addr + i * sizeof desc);
834 834 return -EINVAL;
835 835 }
836   - if (desc.flags & VRING_DESC_F_INDIRECT) {
  836 + if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
837 837 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
838 838 i, (size_t)indirect->addr + i * sizeof desc);
839 839 return -EINVAL;
... ... @@ -841,7 +841,7 @@
841 841  
842 842 ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
843 843 iov_size - iov_count);
844   - if (ret < 0) {
  844 + if (unlikely(ret < 0)) {
845 845 vq_err(vq, "Translation failure %d indirect idx %d\n",
846 846 ret, i);
847 847 return ret;
... ... @@ -857,7 +857,7 @@
857 857 } else {
858 858 /* If it's an output descriptor, they're all supposed
859 859 * to come before any input descriptors. */
860   - if (*in_num) {
  860 + if (unlikely(*in_num)) {
861 861 vq_err(vq, "Indirect descriptor "
862 862 "has out after in: idx %d\n", i);
863 863 return -EINVAL;
864 864  
... ... @@ -888,13 +888,13 @@
888 888  
889 889 /* Check it isn't doing very strange things with descriptor numbers. */
890 890 last_avail_idx = vq->last_avail_idx;
891   - if (get_user(vq->avail_idx, &vq->avail->idx)) {
  891 + if (unlikely(get_user(vq->avail_idx, &vq->avail->idx))) {
892 892 vq_err(vq, "Failed to access avail idx at %p\n",
893 893 &vq->avail->idx);
894 894 return -EFAULT;
895 895 }
896 896  
897   - if ((u16)(vq->avail_idx - last_avail_idx) > vq->num) {
  897 + if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
898 898 vq_err(vq, "Guest moved used index from %u to %u",
899 899 last_avail_idx, vq->avail_idx);
900 900 return -EFAULT;
... ... @@ -909,7 +909,8 @@
909 909  
910 910 /* Grab the next descriptor number they're advertising, and increment
911 911 * the index we've seen. */
912   - if (get_user(head, &vq->avail->ring[last_avail_idx % vq->num])) {
  912 + if (unlikely(get_user(head,
  913 + &vq->avail->ring[last_avail_idx % vq->num]))) {
913 914 vq_err(vq, "Failed to read head: idx %d address %p\n",
914 915 last_avail_idx,
915 916 &vq->avail->ring[last_avail_idx % vq->num]);
... ... @@ -917,7 +918,7 @@
917 918 }
918 919  
919 920 /* If their number is silly, that's an error. */
920   - if (head >= vq->num) {
  921 + if (unlikely(head >= vq->num)) {
921 922 vq_err(vq, "Guest says index %u > %u is available",
922 923 head, vq->num);
923 924 return -EINVAL;
924 925  
925 926  
... ... @@ -931,19 +932,19 @@
931 932 i = head;
932 933 do {
933 934 unsigned iov_count = *in_num + *out_num;
934   - if (i >= vq->num) {
  935 + if (unlikely(i >= vq->num)) {
935 936 vq_err(vq, "Desc index is %u > %u, head = %u",
936 937 i, vq->num, head);
937 938 return -EINVAL;
938 939 }
939   - if (++found > vq->num) {
  940 + if (unlikely(++found > vq->num)) {
940 941 vq_err(vq, "Loop detected: last one at %u "
941 942 "vq size %u head %u\n",
942 943 i, vq->num, head);
943 944 return -EINVAL;
944 945 }
945 946 ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
946   - if (ret) {
  947 + if (unlikely(ret)) {
947 948 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
948 949 i, vq->desc + i);
949 950 return -EFAULT;
... ... @@ -952,7 +953,7 @@
952 953 ret = get_indirect(dev, vq, iov, iov_size,
953 954 out_num, in_num,
954 955 log, log_num, &desc);
955   - if (ret < 0) {
  956 + if (unlikely(ret < 0)) {
956 957 vq_err(vq, "Failure detected "
957 958 "in indirect descriptor at idx %d\n", i);
958 959 return ret;
... ... @@ -962,7 +963,7 @@
962 963  
963 964 ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
964 965 iov_size - iov_count);
965   - if (ret < 0) {
  966 + if (unlikely(ret < 0)) {
966 967 vq_err(vq, "Translation failure %d descriptor idx %d\n",
967 968 ret, i);
968 969 return ret;
... ... @@ -979,7 +980,7 @@
979 980 } else {
980 981 /* If it's an output descriptor, they're all supposed
981 982 * to come before any input descriptors. */
982   - if (*in_num) {
  983 + if (unlikely(*in_num)) {
983 984 vq_err(vq, "Descriptor has out after in: "
984 985 "idx %d\n", i);
985 986 return -EINVAL;
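
Note that, beyond the unlikely() annotations, the hunk at @@ -780,18 +780,18 @@ also changes get_indirect()'s return type from unsigned to int, matching the negative error codes it returns (-EINVAL, -E2BIG, and whatever translate_desc() propagates). The sketch below illustrates the generic pitfall with hypothetical helpers; it is not code from the patch.

#include <stdio.h>
#include <errno.h>

/* Hypothetical helpers: the same error value behind two signatures. */
static unsigned get_unsigned(void)
{
	return -EINVAL;		/* implicitly wraps to a huge positive value */
}

static int get_int(void)
{
	return -EINVAL;		/* stays negative */
}

int main(void)
{
	if (get_unsigned() < 0)		/* always false: unsigned is never < 0 */
		puts("unsigned: error caught");
	else
		puts("unsigned: error missed");

	if (get_int() < 0)		/* works, as in 'if (unlikely(ret < 0))' */
		puts("int: error caught");
	return 0;
}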