Commit 2122ff5eab8faec853e43f6de886e8dc8f31e317

Authored by Avi Kivity
1 parent 1683b2416e

KVM: move vcpu locking to dispatcher for generic vcpu ioctls

All vcpu ioctls need to be locked, so instead of locking each one specifically
we lock at the generic dispatcher.

This patch only updates generic ioctls and leaves arch specific ioctls alone.

Signed-off-by: Avi Kivity <avi@redhat.com>
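
For readers skimming the diff below, here is a minimal sketch of the locking pattern being changed (this is not kernel source; example_get_regs_locked() and example_vcpu_ioctl() are made-up names and the register copying is elided): before the patch each generic handler bracketed its own work with vcpu_load()/vcpu_put(), after it the dispatcher takes the lock once around the whole ioctl switch.

#include <linux/kvm_host.h>	/* struct kvm_vcpu, vcpu_load(), vcpu_put() */

/* Before: every generic vcpu ioctl handler locked the vcpu itself. */
int example_get_regs_locked(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);		/* take vcpu->mutex, load guest state */
	/* ... arch-specific register copy into *regs ... */
	vcpu_put(vcpu);			/* save guest state, drop vcpu->mutex */
	return 0;
}

/* After: the dispatcher locks once, so handlers run with the vcpu loaded. */
long example_vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned int ioctl, void __user *argp)
{
	long r = -EINVAL;

	vcpu_load(vcpu);
	switch (ioctl) {
	case KVM_GET_REGS:
		/* handler body no longer calls vcpu_load()/vcpu_put() */
		r = 0;
		break;
	default:
		break;
	}
	vcpu_put(vcpu);
	return r;
}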

Showing 7 changed files with 17 additions and 95 deletions

arch/ia64/kvm/kvm-ia64.c
... ... @@ -725,8 +725,6 @@
725 725 int r;
726 726 sigset_t sigsaved;
727 727  
728   - vcpu_load(vcpu);
729   -
730 728 if (vcpu->sigset_active)
731 729 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
732 730  
... ... @@ -748,7 +746,6 @@
748 746 if (vcpu->sigset_active)
749 747 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
750 748  
751   - vcpu_put(vcpu);
752 749 return r;
753 750 }
754 751  
... ... @@ -883,8 +880,6 @@
883 880 struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
884 881 int i;
885 882  
886   - vcpu_load(vcpu);
887   -
888 883 for (i = 0; i < 16; i++) {
889 884 vpd->vgr[i] = regs->vpd.vgr[i];
890 885 vpd->vbgr[i] = regs->vpd.vbgr[i];
... ... @@ -931,8 +926,6 @@
931 926 vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
932 927 set_bit(KVM_REQ_RESUME, &vcpu->requests);
933 928  
934   - vcpu_put(vcpu);
935   -
936 929 return 0;
937 930 }
938 931  
939 932  
... ... @@ -1967,9 +1960,7 @@
1967 1960 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1968 1961 struct kvm_mp_state *mp_state)
1969 1962 {
1970   - vcpu_load(vcpu);
1971 1963 mp_state->mp_state = vcpu->arch.mp_state;
1972   - vcpu_put(vcpu);
1973 1964 return 0;
1974 1965 }
1975 1966  
1976 1967  
... ... @@ -2000,11 +1991,9 @@
2000 1991 {
2001 1992 int r = 0;
2002 1993  
2003   - vcpu_load(vcpu);
2004 1994 vcpu->arch.mp_state = mp_state->mp_state;
2005 1995 if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
2006 1996 r = vcpu_reset(vcpu);
2007   - vcpu_put(vcpu);
2008 1997 return r;
2009 1998 }
arch/powerpc/kvm/book3s.c
... ... @@ -1047,8 +1047,6 @@
1047 1047 {
1048 1048 int i;
1049 1049  
1050   - vcpu_load(vcpu);
1051   -
1052 1050 regs->pc = kvmppc_get_pc(vcpu);
1053 1051 regs->cr = kvmppc_get_cr(vcpu);
1054 1052 regs->ctr = kvmppc_get_ctr(vcpu);
... ... @@ -1069,8 +1067,6 @@
1069 1067 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1070 1068 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
1071 1069  
1072   - vcpu_put(vcpu);
1073   -
1074 1070 return 0;
1075 1071 }
1076 1072  
... ... @@ -1078,8 +1074,6 @@
1078 1074 {
1079 1075 int i;
1080 1076  
1081   - vcpu_load(vcpu);
1082   -
1083 1077 kvmppc_set_pc(vcpu, regs->pc);
1084 1078 kvmppc_set_cr(vcpu, regs->cr);
1085 1079 kvmppc_set_ctr(vcpu, regs->ctr);
... ... @@ -1099,8 +1093,6 @@
1099 1093 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1100 1094 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
1101 1095  
1102   - vcpu_put(vcpu);
1103   -
1104 1096 return 0;
1105 1097 }
1106 1098  
... ... @@ -1110,8 +1102,6 @@
1110 1102 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1111 1103 int i;
1112 1104  
1113   - vcpu_load(vcpu);
1114   -
1115 1105 sregs->pvr = vcpu->arch.pvr;
1116 1106  
1117 1107 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
... ... @@ -1131,8 +1121,6 @@
1131 1121 }
1132 1122 }
1133 1123  
1134   - vcpu_put(vcpu);
1135   -
1136 1124 return 0;
1137 1125 }
1138 1126  
... ... @@ -1142,8 +1130,6 @@
1142 1130 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1143 1131 int i;
1144 1132  
1145   - vcpu_load(vcpu);
1146   -
1147 1133 kvmppc_set_pvr(vcpu, sregs->pvr);
1148 1134  
1149 1135 vcpu3s->sdr1 = sregs->u.s.sdr1;
... ... @@ -1170,8 +1156,6 @@
1170 1156  
1171 1157 /* Flush the MMU after messing with the segments */
1172 1158 kvmppc_mmu_pte_flush(vcpu, 0, 0);
1173   -
1174   - vcpu_put(vcpu);
1175 1159  
1176 1160 return 0;
1177 1161 }
arch/powerpc/kvm/booke.c
... ... @@ -485,8 +485,6 @@
485 485 {
486 486 int i;
487 487  
488   - vcpu_load(vcpu);
489   -
490 488 regs->pc = vcpu->arch.pc;
491 489 regs->cr = kvmppc_get_cr(vcpu);
492 490 regs->ctr = vcpu->arch.ctr;
... ... @@ -507,8 +505,6 @@
507 505 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
508 506 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
509 507  
510   - vcpu_put(vcpu);
511   -
512 508 return 0;
513 509 }
514 510  
... ... @@ -516,8 +512,6 @@
516 512 {
517 513 int i;
518 514  
519   - vcpu_load(vcpu);
520   -
521 515 vcpu->arch.pc = regs->pc;
522 516 kvmppc_set_cr(vcpu, regs->cr);
523 517 vcpu->arch.ctr = regs->ctr;
... ... @@ -537,8 +531,6 @@
537 531 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
538 532 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
539 533  
540   - vcpu_put(vcpu);
541   -
542 534 return 0;
543 535 }
544 536  
545 537  
... ... @@ -569,9 +561,7 @@
569 561 {
570 562 int r;
571 563  
572   - vcpu_load(vcpu);
573 564 r = kvmppc_core_vcpu_translate(vcpu, tr);
574   - vcpu_put(vcpu);
575 565 return r;
576 566 }
577 567  
arch/powerpc/kvm/powerpc.c
... ... @@ -423,8 +423,6 @@
423 423 int r;
424 424 sigset_t sigsaved;
425 425  
426   - vcpu_load(vcpu);
427   -
428 426 if (vcpu->sigset_active)
429 427 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
430 428  
... ... @@ -455,8 +453,6 @@
455 453  
456 454 if (vcpu->sigset_active)
457 455 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
458   -
459   - vcpu_put(vcpu);
460 456  
461 457 return r;
462 458 }
arch/s390/kvm/kvm-s390.c
... ... @@ -371,55 +371,43 @@
371 371  
372 372 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
373 373 {
374   - vcpu_load(vcpu);
375 374 memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
376   - vcpu_put(vcpu);
377 375 return 0;
378 376 }
379 377  
380 378 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
381 379 {
382   - vcpu_load(vcpu);
383 380 memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
384   - vcpu_put(vcpu);
385 381 return 0;
386 382 }
387 383  
388 384 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
389 385 struct kvm_sregs *sregs)
390 386 {
391   - vcpu_load(vcpu);
392 387 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
393 388 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
394   - vcpu_put(vcpu);
395 389 return 0;
396 390 }
397 391  
398 392 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
399 393 struct kvm_sregs *sregs)
400 394 {
401   - vcpu_load(vcpu);
402 395 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
403 396 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
404   - vcpu_put(vcpu);
405 397 return 0;
406 398 }
407 399  
408 400 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
409 401 {
410   - vcpu_load(vcpu);
411 402 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
412 403 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
413   - vcpu_put(vcpu);
414 404 return 0;
415 405 }
416 406  
417 407 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
418 408 {
419   - vcpu_load(vcpu);
420 409 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
421 410 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
422   - vcpu_put(vcpu);
423 411 return 0;
424 412 }
425 413  
... ... @@ -498,8 +486,6 @@
498 486 int rc;
499 487 sigset_t sigsaved;
500 488  
501   - vcpu_load(vcpu);
502   -
503 489 rerun_vcpu:
504 490 if (vcpu->requests)
505 491 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
... ... @@ -567,8 +553,6 @@
567 553  
568 554 if (vcpu->sigset_active)
569 555 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
570   -
571   - vcpu_put(vcpu);
572 556  
573 557 vcpu->stat.exit_userspace++;
574 558 return rc;
arch/x86/kvm/x86.c
... ... @@ -4773,8 +4773,6 @@
4773 4773 int r;
4774 4774 sigset_t sigsaved;
4775 4775  
4776   - vcpu_load(vcpu);
4777   -
4778 4776 if (vcpu->sigset_active)
4779 4777 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
4780 4778  
4781 4779  
... ... @@ -4815,14 +4813,11 @@
4815 4813 if (vcpu->sigset_active)
4816 4814 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
4817 4815  
4818   - vcpu_put(vcpu);
4819 4816 return r;
4820 4817 }
4821 4818  
4822 4819 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4823 4820 {
4824   - vcpu_load(vcpu);
4825   -
4826 4821 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4827 4822 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4828 4823 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4829 4824  
... ... @@ -4845,15 +4840,11 @@
4845 4840 regs->rip = kvm_rip_read(vcpu);
4846 4841 regs->rflags = kvm_get_rflags(vcpu);
4847 4842  
4848   - vcpu_put(vcpu);
4849   -
4850 4843 return 0;
4851 4844 }
4852 4845  
4853 4846 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4854 4847 {
4855   - vcpu_load(vcpu);
4856   -
4857 4848 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
4858 4849 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
4859 4850 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
... ... @@ -4878,8 +4869,6 @@
4878 4869  
4879 4870 vcpu->arch.exception.pending = false;
4880 4871  
4881   - vcpu_put(vcpu);
4882   -
4883 4872 return 0;
4884 4873 }
4885 4874  
... ... @@ -4898,8 +4887,6 @@
4898 4887 {
4899 4888 struct desc_ptr dt;
4900 4889  
4901   - vcpu_load(vcpu);
4902   -
4903 4890 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
4904 4891 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
4905 4892 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
4906 4893  
4907 4894  
4908 4895  
4909 4896  
... ... @@ -4931,26 +4918,20 @@
4931 4918 set_bit(vcpu->arch.interrupt.nr,
4932 4919 (unsigned long *)sregs->interrupt_bitmap);
4933 4920  
4934   - vcpu_put(vcpu);
4935   -
4936 4921 return 0;
4937 4922 }
4938 4923  
4939 4924 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
4940 4925 struct kvm_mp_state *mp_state)
4941 4926 {
4942   - vcpu_load(vcpu);
4943 4927 mp_state->mp_state = vcpu->arch.mp_state;
4944   - vcpu_put(vcpu);
4945 4928 return 0;
4946 4929 }
4947 4930  
4948 4931 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
4949 4932 struct kvm_mp_state *mp_state)
4950 4933 {
4951   - vcpu_load(vcpu);
4952 4934 vcpu->arch.mp_state = mp_state->mp_state;
4953   - vcpu_put(vcpu);
4954 4935 return 0;
4955 4936 }
4956 4937  
... ... @@ -4996,8 +4977,6 @@
4996 4977 int pending_vec, max_bits;
4997 4978 struct desc_ptr dt;
4998 4979  
4999   - vcpu_load(vcpu);
5000   -
5001 4980 dt.size = sregs->idt.limit;
5002 4981 dt.address = sregs->idt.base;
5003 4982 kvm_x86_ops->set_idt(vcpu, &dt);
... ... @@ -5057,8 +5036,6 @@
5057 5036 !is_protmode(vcpu))
5058 5037 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5059 5038  
5060   - vcpu_put(vcpu);
5061   -
5062 5039 return 0;
5063 5040 }
5064 5041  
5065 5042  
... ... @@ -5068,12 +5045,10 @@
5068 5045 unsigned long rflags;
5069 5046 int i, r;
5070 5047  
5071   - vcpu_load(vcpu);
5072   -
5073 5048 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
5074 5049 r = -EBUSY;
5075 5050 if (vcpu->arch.exception.pending)
5076   - goto unlock_out;
  5051 + goto out;
5077 5052 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
5078 5053 kvm_queue_exception(vcpu, DB_VECTOR);
5079 5054 else
... ... @@ -5115,8 +5090,7 @@
5115 5090  
5116 5091 r = 0;
5117 5092  
5118   -unlock_out:
5119   - vcpu_put(vcpu);
  5093 +out:
5120 5094  
5121 5095 return r;
5122 5096 }
... ... @@ -5152,7 +5126,6 @@
5152 5126 gpa_t gpa;
5153 5127 int idx;
5154 5128  
5155   - vcpu_load(vcpu);
5156 5129 idx = srcu_read_lock(&vcpu->kvm->srcu);
5157 5130 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
5158 5131 srcu_read_unlock(&vcpu->kvm->srcu, idx);
... ... @@ -5160,7 +5133,6 @@
5160 5133 tr->valid = gpa != UNMAPPED_GVA;
5161 5134 tr->writeable = 1;
5162 5135 tr->usermode = 0;
5163   - vcpu_put(vcpu);
5164 5136  
5165 5137 return 0;
5166 5138 }
... ... @@ -5169,8 +5141,6 @@
5169 5141 {
5170 5142 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
5171 5143  
5172   - vcpu_load(vcpu);
5173   -
5174 5144 memcpy(fpu->fpr, fxsave->st_space, 128);
5175 5145 fpu->fcw = fxsave->cwd;
5176 5146 fpu->fsw = fxsave->swd;
... ... @@ -5180,8 +5150,6 @@
5180 5150 fpu->last_dp = fxsave->rdp;
5181 5151 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
5182 5152  
5183   - vcpu_put(vcpu);
5184   -
5185 5153 return 0;
5186 5154 }
5187 5155  
... ... @@ -5189,8 +5157,6 @@
5189 5157 {
5190 5158 struct fxsave *fxsave = (struct fxsave *)&vcpu->arch.guest_fx_image;
5191 5159  
5192   - vcpu_load(vcpu);
5193   -
5194 5160 memcpy(fxsave->st_space, fpu->fpr, 128);
5195 5161 fxsave->cwd = fpu->fcw;
5196 5162 fxsave->swd = fpu->fsw;
... ... @@ -5199,8 +5165,6 @@
5199 5165 fxsave->rip = fpu->last_ip;
5200 5166 fxsave->rdp = fpu->last_dp;
5201 5167 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
5202   -
5203   - vcpu_put(vcpu);
5204 5168  
5205 5169 return 0;
5206 5170 }
virt/kvm/kvm_main.c
... ... @@ -1392,6 +1392,18 @@
1392 1392  
1393 1393 if (vcpu->kvm->mm != current->mm)
1394 1394 return -EIO;
  1395 +
  1396 +#if defined(CONFIG_S390) || defined(CONFIG_PPC)
  1397 + /*
  1398 + * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
  1399 + * so vcpu_load() would break it.
  1400 + */
  1401 + if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
  1402 + return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
  1403 +#endif
  1404 +
  1405 +
  1406 + vcpu_load(vcpu);
1395 1407 switch (ioctl) {
1396 1408 case KVM_RUN:
1397 1409 r = -EINVAL;
1398 1410  
1399 1411  
... ... @@ -1566,9 +1578,12 @@
1566 1578 break;
1567 1579 }
1568 1580 default:
  1581 + vcpu_put(vcpu);
1569 1582 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
  1583 + vcpu_load(vcpu);
1570 1584 }
1571 1585 out:
  1586 + vcpu_put(vcpu);
1572 1587 kfree(fpu);
1573 1588 kfree(kvm_sregs);
1574 1589 return r;
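
Condensed, the dispatcher in virt/kvm/kvm_main.c now behaves roughly as sketched below (control flow only; kvm_vcpu_ioctl_flow() is a made-up name and copy_from_user(), error handling and most cases are omitted). Asynchronous ioctls skip the lock entirely, generic ioctls run under a single vcpu_load()/vcpu_put() pair, and arch-specific ioctls drop the lock around kvm_arch_vcpu_ioctl() because those handlers still take it themselves.

#include <linux/kvm_host.h>

static long kvm_vcpu_ioctl_flow(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	long r;

	if (vcpu->kvm->mm != current->mm)
		return -EIO;

#if defined(CONFIG_S390) || defined(CONFIG_PPC)
	/* Asynchronous to vcpu execution; vcpu_load() would break these. */
	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif

	vcpu_load(vcpu);			/* one lock for all generic vcpu ioctls */
	switch (ioctl) {
	case KVM_RUN:
		/* ... generic cases run with the vcpu already loaded ... */
		r = 0;
		break;
	default:
		vcpu_put(vcpu);			/* arch ioctls still lock on their own */
		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
		vcpu_load(vcpu);		/* re-take so the final vcpu_put() balances */
	}
	vcpu_put(vcpu);
	return r;
}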