Commit a3fac08085136fb8b56bbd290500ed03c94ee5d1

Authored by Linus Torvalds

Merge branch 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull a few KVM fixes from Avi Kivity:
 "A bunch of powerpc KVM fixes, a guest and a host RCU fix (unrelated),
  and a small build fix."

* 'kvm-updates/3.4' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: Resolve RCU vs. async page fault problem
  KVM: VMX: vmx_set_cr0 expects kvm->srcu locked
  KVM: PMU: Fix integer constant is too large warning in kvm_pmu_set_msr()
  KVM: PPC: Book3S: PR: Fix preemption
  KVM: PPC: Save/Restore CR over vcpu_run
  KVM: PPC: Book3S HV: Save and restore CR in __kvmppc_vcore_entry
  KVM: PPC: Book3S HV: Fix kvm_alloc_linear in case where no linears exist
  KVM: PPC: Book3S: Compile fix for ppc32 in HIOR access code

Showing 8 changed files

arch/powerpc/kvm/book3s_hv_builtin.c
... ... @@ -173,9 +173,9 @@
173 173  
174 174 static struct kvmppc_linear_info *kvm_alloc_linear(int type)
175 175 {
176   - struct kvmppc_linear_info *ri;
  176 + struct kvmppc_linear_info *ri, *ret;
177 177  
178   - ri = NULL;
  178 + ret = NULL;
179 179 spin_lock(&linear_lock);
180 180 list_for_each_entry(ri, &free_linears, list) {
181 181 if (ri->type != type)
... ... @@ -183,11 +183,12 @@
183 183  
184 184 list_del(&ri->list);
185 185 atomic_inc(&ri->use_count);
  186 + memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
  187 + ret = ri;
186 188 break;
187 189 }
188 190 spin_unlock(&linear_lock);
189   - memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
190   - return ri;
  191 + return ret;
191 192 }
192 193  
193 194 static void kvm_release_linear(struct kvmppc_linear_info *ri)
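For reference, this is how kvm_alloc_linear() reads once the two hunks above are applied: the memset of the linear and the assignment of the returned pointer now happen only when a matching entry was actually found, so an empty free list yields NULL instead of dereferencing the loop cursor after the loop. The sketch below is reconstructed from the hunks; the continue on the type mismatch and the surrounding definitions (struct kvmppc_linear_info, free_linears, linear_lock) are unchanged context from book3s_hv_builtin.c, not part of the diff.

static struct kvmppc_linear_info *kvm_alloc_linear(int type)
{
	struct kvmppc_linear_info *ri, *ret;

	ret = NULL;
	spin_lock(&linear_lock);
	list_for_each_entry(ri, &free_linears, list) {
		if (ri->type != type)
			continue;	/* unchanged line outside the hunks */

		list_del(&ri->list);
		atomic_inc(&ri->use_count);
		memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
		ret = ri;
		break;
	}
	spin_unlock(&linear_lock);
	/* ret is still NULL when no linear of the requested type was free */
	return ret;
}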
arch/powerpc/kvm/book3s_hv_interrupts.S
... ... @@ -46,8 +46,10 @@
46 46 /* Save host state to the stack */
47 47 stdu r1, -SWITCH_FRAME_SIZE(r1)
48 48  
49   - /* Save non-volatile registers (r14 - r31) */
  49 + /* Save non-volatile registers (r14 - r31) and CR */
50 50 SAVE_NVGPRS(r1)
  51 + mfcr r3
  52 + std r3, _CCR(r1)
51 53  
52 54 /* Save host DSCR */
53 55 BEGIN_FTR_SECTION
... ... @@ -157,8 +159,10 @@
157 159 * R13 = PACA
158 160 */
159 161  
160   - /* Restore non-volatile host registers (r14 - r31) */
  162 + /* Restore non-volatile host registers (r14 - r31) and CR */
161 163 REST_NVGPRS(r1)
  164 + ld r4, _CCR(r1)
  165 + mtcr r4
162 166  
163 167 addi r1, r1, SWITCH_FRAME_SIZE
164 168 ld r0, PPC_LR_STKOFF(r1)
arch/powerpc/kvm/book3s_interrupts.S
... ... @@ -84,6 +84,10 @@
84 84 /* Save non-volatile registers (r14 - r31) */
85 85 SAVE_NVGPRS(r1)
86 86  
  87 + /* Save CR */
  88 + mfcr r14
  89 + stw r14, _CCR(r1)
  90 +
87 91 /* Save LR */
88 92 PPC_STL r0, _LINK(r1)
89 93  
... ... @@ -164,6 +168,9 @@
164 168  
165 169 PPC_LL r4, _LINK(r1)
166 170 mtlr r4
  171 +
  172 + lwz r14, _CCR(r1)
  173 + mtcr r14
167 174  
168 175 /* Restore non-volatile host registers (r14 - r31) */
169 176 REST_NVGPRS(r1)
arch/powerpc/kvm/book3s_pr.c
... ... @@ -777,6 +777,7 @@
777 777 }
778 778 }
779 779  
  780 + preempt_disable();
780 781 if (!(r & RESUME_HOST)) {
781 782 /* To avoid clobbering exit_reason, only check for signals if
782 783 * we aren't already exiting to userspace for some other
... ... @@ -798,8 +799,6 @@
798 799 run->exit_reason = KVM_EXIT_INTR;
799 800 r = -EINTR;
800 801 } else {
801   - preempt_disable();
802   -
803 802 /* In case an interrupt came in that was triggered
804 803 * from userspace (like DEC), we need to check what
805 804 * to inject now! */
... ... @@ -881,7 +880,8 @@
881 880  
882 881 switch (reg->id) {
883 882 case KVM_REG_PPC_HIOR:
884   - r = put_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
  883 + r = copy_to_user((u64 __user *)(long)reg->addr,
  884 + &to_book3s(vcpu)->hior, sizeof(u64));
885 885 break;
886 886 default:
887 887 break;
... ... @@ -896,7 +896,8 @@
896 896  
897 897 switch (reg->id) {
898 898 case KVM_REG_PPC_HIOR:
899   - r = get_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
  899 + r = copy_from_user(&to_book3s(vcpu)->hior,
  900 + (u64 __user *)(long)reg->addr, sizeof(u64));
900 901 if (!r)
901 902 to_book3s(vcpu)->hior_explicit = true;
902 903 break;
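Two fixes meet in book3s_pr.c: the first two hunks move preempt_disable() in front of the RESUME_HOST check so it is taken before the branch rather than only inside the else arm, and the last two hunks replace put_user()/get_user() of a u64 with copy_to_user()/copy_from_user(), which also builds on ppc32 (reg->addr is a __u64, hence the (long) bounce before the __user cast). A minimal sketch of the fixed HIOR accesses follows; the helper names are illustrative only, the real code sits in the ONE_REG get/set handlers of book3s_pr.c.

/* Illustrative helpers for the KVM_REG_PPC_HIOR cases fixed above; the
 * copy_*_user() calls return the number of bytes left uncopied, so zero
 * means success, matching the !r check in the hunk. */
static int hior_reg_get(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	return copy_to_user((u64 __user *)(long)reg->addr,
			    &to_book3s(vcpu)->hior, sizeof(u64));
}

static int hior_reg_set(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = copy_from_user(&to_book3s(vcpu)->hior,
			       (u64 __user *)(long)reg->addr, sizeof(u64));

	if (!r)
		to_book3s(vcpu)->hior_explicit = true;
	return r;
}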
arch/powerpc/kvm/booke_interrupts.S
... ... @@ -34,7 +34,8 @@
34 34 /* r2 is special: it holds 'current', and it made nonvolatile in the
35 35 * kernel with the -ffixed-r2 gcc option. */
36 36 #define HOST_R2 12
37   -#define HOST_NV_GPRS 16
  37 +#define HOST_CR 16
  38 +#define HOST_NV_GPRS 20
38 39 #define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
39 40 #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
40 41 #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
... ... @@ -296,8 +297,10 @@
296 297  
297 298 /* Return to kvm_vcpu_run(). */
298 299 lwz r4, HOST_STACK_LR(r1)
  300 + lwz r5, HOST_CR(r1)
299 301 addi r1, r1, HOST_STACK_SIZE
300 302 mtlr r4
  303 + mtcr r5
301 304 /* r3 still contains the return code from kvmppc_handle_exit(). */
302 305 blr
303 306  
... ... @@ -314,6 +317,8 @@
314 317 stw r3, HOST_RUN(r1)
315 318 mflr r3
316 319 stw r3, HOST_STACK_LR(r1)
  320 + mfcr r5
  321 + stw r5, HOST_CR(r1)
317 322  
318 323 /* Save host non-volatile register state to stack. */
319 324 stw r14, HOST_NV_GPR(r14)(r1)
arch/x86/kernel/kvm.c
... ... @@ -38,6 +38,7 @@
38 38 #include <asm/traps.h>
39 39 #include <asm/desc.h>
40 40 #include <asm/tlbflush.h>
  41 +#include <asm/idle.h>
41 42  
42 43 static int kvmapf = 1;
43 44  
... ... @@ -253,7 +254,10 @@
253 254 kvm_async_pf_task_wait((u32)read_cr2());
254 255 break;
255 256 case KVM_PV_REASON_PAGE_READY:
  257 + rcu_irq_enter();
  258 + exit_idle();
256 259 kvm_async_pf_task_wake((u32)read_cr2());
  260 + rcu_irq_exit();
257 261 break;
258 262 }
259 263 }
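For context, the fixed PAGE_READY path in the guest's async page fault handler reads roughly as below. Only the lines from the hunk above are verbatim; the handler name and the surrounding switch on the paravirt fault reason are recalled from arch/x86/kernel/kvm.c rather than shown in the diff. The point of the change is that this exception does not go through the usual irq_enter()/irq_exit() path, so when it lands in the idle loop RCU still considers the CPU quiescent; rcu_irq_enter()/rcu_irq_exit() and exit_idle() fix up the accounting before the wakeup uses RCU-protected state.

/* Sketch of the handler around the fixed hunk; not a verbatim copy. */
void do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	switch (kvm_read_and_reset_pf_reason()) {
	default:
		do_page_fault(regs, error_code);	/* ordinary #PF */
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		kvm_async_pf_task_wait((u32)read_cr2());
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();	/* may be in RCU-idle; announce ourselves */
		exit_idle();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}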
arch/x86/kvm/pmu.c
... ... @@ -369,7 +369,7 @@
369 369 case MSR_CORE_PERF_FIXED_CTR_CTRL:
370 370 if (pmu->fixed_ctr_ctrl == data)
371 371 return 0;
372   - if (!(data & 0xfffffffffffff444)) {
  372 + if (!(data & 0xfffffffffffff444ull)) {
373 373 reprogram_fixed_counters(pmu, data);
374 374 return 0;
375 375 }
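The pmu.c change only appends the ull suffix: on 32-bit hosts, where unsigned long is 32 bits, gcc warns that the unsuffixed constant is too large, which is the warning the commit subject refers to. A hypothetical standalone snippet (not kernel code) showing the same reserved-bit mask with an explicit suffix:

#include <stdio.h>

/* Hypothetical stand-in for the fixed-counter-control check: accept data
 * only if no bit of the reserved mask is set.  The ull suffix makes the
 * constant's type explicit and avoids the 32-bit warning. */
static int fixed_ctr_ctrl_valid(unsigned long long data)
{
	return !(data & 0xfffffffffffff444ull);
}

int main(void)
{
	printf("%d %d\n",
	       fixed_ctr_ctrl_valid(0x333ull),	/* only allowed bits set */
	       fixed_ctr_ctrl_valid(1ull << 63));	/* reserved bit set */
	return 0;
}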
arch/x86/kvm/vmx.c
... ... @@ -3906,7 +3906,9 @@
3906 3906 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
3907 3907  
3908 3908 vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
  3909 + vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3909 3910 vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
  3911 + srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
3910 3912 vmx_set_cr4(&vmx->vcpu, 0);
3911 3913 vmx_set_efer(&vmx->vcpu, 0);
3912 3914 vmx_fpu_activate(&vmx->vcpu);
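The vmx.c hunk brackets the vmx_set_cr0() call during vcpu reset with an SRCU read-side section on kvm->srcu, which that call expects to be held, presumably because entering real mode can reach SRCU-protected data such as the memslots. A minimal sketch of the pattern with a hypothetical helper name; the hunk itself stashes the index in vcpu->srcu_idx, but a local index works the same way.

/* Hypothetical helper showing the locking pattern added above. */
static void set_cr0_with_srcu(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	int idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);		/* enter SRCU read side */
	vmx_set_cr0(vcpu, cr0);				/* expects kvm->srcu held */
	srcu_read_unlock(&vcpu->kvm->srcu, idx);	/* leave with same index */
}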