Commit 8b3c3104c3f4f706e99365c3e0d2aa61b95f969f
Committed by
Paolo Bonzini
1 parent
854e8bb1aa
Exists in
ti-lsk-linux-4.1.y
and in
10 other branches
KVM: x86: Prevent host from panicking on shared MSR writes.
The previous patch blocked invalid writes directly when the MSR is written. As a precaution, prevent future similar mistakes by gracefully handling GPs caused by writes to shared MSRs. Cc: stable@vger.kernel.org Signed-off-by: Andrew Honig <ahonig@google.com> [Remove parts obsoleted by Nadav's patch. - Paolo] Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Showing 3 changed files with 14 additions and 6 deletions Side-by-side Diff
arch/x86/include/asm/kvm_host.h
... | ... | @@ -1064,7 +1064,7 @@ |
1064 | 1064 | unsigned long address); |
1065 | 1065 | |
1066 | 1066 | void kvm_define_shared_msr(unsigned index, u32 msr); |
1067 | -void kvm_set_shared_msr(unsigned index, u64 val, u64 mask); | |
1067 | +int kvm_set_shared_msr(unsigned index, u64 val, u64 mask); | |
1068 | 1068 | |
1069 | 1069 | bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip); |
1070 | 1070 |
arch/x86/kvm/vmx.c
... | ... | @@ -2659,12 +2659,15 @@ |
2659 | 2659 | default: |
2660 | 2660 | msr = find_msr_entry(vmx, msr_index); |
2661 | 2661 | if (msr) { |
2662 | + u64 old_msr_data = msr->data; | |
2662 | 2663 | msr->data = data; |
2663 | 2664 | if (msr - vmx->guest_msrs < vmx->save_nmsrs) { |
2664 | 2665 | preempt_disable(); |
2665 | - kvm_set_shared_msr(msr->index, msr->data, | |
2666 | - msr->mask); | |
2666 | + ret = kvm_set_shared_msr(msr->index, msr->data, | |
2667 | + msr->mask); | |
2667 | 2668 | preempt_enable(); |
2669 | + if (ret) | |
2670 | + msr->data = old_msr_data; | |
2668 | 2671 | } |
2669 | 2672 | break; |
2670 | 2673 | } |
arch/x86/kvm/x86.c
... | ... | @@ -229,20 +229,25 @@ |
229 | 229 | shared_msr_update(i, shared_msrs_global.msrs[i]); |
230 | 230 | } |
231 | 231 | |
232 | -void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) | |
232 | +int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) | |
233 | 233 | { |
234 | 234 | unsigned int cpu = smp_processor_id(); |
235 | 235 | struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); |
236 | + int err; | |
236 | 237 | |
237 | 238 | if (((value ^ smsr->values[slot].curr) & mask) == 0) |
238 | - return; | |
239 | + return 0; | |
239 | 240 | smsr->values[slot].curr = value; |
240 | - wrmsrl(shared_msrs_global.msrs[slot], value); | |
241 | + err = wrmsrl_safe(shared_msrs_global.msrs[slot], value); | |
242 | + if (err) | |
243 | + return 1; | |
244 | + | |
241 | 245 | if (!smsr->registered) { |
242 | 246 | smsr->urn.on_user_return = kvm_on_user_return; |
243 | 247 | user_return_notifier_register(&smsr->urn); |
244 | 248 | smsr->registered = true; |
245 | 249 | } |
250 | + return 0; | |
246 | 251 | } |
247 | 252 | EXPORT_SYMBOL_GPL(kvm_set_shared_msr); |
248 | 253 |