Commit 55e3748e8902ff641e334226bdcb432f9a5d78d3

Authored by Marc Zyngier
Committed by Catalin Marinas
1 parent 85478bab40

arm64: KVM: Add ARCH_WORKAROUND_2 support for guests

In order to offer ARCH_WORKAROUND_2 support to guests, we need
a bit of infrastructure.

Let's add a per-vcpu flag indicating whether the guest has the
SSBD mitigation enabled. Depending on the state of this flag,
KVM can disable ARCH_WORKAROUND_2 before entering the guest
and re-enable it on exit.

Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
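
For context, the guest-facing half of this interface is the same SMCCC
call the hyp code issues below: ARM_SMCCC_ARCH_WORKAROUND_2 with the
desired mitigation state as its argument (0 = disabled, 1 = enabled).
A guest running under KVM uses the HVC conduit; the trap handler that
actually updates the flag added here lands in a follow-up patch. A
minimal guest-side sketch (the function name is illustrative, not from
this series):

	#include <linux/arm-smccc.h>

	/*
	 * Illustrative sketch of the guest side: ask the hypervisor
	 * to enable (state == 1) or disable (state == 0) the SSBD
	 * mitigation. Guests use HVC as the SMCCC conduit; the hyp
	 * code in this patch issues the equivalent SMC to firmware.
	 */
	static void guest_set_ssbd_state(unsigned long state)
	{
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
	}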

Showing 6 changed files with 81 additions and 0 deletions

arch/arm/include/asm/kvm_mmu.h
... ... @@ -319,6 +319,11 @@
319 319 return 0;
320 320 }
321 321  
  322 +static inline int hyp_map_aux_data(void)
  323 +{
  324 + return 0;
  325 +}
  326 +
322 327 #define kvm_phys_to_vttbr(addr) (addr)
323 328  
324 329 #endif /* !__ASSEMBLY__ */
arch/arm64/include/asm/kvm_asm.h
... ... @@ -33,6 +33,9 @@
33 33 #define KVM_ARM64_DEBUG_DIRTY_SHIFT 0
34 34 #define KVM_ARM64_DEBUG_DIRTY (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
35 35  
  36 +#define VCPU_WORKAROUND_2_FLAG_SHIFT 0
  37 +#define VCPU_WORKAROUND_2_FLAG (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
  38 +
36 39 /* Translate a kernel address of @sym into its equivalent linear mapping */
37 40 #define kvm_ksym_ref(sym) \
38 41 ({ \
arch/arm64/include/asm/kvm_host.h
... ... @@ -216,6 +216,9 @@
216 216 /* Exception Information */
217 217 struct kvm_vcpu_fault_info fault;
218 218  
  219 + /* State of various workarounds, see kvm_asm.h for bit assignment */
  220 + u64 workaround_flags;
  221 +
219 222 /* Guest debug state */
220 223 u64 debug_flags;
221 224  
arch/arm64/include/asm/kvm_mmu.h
... ... @@ -456,6 +456,30 @@
456 456 }
457 457 #endif
458 458  
  459 +#ifdef CONFIG_ARM64_SSBD
  460 +DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
  461 +
  462 +static inline int hyp_map_aux_data(void)
  463 +{
  464 + int cpu, err;
  465 +
  466 + for_each_possible_cpu(cpu) {
  467 + u64 *ptr;
  468 +
  469 + ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
  470 + err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
  471 + if (err)
  472 + return err;
  473 + }
  474 + return 0;
  475 +}
  476 +#else
  477 +static inline int hyp_map_aux_data(void)
  478 +{
  479 + return 0;
  480 +}
  481 +#endif
  482 +
459 483 #define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
460 484  
461 485 #endif /* __ASSEMBLY__ */
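
A note on why this mapping is needed (my reading of the code, not
spelled out in the commit): on non-VHE systems the hyp code runs at EL2
with its own stage-1 page tables, so it can only dereference data that
create_hyp_mappings() has explicitly mapped. The switch.c hunk below
depends on exactly that, reading the per-CPU variable from EL2 roughly
like this (hypothetical wrapper for illustration):

	static bool __hyp_text cpu_requires_wa2_call(void)
	{
		/*
		 * This EL2 read only works because hyp_map_aux_data()
		 * mapped each CPU's copy into the hyp address space.
		 */
		return __hyp_this_cpu_read(arm64_ssbd_callback_required);
	}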
arch/arm64/kvm/hyp/switch.c
... ... @@ -15,6 +15,7 @@
15 15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 16 */
17 17  
  18 +#include <linux/arm-smccc.h>
18 19 #include <linux/types.h>
19 20 #include <linux/jump_label.h>
20 21 #include <uapi/linux/psci.h>
... ... @@ -389,6 +390,39 @@
389 390 return false;
390 391 }
391 392  
  393 +static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
  394 +{
  395 + if (!cpus_have_const_cap(ARM64_SSBD))
  396 + return false;
  397 +
  398 + return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
  399 +}
  400 +
  401 +static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
  402 +{
  403 +#ifdef CONFIG_ARM64_SSBD
  404 + /*
  405 + * The host runs with the workaround always present. If the
  406 + * guest wants it disabled, so be it...
  407 + */
  408 + if (__needs_ssbd_off(vcpu) &&
  409 + __hyp_this_cpu_read(arm64_ssbd_callback_required))
  410 + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
  411 +#endif
  412 +}
  413 +
  414 +static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
  415 +{
  416 +#ifdef CONFIG_ARM64_SSBD
  417 + /*
  418 + * If the guest has disabled the workaround, bring it back on.
  419 + */
  420 + if (__needs_ssbd_off(vcpu) &&
  421 + __hyp_this_cpu_read(arm64_ssbd_callback_required))
  422 + arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
  423 +#endif
  424 +}
  425 +
392 426 /* Switch to the guest for VHE systems running in EL2 */
393 427 int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
394 428 {
... ... @@ -409,6 +443,8 @@
409 443 sysreg_restore_guest_state_vhe(guest_ctxt);
410 444 __debug_switch_to_guest(vcpu);
411 445  
  446 + __set_guest_arch_workaround_state(vcpu);
  447 +
412 448 do {
413 449 /* Jump in the fire! */
414 450 exit_code = __guest_enter(vcpu, host_ctxt);
... ... @@ -416,6 +452,8 @@
416 452 /* And we're baaack! */
417 453 } while (fixup_guest_exit(vcpu, &exit_code));
418 454  
  455 + __set_host_arch_workaround_state(vcpu);
  456 +
419 457 fp_enabled = fpsimd_enabled_vhe();
420 458  
421 459 sysreg_save_guest_state_vhe(guest_ctxt);
422 460  
... ... @@ -465,12 +503,16 @@
465 503 __sysreg_restore_state_nvhe(guest_ctxt);
466 504 __debug_switch_to_guest(vcpu);
467 505  
  506 + __set_guest_arch_workaround_state(vcpu);
  507 +
468 508 do {
469 509 /* Jump in the fire! */
470 510 exit_code = __guest_enter(vcpu, host_ctxt);
471 511  
472 512 /* And we're baaack! */
473 513 } while (fixup_guest_exit(vcpu, &exit_code));
  514 +
  515 + __set_host_arch_workaround_state(vcpu);
474 516  
475 517 fp_enabled = __fpsimd_enabled_nvhe();
476 518  
virt/kvm/arm/arm.c
... ... @@ -1490,6 +1490,10 @@
1490 1490 }
1491 1491 }
1492 1492  
  1493 + err = hyp_map_aux_data();
  1494 + if (err)
  1495 + kvm_err("Cannot map host auxiliary data: %d\n", err);
  1496 +
1493 1497 return 0;
1494 1498  
1495 1499 out_err: