Commit 3f5c90b

chazy authored and Marc Zyngier committed
KVM: arm64: Introduce VHE-specific kvm_vcpu_run
So far this is mostly (see below) a copy of the legacy non-VHE switch function, but we will start reworking these functions in separate directions to work on VHE and non-VHE in the most optimal way in later patches.

The only difference after this patch between the VHE and non-VHE run functions is that we omit the branch-predictor variant-2 hardening for QC Falkor CPUs, because this workaround is specific to a series of non-VHE ARMv8.0 CPUs.

Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
1 parent dc25140 commit 3f5c90b
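
For context on the one intentional difference called out above: the branch-predictor variant-2 hardening that stays behind in the non-VHE path runs right after the guest exits and is keyed off the Falkor MIDR. The snippet below is only an approximate sketch of that block as found in the non-VHE run function of this era; the capability name, helper and exact MIDR check are quoted from memory and may differ from the actual tree. kvm_vcpu_run_vhe() deliberately omits it, since Falkor is a non-VHE ARMv8.0 part.

        /*
         * Approximate sketch, not verbatim from this commit: post-exit
         * BTAC-predictor sanitisation that only the non-VHE run path needs.
         */
        if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
                u32 midr = read_cpuid_id();

                /* Only affected QC Falkor parts need the invalidation */
                if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
                    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
                        __qcom_hyp_sanitize_btac_predictors();
        }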

6 files changed, +87 -9 lines changed

arch/arm/include/asm/kvm_asm.h

Lines changed: 4 additions & 1 deletion
@@ -70,7 +70,10 @@ extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
 
-extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+/* no VHE on 32-bit :( */
+static inline int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { BUG(); return 0; }
+
+extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
 
 extern void __init_stage2_translation(void);

arch/arm/kvm/hyp/switch.c

Lines changed: 1 addition & 1 deletion
@@ -154,7 +154,7 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
         return true;
 }
 
-int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 {
         struct kvm_cpu_context *host_ctxt;
         struct kvm_cpu_context *guest_ctxt;

arch/arm64/include/asm/kvm_asm.h

Lines changed: 3 additions & 1 deletion
@@ -58,7 +58,9 @@ extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
 
-extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+extern int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu);
+
+extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
 
 extern u64 __vgic_v3_get_ich_vtr_el2(void);
 extern u64 __vgic_v3_read_vmcr(void);

arch/arm64/include/asm/kvm_host.h

Lines changed: 7 additions & 0 deletions
@@ -428,6 +428,13 @@ static inline void kvm_arm_vhe_guest_enter(void)
 static inline void kvm_arm_vhe_guest_exit(void)
 {
         local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
+        /*
+         * When we exit from the guest we change a number of CPU configuration
+         * parameters, such as traps. Make sure these changes take effect
+         * before running the host or additional guests.
+         */
+        isb();
 }
 
 static inline bool kvm_arm_harden_branch_predictor(void)
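
Taken together with the unchanged enter helper, the VHE guest entry/exit bracket now reads roughly as below. The body of kvm_arm_vhe_guest_enter() is not part of this hunk; it is assumed here to be the DAIF-masking counterpart of the restore in the exit path.

static inline void kvm_arm_vhe_guest_enter(void)
{
        /*
         * Assumed, not shown in this hunk: mask PSTATE.DAIF so the world
         * switch runs with interrupts deferred until the exit path.
         */
        local_daif_mask();
}

static inline void kvm_arm_vhe_guest_exit(void)
{
        local_daif_restore(DAIF_PROCCTX_NOIRQ);

        /*
         * When we exit from the guest we change a number of CPU configuration
         * parameters, such as traps. Make sure these changes take effect
         * before running the host or additional guests.
         */
        isb();
}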

arch/arm64/kvm/hyp/switch.c

Lines changed: 65 additions & 1 deletion
@@ -362,7 +362,71 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
         return false;
 }
 
-int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+/* Switch to the guest for VHE systems running in EL2 */
+int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+{
+        struct kvm_cpu_context *host_ctxt;
+        struct kvm_cpu_context *guest_ctxt;
+        bool fp_enabled;
+        u64 exit_code;
+
+        vcpu = kern_hyp_va(vcpu);
+
+        host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+        host_ctxt->__hyp_running_vcpu = vcpu;
+        guest_ctxt = &vcpu->arch.ctxt;
+
+        __sysreg_save_host_state(host_ctxt);
+
+        __activate_traps(vcpu);
+        __activate_vm(vcpu);
+
+        __vgic_restore_state(vcpu);
+        __timer_enable_traps(vcpu);
+
+        /*
+         * We must restore the 32-bit state before the sysregs, thanks
+         * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
+         */
+        __sysreg32_restore_state(vcpu);
+        __sysreg_restore_guest_state(guest_ctxt);
+        __debug_switch_to_guest(vcpu);
+
+        do {
+                /* Jump in the fire! */
+                exit_code = __guest_enter(vcpu, host_ctxt);
+
+                /* And we're baaack! */
+        } while (fixup_guest_exit(vcpu, &exit_code));
+
+        fp_enabled = __fpsimd_enabled();
+
+        __sysreg_save_guest_state(guest_ctxt);
+        __sysreg32_save_state(vcpu);
+        __timer_disable_traps(vcpu);
+        __vgic_save_state(vcpu);
+
+        __deactivate_traps(vcpu);
+        __deactivate_vm(vcpu);
+
+        __sysreg_restore_host_state(host_ctxt);
+
+        if (fp_enabled) {
+                __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
+                __fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
+        }
+
+        /*
+         * This must come after restoring the host sysregs, since a non-VHE
+         * system may enable SPE here and make use of the TTBRs.
+         */
+        __debug_switch_to_host(vcpu);
+
+        return exit_code;
+}
+
+/* Switch to the guest for legacy non-VHE systems */
+int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 {
         struct kvm_cpu_context *host_ctxt;
         struct kvm_cpu_context *guest_ctxt;

virt/kvm/arm/arm.c

Lines changed: 7 additions & 5 deletions
@@ -733,13 +733,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
          */
         trace_kvm_entry(*vcpu_pc(vcpu));
         guest_enter_irqoff();
-        if (has_vhe())
-                kvm_arm_vhe_guest_enter();
-
-        ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
-        if (has_vhe())
+        if (has_vhe()) {
+                kvm_arm_vhe_guest_enter();
+                ret = kvm_vcpu_run_vhe(vcpu);
                 kvm_arm_vhe_guest_exit();
+        } else {
+                ret = kvm_call_hyp(__kvm_vcpu_run_nvhe, vcpu);
+        }
+
         vcpu->mode = OUTSIDE_GUEST_MODE;
         vcpu->stat.exits++;
         /*
