
Commit dc25140

chazy authored and Marc Zyngier committed
KVM: arm64: Factor out fault info population and gic workarounds
The current world-switch function has functionality to detect a number of cases where we need to fixup some part of the exit condition and possibly run the guest again, before having restored the host state.

This includes populating missing fault info, emulating GICv2 CPU interface accesses when mapped at unaligned addresses, and emulating the GICv3 CPU interface on systems that need it.

As we are about to have an alternative switch function for VHE systems, but VHE systems still need the same early fixup logic, factor out this logic into a separate function that can be shared by both switch functions.

No functional change.

Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
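For context, the shape this refactor enables looks roughly like the sketch below. This is illustrative only and not part of this commit: the function name kvm_vcpu_run_vhe and the elided VHE save/restore steps are placeholders; only fixup_guest_exit(), __guest_enter() and the do/while retry loop mirror the code in the diff.

/*
 * Illustrative sketch only -- not part of this commit. The name
 * kvm_vcpu_run_vhe and the elided VHE save/restore steps are
 * placeholders; the retry loop and fixup_guest_exit() mirror the diff.
 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	u64 exit_code;

	/* No kern_hyp_va() translation needed when running with VHE */
	host_ctxt = vcpu->arch.host_cpu_context;
	host_ctxt->__hyp_running_vcpu = vcpu;

	/* ... VHE-specific host state save, trap and VM activation ... */

	do {
		/* Enter the guest and come back with an exit code */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* Re-enter directly if the exit could be fixed up early */
	} while (fixup_guest_exit(vcpu, &exit_code));

	/* ... VHE-specific host state restore ... */

	return exit_code;
}

The key point is that the do/while loop replaces the old "goto again" label, so any switch function can re-enter the guest whenever the shared early fixup succeeds.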
1 parent 014c4c7 commit dc25140

File tree

1 file changed: +57 -47 lines changed


arch/arm64/kvm/hyp/switch.c

Lines changed: 57 additions & 47 deletions
@@ -291,53 +291,27 @@ static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
 	}
 }
 
-int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+/*
+ * Return true when we were able to fixup the guest exit and should return to
+ * the guest, false when we should restore the host state and return to the
+ * main run loop.
+ */
+static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-	struct kvm_cpu_context *host_ctxt;
-	struct kvm_cpu_context *guest_ctxt;
-	bool fp_enabled;
-	u64 exit_code;
-
-	vcpu = kern_hyp_va(vcpu);
-
-	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
-	host_ctxt->__hyp_running_vcpu = vcpu;
-	guest_ctxt = &vcpu->arch.ctxt;
-
-	__sysreg_save_host_state(host_ctxt);
-
-	__activate_traps(vcpu);
-	__activate_vm(vcpu);
-
-	__vgic_restore_state(vcpu);
-	__timer_enable_traps(vcpu);
-
-	/*
-	 * We must restore the 32-bit state before the sysregs, thanks
-	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
-	 */
-	__sysreg32_restore_state(vcpu);
-	__sysreg_restore_guest_state(guest_ctxt);
-	__debug_switch_to_guest(vcpu);
-
-	/* Jump in the fire! */
-again:
-	exit_code = __guest_enter(vcpu, host_ctxt);
-	/* And we're baaack! */
-
-	if (ARM_EXCEPTION_CODE(exit_code) != ARM_EXCEPTION_IRQ)
+	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
 		vcpu->arch.fault.esr_el2 = read_sysreg_el2(esr);
+
 	/*
 	 * We're using the raw exception code in order to only process
 	 * the trap if no SError is pending. We will come back to the
 	 * same PC once the SError has been injected, and replay the
 	 * trapping instruction.
 	 */
-	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
-		goto again;
+	if (*exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
+		return true;
 
 	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
-	    exit_code == ARM_EXCEPTION_TRAP) {
+	    *exit_code == ARM_EXCEPTION_TRAP) {
 		bool valid;
 
 		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
@@ -351,9 +325,9 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 
 			if (ret == 1) {
 				if (__skip_instr(vcpu))
-					goto again;
+					return true;
 				else
-					exit_code = ARM_EXCEPTION_TRAP;
+					*exit_code = ARM_EXCEPTION_TRAP;
 			}
 
 			if (ret == -1) {
@@ -365,29 +339,65 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 				 */
 				if (!__skip_instr(vcpu))
 					*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
-				exit_code = ARM_EXCEPTION_EL1_SERROR;
+				*exit_code = ARM_EXCEPTION_EL1_SERROR;
 			}
-
-			/* 0 falls through to be handler out of EL2 */
 		}
 	}
 
 	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
-	    exit_code == ARM_EXCEPTION_TRAP &&
+	    *exit_code == ARM_EXCEPTION_TRAP &&
 	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
 	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
 		int ret = __vgic_v3_perform_cpuif_access(vcpu);
 
 		if (ret == 1) {
 			if (__skip_instr(vcpu))
-				goto again;
+				return true;
 			else
-				exit_code = ARM_EXCEPTION_TRAP;
+				*exit_code = ARM_EXCEPTION_TRAP;
 		}
-
-		/* 0 falls through to be handled out of EL2 */
 	}
 
+	/* Return to the host kernel and handle the exit */
+	return false;
+}
+
+int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpu_context *host_ctxt;
+	struct kvm_cpu_context *guest_ctxt;
+	bool fp_enabled;
+	u64 exit_code;
+
+	vcpu = kern_hyp_va(vcpu);
+
+	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	host_ctxt->__hyp_running_vcpu = vcpu;
+	guest_ctxt = &vcpu->arch.ctxt;
+
+	__sysreg_save_host_state(host_ctxt);
+
+	__activate_traps(vcpu);
+	__activate_vm(vcpu);
+
+	__vgic_restore_state(vcpu);
+	__timer_enable_traps(vcpu);
+
+	/*
+	 * We must restore the 32-bit state before the sysregs, thanks
+	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
+	 */
+	__sysreg32_restore_state(vcpu);
+	__sysreg_restore_guest_state(guest_ctxt);
+	__debug_switch_to_guest(vcpu);
+
+	do {
+		/* Jump in the fire! */
+		exit_code = __guest_enter(vcpu, host_ctxt);
+
+		/* And we're baaack! */
+	} while (fixup_guest_exit(vcpu, &exit_code));
+
 	if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
 		u32 midr = read_cpuid_id();
 