Skip to content

Commit 1261bfa

Browse files
Wanpeng Li authored and rkrcmar committed
KVM: async_pf: Add L1 guest async_pf #PF vmexit handler
This patch adds the L1 guest async page fault #PF vmexit handler, so that async page faults taken during nested (L2) guest execution are handled by L1 in the same way as ordinary async page faults. Cc: Paolo Bonzini <pbonzini@redhat.com> Cc: Radim Krčmář <rkrcmar@redhat.com> Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com> [Passed insn parameters to kvm_mmu_page_fault().] Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
1 parent cfcd20e commit 1261bfa

File tree

5 files changed

+51
-37
lines changed

5 files changed

+51
-37
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -650,6 +650,7 @@ struct kvm_vcpu_arch {
650650
u64 msr_val;
651651
u32 id;
652652
bool send_user_only;
653+
u32 host_apf_reason;
653654
} apf;
654655

655656
/* OSVW MSRs (AMD only) */

arch/x86/kvm/mmu.c

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@
4646
#include <asm/io.h>
4747
#include <asm/vmx.h>
4848
#include <asm/kvm_page_track.h>
49+
#include "trace.h"
4950

5051
/*
5152
* When setting this variable to true it enables Two-Dimensional-Paging
@@ -3780,6 +3781,38 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
37803781
return false;
37813782
}
37823783

3784+
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
3785+
u64 fault_address, char *insn, int insn_len,
3786+
bool need_unprotect)
3787+
{
3788+
int r = 1;
3789+
3790+
switch (vcpu->arch.apf.host_apf_reason) {
3791+
default:
3792+
trace_kvm_page_fault(fault_address, error_code);
3793+
3794+
if (need_unprotect && kvm_event_needs_reinjection(vcpu))
3795+
kvm_mmu_unprotect_page_virt(vcpu, fault_address);
3796+
r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
3797+
insn_len);
3798+
break;
3799+
case KVM_PV_REASON_PAGE_NOT_PRESENT:
3800+
vcpu->arch.apf.host_apf_reason = 0;
3801+
local_irq_disable();
3802+
kvm_async_pf_task_wait(fault_address);
3803+
local_irq_enable();
3804+
break;
3805+
case KVM_PV_REASON_PAGE_READY:
3806+
vcpu->arch.apf.host_apf_reason = 0;
3807+
local_irq_disable();
3808+
kvm_async_pf_task_wake(fault_address);
3809+
local_irq_enable();
3810+
break;
3811+
}
3812+
return r;
3813+
}
3814+
EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
3815+
37833816
static bool
37843817
check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
37853818
{

arch/x86/kvm/mmu.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,9 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
7777
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
7878
bool accessed_dirty);
7979
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
80+
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
81+
u64 fault_address, char *insn, int insn_len,
82+
bool need_unprotect);
8083

8184
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
8285
{

arch/x86/kvm/svm.c

Lines changed: 6 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -194,7 +194,6 @@ struct vcpu_svm {
194194

195195
unsigned int3_injected;
196196
unsigned long int3_rip;
197-
u32 apf_reason;
198197

199198
/* cached guest cpuid flags for faster access */
200199
bool nrips_enabled : 1;
@@ -2122,34 +2121,11 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
21222121
static int pf_interception(struct vcpu_svm *svm)
21232122
{
21242123
u64 fault_address = svm->vmcb->control.exit_info_2;
2125-
u64 error_code;
2126-
int r = 1;
2124+
u64 error_code = svm->vmcb->control.exit_info_1;
21272125

2128-
switch (svm->apf_reason) {
2129-
default:
2130-
error_code = svm->vmcb->control.exit_info_1;
2131-
2132-
trace_kvm_page_fault(fault_address, error_code);
2133-
if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
2134-
kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
2135-
r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
2126+
return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
21362127
svm->vmcb->control.insn_bytes,
2137-
svm->vmcb->control.insn_len);
2138-
break;
2139-
case KVM_PV_REASON_PAGE_NOT_PRESENT:
2140-
svm->apf_reason = 0;
2141-
local_irq_disable();
2142-
kvm_async_pf_task_wait(fault_address);
2143-
local_irq_enable();
2144-
break;
2145-
case KVM_PV_REASON_PAGE_READY:
2146-
svm->apf_reason = 0;
2147-
local_irq_disable();
2148-
kvm_async_pf_task_wake(fault_address);
2149-
local_irq_enable();
2150-
break;
2151-
}
2152-
return r;
2128+
svm->vmcb->control.insn_len, !npt_enabled);
21532129
}
21542130

21552131
static int db_interception(struct vcpu_svm *svm)
@@ -2630,7 +2606,7 @@ static int nested_svm_exit_special(struct vcpu_svm *svm)
26302606
break;
26312607
case SVM_EXIT_EXCP_BASE + PF_VECTOR:
26322608
/* When we're shadowing, trap PFs, but not async PF */
2633-
if (!npt_enabled && svm->apf_reason == 0)
2609+
if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
26342610
return NESTED_EXIT_HOST;
26352611
break;
26362612
default:
@@ -2677,7 +2653,7 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
26772653
}
26782654
/* async page fault always cause vmexit */
26792655
else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
2680-
svm->apf_reason != 0)
2656+
svm->vcpu.arch.apf.host_apf_reason != 0)
26812657
vmexit = NESTED_EXIT_DONE;
26822658
break;
26832659
}
@@ -4998,7 +4974,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
49984974

49994975
/* if exit due to PF check for async PF */
50004976
if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
5001-
svm->apf_reason = kvm_read_and_reset_pf_reason();
4977+
svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
50024978

50034979
if (npt_enabled) {
50044980
vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);

arch/x86/kvm/vmx.c

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -5698,14 +5698,11 @@ static int handle_exception(struct kvm_vcpu *vcpu)
56985698
}
56995699

57005700
if (is_page_fault(intr_info)) {
5701-
/* EPT won't cause page fault directly */
5702-
BUG_ON(enable_ept);
57035701
cr2 = vmcs_readl(EXIT_QUALIFICATION);
5704-
trace_kvm_page_fault(cr2, error_code);
5705-
5706-
if (kvm_event_needs_reinjection(vcpu))
5707-
kvm_mmu_unprotect_page_virt(vcpu, cr2);
5708-
return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
5702+
/* EPT won't cause page fault directly */
5703+
WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
5704+
return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0,
5705+
true);
57095706
}
57105707

57115708
ex_no = intr_info & INTR_INFO_VECTOR_MASK;
@@ -8643,6 +8640,10 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
86438640
exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
86448641
vmx->exit_intr_info = exit_intr_info;
86458642

8643+
/* if exit due to PF check for async PF */
8644+
if (is_page_fault(exit_intr_info))
8645+
vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
8646+
86468647
/* Handle machine checks before interrupts are enabled */
86478648
if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
86488649
is_machine_check(exit_intr_info))

0 commit comments

Comments
 (0)