
Commit 7f7f1ba

KVM: x86: do not load vmcs12 pages while still in SMM
If the vCPU enters system management mode while running a nested guest,
RSM starts processing the vmentry while still in SMM. In that case,
however, the pages pointed to by the vmcs12 might be incorrectly loaded
from SMRAM. To avoid this, delay the handling of the pages until just
before the next vmentry. This is done with a new request and a new entry
in kvm_x86_ops, which we will be able to reuse for nested VMX state
migration.

Extracted from a patch by Jim Mattson and KarimAllah Ahmed.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent: fa3899a
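To make the deferral concrete before diving into the diffs, here is a small stand-alone C sketch of the pattern this commit introduces: the RSM path only queues a request bit, and the bit is consumed just before the next vmentry. All scaffolding below (struct fields, bit value, printf) is invented for illustration; the kernel uses kvm_make_request() and kvm_check_request() on vcpu->requests, as the diffs that follow show.

#include <stdbool.h>
#include <stdio.h>

#define REQ_GET_VMCS12_PAGES	(1ul << 24)	/* stand-in for KVM_REQ_GET_VMCS12_PAGES */

struct vcpu {
	unsigned long requests;
	bool vmcs12_pages_mapped;
};

/* RSM path: guest memory cannot be read safely yet, so only queue a request. */
static void rsm_resume_nested_guest(struct vcpu *v)
{
	v->requests |= REQ_GET_VMCS12_PAGES;	/* models kvm_make_request() */
}

/* vcpu_enter_guest() path: service the request just before running the guest. */
static void enter_guest(struct vcpu *v)
{
	if (v->requests & REQ_GET_VMCS12_PAGES) {	/* models kvm_check_request() */
		v->requests &= ~REQ_GET_VMCS12_PAGES;
		v->vmcs12_pages_mapped = true;	/* stands in for nested_get_vmcs12_pages() */
	}
	printf("entering guest, vmcs12 pages mapped: %s\n",
	       v->vmcs12_pages_mapped ? "yes" : "no");
}

int main(void)
{
	struct vcpu v = { 0 };

	rsm_resume_nested_guest(&v);
	enter_guest(&v);
	return 0;
}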

3 files changed: 41 additions, 18 deletions

arch/x86/include/asm/kvm_host.h

Lines changed: 3 additions & 0 deletions
@@ -75,6 +75,7 @@
 #define KVM_REQ_HV_EXIT			KVM_ARCH_REQ(21)
 #define KVM_REQ_HV_STIMER		KVM_ARCH_REQ(22)
 #define KVM_REQ_LOAD_EOI_EXITMAP	KVM_ARCH_REQ(23)
+#define KVM_REQ_GET_VMCS12_PAGES	KVM_ARCH_REQ(24)
 
 #define CR0_RESERVED_BITS                                               \
 	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
@@ -1085,6 +1086,8 @@ struct kvm_x86_ops {
 
 	void (*setup_mce)(struct kvm_vcpu *vcpu);
 
+	void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+
 	int (*smi_allowed)(struct kvm_vcpu *vcpu);
 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
 	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
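The new hook follows the usual kvm_x86_ops pattern: common x86 code dispatches through a per-vendor ops table, so x86.c can service the request without referencing VMX internals directly. Below is a minimal user-space model of that dispatch; the trimmed struct x86_ops and the main() scaffolding are invented, and only the get_vmcs12_pages name mirrors the patch.

#include <stdio.h>

struct vcpu {
	int id;
};

/* Trimmed-down model of struct kvm_x86_ops with just the new hook. */
struct x86_ops {
	void (*get_vmcs12_pages)(struct vcpu *vcpu);
};

/* Vendor (VMX) implementation, registered in the ops table. */
static void vmx_get_vmcs12_pages(struct vcpu *vcpu)
{
	printf("vcpu %d: mapping vmcs12 pages\n", vcpu->id);
}

static const struct x86_ops vmx_ops = {
	.get_vmcs12_pages = vmx_get_vmcs12_pages,
};

int main(void)
{
	struct vcpu v = { .id = 0 };
	const struct x86_ops *ops = &vmx_ops;	/* selected once at module init in KVM */

	ops->get_vmcs12_pages(&v);	/* common code never names VMX directly */
	return 0;
}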

arch/x86/kvm/vmx.c

Lines changed: 36 additions & 18 deletions
@@ -10660,9 +10660,9 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 						 struct vmcs12 *vmcs12);
 
-static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
-				    struct vmcs12 *vmcs12)
+static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct page *page;
 	u64 hpa;
@@ -11774,12 +11774,17 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
 	return 0;
 }
 
-static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
+/*
+ * If exit_qual is NULL, this is being called from RSM.
+ * Otherwise it's called from vmlaunch/vmresume.
+ */
+static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-	u32 exit_qual;
-	int r;
+	bool from_vmentry = !!exit_qual;
+	u32 dummy_exit_qual;
+	int r = 0;
 
 	enter_guest_mode(vcpu);
 
@@ -11793,17 +11798,28 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
 	vcpu->arch.tsc_offset += vmcs12->tsc_offset;
 
 	r = EXIT_REASON_INVALID_STATE;
-	if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
+	if (prepare_vmcs02(vcpu, vmcs12, from_vmentry ? exit_qual : &dummy_exit_qual))
 		goto fail;
 
-	nested_get_vmcs12_pages(vcpu, vmcs12);
+	if (from_vmentry) {
+		nested_get_vmcs12_pages(vcpu);
 
-	r = EXIT_REASON_MSR_LOAD_FAIL;
-	exit_qual = nested_vmx_load_msr(vcpu,
-					vmcs12->vm_entry_msr_load_addr,
-					vmcs12->vm_entry_msr_load_count);
-	if (exit_qual)
-		goto fail;
+		r = EXIT_REASON_MSR_LOAD_FAIL;
+		*exit_qual = nested_vmx_load_msr(vcpu,
+						 vmcs12->vm_entry_msr_load_addr,
+						 vmcs12->vm_entry_msr_load_count);
+		if (*exit_qual)
+			goto fail;
+	} else {
+		/*
+		 * The MMU is not initialized to point at the right entities yet and
+		 * "get pages" would need to read data from the guest (i.e. we will
+		 * need to perform gpa to hpa translation). Request a call
+		 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
+		 * have already been set at vmentry time and should not be reset.
+		 */
+		kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+	}
 
 	/*
 	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
@@ -11818,8 +11834,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
 	vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
 	leave_guest_mode(vcpu);
 	vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-	nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
-	return 1;
+	return r;
 }
 
 /*
@@ -11896,10 +11911,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	 */
 
 	vmx->nested.nested_run_pending = 1;
-	ret = enter_vmx_non_root_mode(vcpu);
+	ret = enter_vmx_non_root_mode(vcpu, &exit_qual);
 	if (ret) {
+		nested_vmx_entry_failure(vcpu, vmcs12, ret, exit_qual);
 		vmx->nested.nested_run_pending = 0;
-		return ret;
+		return 1;
 	}
 
 	/*
@@ -12985,7 +13001,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
 
 	if (vmx->nested.smm.guest_mode) {
 		vcpu->arch.hflags &= ~HF_SMM_MASK;
-		ret = enter_vmx_non_root_mode(vcpu);
+		ret = enter_vmx_non_root_mode(vcpu, NULL);
 		vcpu->arch.hflags |= HF_SMM_MASK;
 		if (ret)
 			return ret;
@@ -13134,6 +13150,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
 	.setup_mce = vmx_setup_mce,
 
+	.get_vmcs12_pages = nested_get_vmcs12_pages,
+
 	.smi_allowed = vmx_smi_allowed,
 	.pre_enter_smm = vmx_pre_enter_smm,
 	.pre_leave_smm = vmx_pre_leave_smm,

arch/x86/kvm/x86.c

Lines changed: 2 additions & 0 deletions
@@ -7260,6 +7260,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	bool req_immediate_exit = false;
 
 	if (kvm_request_pending(vcpu)) {
+		if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
+			kvm_x86_ops->get_vmcs12_pages(vcpu);
 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
 			kvm_mmu_unload(vcpu);
 		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
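Note that kvm_check_request() both tests and clears the pending bit, so get_vmcs12_pages runs exactly once per queued request. A tiny user-space model of that test-and-clear behavior (the names here are illustrative, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

static unsigned long requests;

/* Model of kvm_check_request(): returns true exactly once per queued request. */
static bool check_request(unsigned int bit)
{
	if (requests & (1ul << bit)) {
		requests &= ~(1ul << bit);
		return true;
	}
	return false;
}

int main(void)
{
	requests |= 1ul << 24;			/* model of kvm_make_request() */
	printf("%d\n", check_request(24));	/* prints 1: request serviced */
	printf("%d\n", check_request(24));	/* prints 0: bit already cleared */
	return 0;
}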
