@@ -10660,9 +10660,9 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
                                                  struct vmcs12 *vmcs12);
 
-static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
-                                    struct vmcs12 *vmcs12)
+static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
+        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
         struct vcpu_vmx *vmx = to_vmx(vcpu);
         struct page *page;
         u64 hpa;
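With vmcs12 now derived inside the function via get_vmcs12(vcpu), nested_get_vmcs12_pages matches the single-argument shape required by the kvm_x86_ops hook installed in the last hunk. A sketch of the corresponding member declaration, assuming it sits alongside the other nested hooks in arch/x86/include/asm/kvm_host.h (the declaration itself is not part of these hunks):

        /* struct kvm_x86_ops: callback invoked when KVM_REQ_GET_VMCS12_PAGES
         * is serviced; its placement within the struct is illustrative. */
        void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);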
@@ -11774,12 +11774,17 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
         return 0;
 }
 
-static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
+/*
+ * If exit_qual is NULL, this is being called from RSM.
+ * Otherwise it's called from vmlaunch/vmresume.
+ */
+static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
 {
         struct vcpu_vmx *vmx = to_vmx(vcpu);
         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-        u32 exit_qual;
-        int r;
+        bool from_vmentry = !!exit_qual;
+        u32 dummy_exit_qual;
+        int r = 0;
 
         enter_guest_mode(vcpu);
 
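The comment above establishes the new calling convention, and the later hunks in this diff exercise both forms. Condensed for reference (hypothetical snippets mirroring those call sites):

        /* vmlaunch/vmresume path: pass a real pointer so a failing entry
         * can report its exit qualification to L1. */
        u32 exit_qual;
        ret = enter_vmx_non_root_mode(vcpu, &exit_qual);

        /* RSM path: pass NULL; the MSRs were already loaded at the original
         * vmentry and there is no exit qualification to report. */
        ret = enter_vmx_non_root_mode(vcpu, NULL);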
@@ -11793,17 +11798,28 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
                 vcpu->arch.tsc_offset += vmcs12->tsc_offset;
 
         r = EXIT_REASON_INVALID_STATE;
-        if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
+        if (prepare_vmcs02(vcpu, vmcs12, from_vmentry ? exit_qual : &dummy_exit_qual))
                 goto fail;
 
-        nested_get_vmcs12_pages(vcpu, vmcs12);
+        if (from_vmentry) {
+                nested_get_vmcs12_pages(vcpu);
 
-        r = EXIT_REASON_MSR_LOAD_FAIL;
-        exit_qual = nested_vmx_load_msr(vcpu,
-                                        vmcs12->vm_entry_msr_load_addr,
-                                        vmcs12->vm_entry_msr_load_count);
-        if (exit_qual)
-                goto fail;
+                r = EXIT_REASON_MSR_LOAD_FAIL;
+                *exit_qual = nested_vmx_load_msr(vcpu,
+                                                 vmcs12->vm_entry_msr_load_addr,
+                                                 vmcs12->vm_entry_msr_load_count);
+                if (*exit_qual)
+                        goto fail;
+        } else {
+                /*
+                 * The MMU is not initialized to point at the right entities yet and
+                 * "get pages" would need to read data from the guest (i.e. we will
+                 * need to perform gpa to hpa translation). Request a call
+                 * to nested_get_vmcs12_pages before the next VM-entry.  The MSRs
+                 * have already been set at vmentry time and should not be reset.
+                 */
+                kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+        }
 
         /*
          * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
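The deferred work requested here has to be picked up on the generic x86 side before the next VM-entry, once the MMU can translate L1 gpa to hpa again. A minimal sketch of that consumer, assuming the usual request pattern in vcpu_enter_guest() (the request number and exact placement are assumptions, not shown in this diff):

        /* arch/x86/include/asm/kvm_host.h: a new arch request bit; the
         * number 24 is illustrative. */
        #define KVM_REQ_GET_VMCS12_PAGES        KVM_ARCH_REQ(24)

        /* arch/x86/kvm/x86.c, vcpu_enter_guest(): service the request with
         * the other pending requests, before entering the guest. */
        if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
                kvm_x86_ops->get_vmcs12_pages(vcpu);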
@@ -11818,8 +11834,7 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
                 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
         leave_guest_mode(vcpu);
         vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-        nested_vmx_entry_failure(vcpu, vmcs12, r, exit_qual);
-        return 1;
+        return r;
 }
 
 /*
@@ -11896,10 +11911,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
          */
 
         vmx->nested.nested_run_pending = 1;
-        ret = enter_vmx_non_root_mode(vcpu);
+        ret = enter_vmx_non_root_mode(vcpu, &exit_qual);
         if (ret) {
+                nested_vmx_entry_failure(vcpu, vmcs12, ret, exit_qual);
                 vmx->nested.nested_run_pending = 0;
-                return ret;
+                return 1;
         }
 
         /*
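Moving nested_vmx_entry_failure() out to this caller keeps enter_vmx_non_root_mode() usable from RSM, where no failed VM-entry should be synthesized for L1. As a reminder of what the helper does (a conceptual sketch, not the function body from this diff):

        /* nested_vmx_entry_failure(), conceptually: restore L1 state and
         * reflect a failed VM-entry, which sets bit 31 of the exit reason. */
        load_vmcs12_host_state(vcpu, vmcs12);
        vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
        vmcs12->exit_qualification = qualification;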
@@ -12985,7 +13001,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
 
         if (vmx->nested.smm.guest_mode) {
                 vcpu->arch.hflags &= ~HF_SMM_MASK;
-                ret = enter_vmx_non_root_mode(vcpu);
+                ret = enter_vmx_non_root_mode(vcpu, NULL);
                 vcpu->arch.hflags |= HF_SMM_MASK;
                 if (ret)
                         return ret;
@@ -13134,6 +13150,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 
         .setup_mce = vmx_setup_mce,
 
+        .get_vmcs12_pages = nested_get_vmcs12_pages,
+
         .smi_allowed = vmx_smi_allowed,
         .pre_enter_smm = vmx_pre_enter_smm,
         .pre_leave_smm = vmx_pre_leave_smm,