@@ -1618,7 +1618,8 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
	 * maximum supported version. KVM supports versions from 1 to
	 * KVM_EVMCS_VERSION.
	 */
-	*vmcs_version = (KVM_EVMCS_VERSION << 8) | 1;
+	if (vmcs_version)
+		*vmcs_version = (KVM_EVMCS_VERSION << 8) | 1;
 
	vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
	vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
@@ -9338,7 +9339,8 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
  * This is an equivalent of the nested hypervisor executing the vmptrld
  * instruction.
  */
-static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu)
+static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
+						  bool from_launch)
 {
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct hv_vp_assist_page assist_page;
@@ -9389,8 +9391,9 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu)
		 * present in struct hv_enlightened_vmcs, ...). Make sure there
		 * are no leftovers.
		 */
-		memset(vmx->nested.cached_vmcs12, 0,
-		       sizeof(*vmx->nested.cached_vmcs12));
+		if (from_launch)
+			memset(vmx->nested.cached_vmcs12, 0,
+			       sizeof(*vmx->nested.cached_vmcs12));
 
	}
	return 1;
@@ -11147,6 +11150,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
	}
 
	if (vmx->nested.need_vmcs12_sync) {
+		/*
+		 * hv_evmcs may end up being not mapped after migration (when
+		 * L2 was running), map it here to make sure vmcs12 changes are
+		 * properly reflected.
+		 */
+		if (vmx->nested.enlightened_vmcs_enabled &&
+		    !vmx->nested.hv_evmcs)
+			nested_vmx_handle_enlightened_vmptrld(vcpu, false);
+
		if (vmx->nested.hv_evmcs) {
			copy_vmcs12_to_enlightened(vmx);
			/* All fields are clean */
@@ -13424,7 +13436,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
	if (!nested_vmx_check_permission(vcpu))
		return 1;
 
-	if (!nested_vmx_handle_enlightened_vmptrld(vcpu))
+	if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true))
		return 1;
 
	if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
@@ -14711,6 +14723,20 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
	return 0;
 }
 
+static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	/*
+	 * In case we do two consecutive get/set_nested_state()s while L2 was
+	 * running hv_evmcs may end up not being mapped (we map it from
+	 * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always
+	 * have vmcs12 if it is true.
+	 */
+	return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
+		vmx->nested.hv_evmcs;
+}
+
 static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
				struct kvm_nested_state __user *user_kvm_nested_state,
				u32 user_data_size)
@@ -14731,16 +14757,15 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
	vmx = to_vmx(vcpu);
	vmcs12 = get_vmcs12(vcpu);
 
-	/* FIXME: Enlightened VMCS is currently unsupported */
-	if (vmx->nested.hv_evmcs)
-		return -ENOTSUPP;
+	if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
+		kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
 
	if (nested_vmx_allowed(vcpu) &&
	    (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
		kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
		kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
 
-		if (vmx->nested.current_vmptr != -1ull) {
+		if (vmx_has_valid_vmcs12(vcpu)) {
			kvm_state.size += VMCS12_SIZE;
 
			if (is_guest_mode(vcpu) &&
@@ -14769,20 +14794,24 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
	if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
		return -EFAULT;
 
-	if (vmx->nested.current_vmptr == -1ull)
+	if (!vmx_has_valid_vmcs12(vcpu))
		goto out;
 
	/*
	 * When running L2, the authoritative vmcs12 state is in the
	 * vmcs02. When running L1, the authoritative vmcs12 state is
-	 * in the shadow vmcs linked to vmcs01, unless
+	 * in the shadow or enlightened vmcs linked to vmcs01, unless
	 * need_vmcs12_sync is set, in which case, the authoritative
	 * vmcs12 state is in the vmcs12 already.
	 */
-	if (is_guest_mode(vcpu))
+	if (is_guest_mode(vcpu)) {
		sync_vmcs12(vcpu, vmcs12);
-	else if (enable_shadow_vmcs && !vmx->nested.need_vmcs12_sync)
-		copy_shadow_to_vmcs12(vmx);
+	} else if (!vmx->nested.need_vmcs12_sync) {
+		if (vmx->nested.hv_evmcs)
+			copy_enlightened_to_vmcs12(vmx);
+		else if (enable_shadow_vmcs)
+			copy_shadow_to_vmcs12(vmx);
+	}
 
	if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
		return -EFAULT;
@@ -14810,6 +14839,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
	if (kvm_state->format != 0)
		return -EINVAL;
 
+	if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
+		nested_enable_evmcs(vcpu, NULL);
+
	if (!nested_vmx_allowed(vcpu))
		return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
 
@@ -14860,11 +14892,21 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
	if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
		return 0;
 
-	if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
-	    !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
-		return -EINVAL;
+	if (kvm_state->vmx.vmcs_pa != -1ull) {
+		if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
+		    !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
+			return -EINVAL;
 
-	set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+		set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+	} else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
+		/*
+		 * Sync eVMCS upon entry as we may not have
+		 * HV_X64_MSR_VP_ASSIST_PAGE set up yet.
+		 */
+		vmx->nested.need_vmcs12_sync = true;
+	} else {
+		return -EINVAL;
+	}
 
	if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
		vmx->nested.smm.vmxon = true;
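
On the save side, the effect visible to userspace is that KVM_GET_NESTED_STATE no longer fails with -ENOTSUPP while an enlightened VMCS is in use: the returned state carries the KVM_STATE_NESTED_EVMCS flag and, via vmx_has_valid_vmcs12(), a vmcs12 payload even though vmcs_pa is -1ull. Below is a minimal sketch of how a VMM might consume this; it is not part of the patch, the helper name save_nested_state() is hypothetical, and it assumes a <linux/kvm.h> that already exports KVM_GET_NESTED_STATE and KVM_STATE_NESTED_EVMCS and an initialized KVM vCPU file descriptor.

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_nested_state *save_nested_state(int vcpu_fd, bool *evmcs_in_use)
{
	struct kvm_nested_state hdr = { .size = sizeof(hdr) };
	struct kvm_nested_state *state;

	/*
	 * A too-small buffer makes the ioctl fail with E2BIG after the
	 * required size has been written back into hdr.size.
	 */
	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, &hdr) < 0 && errno != E2BIG)
		return NULL;

	state = calloc(1, hdr.size);
	if (!state)
		return NULL;
	state->size = hdr.size;

	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, state) < 0) {
		free(state);
		return NULL;
	}

	/* With this patch the flag is set whenever eVMCS was enabled for L1. */
	*evmcs_in_use = state->flags & KVM_STATE_NESTED_EVMCS;
	return state;
}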
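On the restore side, the incoming state may now carry KVM_STATE_NESTED_EVMCS together with vmcs_pa == -1ull: KVM re-enables eVMCS through nested_enable_evmcs(vcpu, NULL) and only sets need_vmcs12_sync, deferring the mapping of the eVMCS page to nested_vmx_handle_enlightened_vmptrld() on the next vcpu run, once HV_X64_MSR_VP_ASSIST_PAGE has been restored. A matching (hypothetical) restore helper, under the same assumptions as the save sketch above:

/*
 * Hand the saved blob back to KVM. The VP assist page MSR does not have
 * to be set up yet at this point; with this patch KVM does not touch the
 * eVMCS page here and only maps it from vmx_vcpu_run() on the next KVM_RUN.
 */
static int restore_nested_state(int vcpu_fd, struct kvm_nested_state *state)
{
	return ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state);
}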