@@ -188,12 +188,21 @@ module_param(ple_window_max, uint, 0444);
 
 extern const ulong vmx_return;
 
+enum ept_pointers_status {
+	EPT_POINTERS_CHECK = 0,
+	EPT_POINTERS_MATCH = 1,
+	EPT_POINTERS_MISMATCH = 2
+};
+
 struct kvm_vmx {
 	struct kvm kvm;
 
 	unsigned int tss_addr;
 	bool ept_identity_pagetable_done;
 	gpa_t ept_identity_map_addr;
+
+	enum ept_pointers_status ept_pointers_match;
+	spinlock_t ept_pointer_lock;
 };
 
 #define NR_AUTOLOAD_MSRS 8
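Note: the new enum is a per-VM tri-state verdict protected by ept_pointer_lock. EPT_POINTERS_CHECK means the per-vCPU EPT pointers must be re-compared, EPT_POINTERS_MATCH means every vCPU currently uses the same EPT root (so a single hypercall can flush the whole VM), and EPT_POINTERS_MISMATCH means they diverge. Below is a minimal user-space sketch of the same invalidate-then-recheck pattern, with a pthread mutex standing in for the kernel spinlock and a plain array standing in for the vCPUs; all names here are illustrative, not kernel APIs.

/* Sketch only: writers invalidate the cached verdict to CHECK,
 * and the next reader re-derives MATCH/MISMATCH under the lock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

enum match_status { STATUS_CHECK, STATUS_MATCH, STATUS_MISMATCH };

struct shared {
	pthread_mutex_t lock;
	enum match_status status;
	uint64_t values[4];	/* stand-ins for per-vCPU EPT pointers */
	int nr;			/* number of valid entries */
};

/* Writer side: publish a new value and force re-evaluation. */
static void update_value(struct shared *s, int idx, uint64_t val)
{
	pthread_mutex_lock(&s->lock);
	s->values[idx] = val;
	s->status = STATUS_CHECK;
	pthread_mutex_unlock(&s->lock);
}

/* Reader side: recompute the verdict only if it was invalidated. */
static bool all_values_match(struct shared *s)
{
	bool match;

	pthread_mutex_lock(&s->lock);
	if (s->status == STATUS_CHECK) {
		s->status = STATUS_MATCH;
		for (int i = 1; i < s->nr; i++) {
			if (s->values[i] != s->values[0]) {
				s->status = STATUS_MISMATCH;
				break;
			}
		}
	}
	match = (s->status == STATUS_MATCH);
	pthread_mutex_unlock(&s->lock);
	return match;
}

In the kernel version the verdict additionally gates which flush primitive is used, as shown in the vmx_hv_remote_flush_tlb() hunk further down.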
@@ -863,6 +872,7 @@ struct vcpu_vmx {
 	 */
 	u64 msr_ia32_feature_control;
 	u64 msr_ia32_feature_control_valid_bits;
+	u64 ept_pointer;
 };
 
 enum segment_cache_field {
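The new per-vCPU ept_pointer field caches the value last written to the EPT_POINTER VMCS field. Keeping the copy in vcpu_vmx lets the match check compare all vCPUs' EPT roots from any context; reading the VMCS field directly would require each vCPU's VMCS to be loaded on the current CPU, which is far more expensive.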
@@ -1357,6 +1367,48 @@ static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
 	 *	GUEST_IA32_RTIT_CTL		= 0x00002814,
 	 */
 }
+
+/* check_ept_pointer_match() should be called under protection of ept_pointer_lock. */
+static void check_ept_pointer_match(struct kvm *kvm)
+{
+	struct kvm_vcpu *vcpu;
+	u64 tmp_eptp = INVALID_PAGE;
+	int i;
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (!VALID_PAGE(tmp_eptp)) {
+			tmp_eptp = to_vmx(vcpu)->ept_pointer;
+		} else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
+			to_kvm_vmx(kvm)->ept_pointers_match
+				= EPT_POINTERS_MISMATCH;
+			return;
+		}
+	}
+
+	to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
+}
+
+static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
+{
+	int ret;
+
+	spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+
+	if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
+		check_ept_pointer_match(kvm);
+
+	if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
+		ret = -ENOTSUPP;
+		goto out;
+	}
+
+	ret = hyperv_flush_guest_mapping(
+			to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer);
+
+out:
+	spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+	return ret;
+}
 #else /* !IS_ENABLED(CONFIG_HYPERV) */
 static inline void evmcs_write64(unsigned long field, u64 value) {}
 static inline void evmcs_write32(unsigned long field, u32 value) {}
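vmx_hv_remote_flush_tlb() re-runs the match check lazily, only when the cached verdict has been invalidated to EPT_POINTERS_CHECK, and returns -ENOTSUPP when the vCPUs' EPT roots differ so the generic code can fall back to the conventional per-vCPU flush. For orientation, here is a hedged sketch of how such a hook is consumed on the generic side (the actual kvm_flush_remote_tlbs()/arch-hook wiring is added elsewhere in this series; the two function names below are illustrative):

/* Illustrative consumer: try the paravirtual remote flush first and,
 * if the hook is absent or fails, fall back to kicking every vCPU
 * with a TLB-flush request. */
static int try_remote_flush(struct kvm *kvm)
{
	if (!kvm_x86_ops->tlb_remote_flush)
		return -ENOTSUPP;
	return kvm_x86_ops->tlb_remote_flush(kvm);
}

void flush_all_vcpu_tlbs(struct kvm *kvm)
{
	if (try_remote_flush(kvm))
		kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH);
}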
@@ -5041,18 +5093,28 @@ static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
 
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
+	struct kvm *kvm = vcpu->kvm;
 	unsigned long guest_cr3;
 	u64 eptp;
 
 	guest_cr3 = cr3;
 	if (enable_ept) {
 		eptp = construct_eptp(vcpu, cr3);
 		vmcs_write64(EPT_POINTER, eptp);
+
+		if (kvm_x86_ops->tlb_remote_flush) {
+			spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+			to_vmx(vcpu)->ept_pointer = eptp;
+			to_kvm_vmx(kvm)->ept_pointers_match
+				= EPT_POINTERS_CHECK;
+			spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+		}
+
 		if (enable_unrestricted_guest || is_paging(vcpu) ||
 		    is_guest_mode(vcpu))
 			guest_cr3 = kvm_read_cr3(vcpu);
 		else
-			guest_cr3 = to_kvm_vmx(vcpu->kvm)->ept_identity_map_addr;
+			guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
 		ept_load_pdptrs(vcpu);
 	}
 
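Whenever a vCPU loads a new EPT pointer, vmx_set_cr3() records it in the per-vCPU cache and downgrades the VM-wide verdict to EPT_POINTERS_CHECK rather than recomputing the comparison on the spot; the next remote flush re-derives MATCH/MISMATCH under the lock. This is the same invalidate-then-recheck pattern sketched after the first hunk, and it keeps the hot CR3-switch path cheap. The vcpu->kvm dereference is also hoisted into a local kvm variable, which the last context lines then reuse.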
@@ -7622,6 +7684,12 @@ static __init int hardware_setup(void)
 	if (enable_ept && !cpu_has_vmx_ept_2m_page())
 		kvm_disable_largepages();
 
+#if IS_ENABLED(CONFIG_HYPERV)
+	if (ms_hyperv.nested_features & HV_X64_NESTED_GUEST_MAPPING_FLUSH
+	    && enable_ept)
+		kvm_x86_ops->tlb_remote_flush = vmx_hv_remote_flush_tlb;
+#endif
+
 	if (!cpu_has_vmx_ple()) {
 		ple_gap = 0;
 		ple_window = 0;
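The hook is installed only when Hyper-V advertises HV_X64_NESTED_GUEST_MAPPING_FLUSH in its nested features and EPT is in use, since the hypercall flushes mappings of a guest physical address space identified by an EPT pointer. As a rough orientation, a simplified sketch of what hyperv_flush_guest_mapping() boils down to (the real implementation lives in arch/x86/hyperv; the helpers marked hypothetical stand in for its per-cpu input-page and status-to-errno handling):

/* Simplified sketch, not the real Hyper-V wrapper code. */
int hyperv_flush_guest_mapping_sketch(u64 as)
{
	struct hv_guest_mapping_flush *flush;	/* hypercall input page */
	u64 status;

	flush = get_hypercall_input_page();	/* hypothetical helper */
	if (!flush)
		return -EFAULT;

	flush->address_space = as;	/* EPT pointer whose mappings to flush */
	flush->flags = 0;

	status = hv_do_hypercall(HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE,
				 flush, NULL);
	return hv_status_to_errno(status);	/* hypothetical mapping */
}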
@@ -10665,6 +10733,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 static int vmx_vm_init(struct kvm *kvm)
 {
+	spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock);
+
 	if (!ple_gap)
 		kvm->arch.pause_in_guest = true;
 	return 0;
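Finally, the spinlock is initialized at VM creation time, before any vCPU exists, so every later acquisition in vmx_set_cr3() or vmx_hv_remote_flush_tlb() sees an initialized ept_pointer_lock.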