@@ -6143,6 +6143,13 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
 	 */
 }
 
+static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
+{
+	/* TODO: not to reset guest simply here. */
+	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+	pr_warn("kvm: nested vmx abort, indicator %d\n", indicator);
+}
+
 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
 {
 	struct vcpu_vmx *vmx =
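
The helper above models a VMX abort: a failure while restoring host state on VM exit has no architectural error code that can be handed back to L1, so real hardware records an abort indicator and shuts the logical processor down. KVM approximates this by injecting a triple fault and logging the indicator. A minimal standalone sketch of the indicator values used later in this patch; the names match the patch's call sites, but treat the exact numbers as an assumption taken from the SDM's VMX-abort table rather than from this diff:

/* Sketch only: VMX-abort indicators (Intel SDM, "VMX Aborts").
 * Values are assumed to match arch/x86/include/uapi/asm/vmx.h. */
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL	1	/* saving guest MSRs on VM exit failed */
#define VMX_ABORT_LOAD_HOST_MSR_FAIL	4	/* loading host MSRs on VM exit failed */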
@@ -8286,6 +8293,67 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
 			ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
 }
 
+static inline int nested_vmx_msr_check_common(struct vmx_msr_entry *e)
+{
+	if (e->index >> 8 == 0x8 || e->reserved != 0)
+		return -EINVAL;
+	return 0;
+}
+
+static inline int nested_vmx_load_msr_check(struct vmx_msr_entry *e)
+{
+	if (e->index == MSR_FS_BASE ||
+	    e->index == MSR_GS_BASE ||
+	    nested_vmx_msr_check_common(e))
+		return -EINVAL;
+	return 0;
+}
+
+/*
+ * Load guest's/host's msr at nested entry/exit.
+ * return 0 for success, entry index for failure.
+ */
+static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+	u32 i;
+	struct vmx_msr_entry e;
+	struct msr_data msr;
+
+	msr.host_initiated = false;
+	for (i = 0; i < count; i++) {
+		kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e), &e, sizeof(e));
+		if (nested_vmx_load_msr_check(&e))
+			goto fail;
+		msr.index = e.index;
+		msr.data = e.value;
+		if (kvm_set_msr(vcpu, &msr))
+			goto fail;
+	}
+	return 0;
+fail:
+	return i + 1;
+}
+
+static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
+{
+	u32 i;
+	struct vmx_msr_entry e;
+
+	for (i = 0; i < count; i++) {
+		kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
+			       &e, 2 * sizeof(u32));
+		if (nested_vmx_msr_check_common(&e))
+			return -EINVAL;
+		if (kvm_get_msr(vcpu, e.index, &e.value))
+			return -EINVAL;
+		kvm_write_guest(vcpu->kvm,
+				gpa + i * sizeof(e) +
+				offsetof(struct vmx_msr_entry, value),
+				&e.value, sizeof(e.value));
+	}
+	return 0;
+}
+
 /*
  * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
  * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
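
The helpers above walk the VM-entry/VM-exit MSR areas in L1's guest-physical memory, 16 bytes per entry, and refuse entries the hardware itself would refuse: x2APIC MSRs (indices 0x800-0x8ff, caught by e->index >> 8 == 0x8), non-zero reserved bits, and FS/GS base on the load path. Below is a self-contained user-space sketch of that entry layout and filter; it assumes struct vmx_msr_entry follows the SDM's index/reserved/value layout, and the MSR numbers in main() are only illustrative:

/*
 * Standalone sketch (not kernel code) of the 16-byte MSR-area entry format
 * that nested_vmx_load_msr()/nested_vmx_store_msr() walk, and of the index
 * filtering done by nested_vmx_msr_check_common().
 */
#include <stdint.h>
#include <stdio.h>

struct msr_area_entry {
	uint32_t index;     /* MSR number */
	uint32_t reserved;  /* must be zero */
	uint64_t value;     /* MSR data */
};

/* Reject entries the hardware would refuse: any x2APIC MSR (0x800-0x8ff)
 * and any entry with reserved bits set; mirrors "index >> 8 == 0x8". */
static int entry_is_valid(const struct msr_area_entry *e)
{
	if ((e->index >> 8) == 0x8 || e->reserved != 0)
		return 0;
	return 1;
}

int main(void)
{
	struct msr_area_entry area[2] = {
		{ .index = 0xc0000080 /* IA32_EFER, for illustration */, .value = 0x500 },
		{ .index = 0x802      /* x2APIC range: must be rejected */ },
	};

	for (unsigned i = 0; i < 2; i++)
		printf("entry %u (MSR 0x%x): %s\n", i, area[i].index,
		       entry_is_valid(&area[i]) ? "ok" : "rejected");
	return 0;
}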
@@ -8582,6 +8650,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	int cpu;
 	struct loaded_vmcs *vmcs02;
 	bool ia32e;
+	u32 msr_entry_idx;
 
 	if (!nested_vmx_check_permission(vcpu) ||
 	    !nested_vmx_check_vmcs12(vcpu))
@@ -8629,15 +8698,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		return 1;
 	}
 
-	if (vmcs12->vm_entry_msr_load_count > 0 ||
-	    vmcs12->vm_exit_msr_load_count > 0 ||
-	    vmcs12->vm_exit_msr_store_count > 0) {
-		pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n",
-				    __func__);
-		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-		return 1;
-	}
-
 	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
 				nested_vmx_true_procbased_ctls_low,
 				nested_vmx_procbased_ctls_high) ||
@@ -8739,10 +8799,21 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 
 	vmx_segment_cache_clear(vmx);
 
-	vmcs12->launch_state = 1;
-
 	prepare_vmcs02(vcpu, vmcs12);
 
+	msr_entry_idx = nested_vmx_load_msr(vcpu,
+					    vmcs12->vm_entry_msr_load_addr,
+					    vmcs12->vm_entry_msr_load_count);
+	if (msr_entry_idx) {
+		leave_guest_mode(vcpu);
+		vmx_load_vmcs01(vcpu);
+		nested_vmx_entry_failure(vcpu, vmcs12,
+				EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
+		return 1;
+	}
+
+	vmcs12->launch_state = 1;
+
 	if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
 		return kvm_emulate_halt(vcpu);
 
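
On a failed MSR load at nested VM entry, nested_vmx_load_msr() returns i + 1, the 1-based number of the offending entry, and the hunk above reflects that back to L1 through nested_vmx_entry_failure() instead of killing the guest. Per the SDM this should appear to L1 as a VM-entry failure with the "VM-entry failure due to MSR loading" exit reason and the entry number in the exit qualification. A hedged user-space sketch of the check an L1 hypervisor might perform; the numeric constants are assumptions from the SDM, not taken from this patch:

#include <stdint.h>

/* Assumed encodings: bit 31 of the exit reason flags a VM-entry failure,
 * and basic exit reason 34 is "VM-entry failure due to MSR loading". */
#define VMX_EXIT_REASONS_FAILED_VMENTRY	0x80000000u
#define EXIT_REASON_MSR_LOAD_FAIL	34

/* Returns the 1-based index of the offending MSR-load entry, or 0. */
static uint64_t failed_msr_load_entry(uint32_t exit_reason, uint64_t exit_qual)
{
	if (!(exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
		return 0;
	if ((exit_reason & 0xffffu) != EXIT_REASON_MSR_LOAD_FAIL)
		return 0;
	return exit_qual;	/* index reported by nested_vmx_load_msr() */
}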
@@ -9172,6 +9243,10 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 
 	kvm_set_dr(vcpu, 7, 0x400);
 	vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
+
+	if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
+				vmcs12->vm_exit_msr_load_count))
+		nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
 }
 
 /*
@@ -9193,6 +9268,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
 		       exit_qualification);
 
+	if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
+				 vmcs12->vm_exit_msr_store_count))
+		nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
+
 	vmx_load_vmcs01(vcpu);
 
 	if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)