@@ -8293,18 +8293,80 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
 		      ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
 }
 
-static inline int nested_vmx_msr_check_common(struct vmx_msr_entry *e)
+static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
+				       unsigned long count_field,
+				       unsigned long addr_field,
+				       int maxphyaddr)
 {
-	if (e->index >> 8 == 0x8 || e->reserved != 0)
+	u64 count, addr;
+
+	if (vmcs12_read_any(vcpu, count_field, &count) ||
+	    vmcs12_read_any(vcpu, addr_field, &addr)) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	if (count == 0)
+		return 0;
+	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
+	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
+		pr_warn_ratelimited(
+			"nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
+			addr_field, maxphyaddr, count, addr);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
+						struct vmcs12 *vmcs12)
+{
+	int maxphyaddr;
+
+	if (vmcs12->vm_exit_msr_load_count == 0 &&
+	    vmcs12->vm_exit_msr_store_count == 0 &&
+	    vmcs12->vm_entry_msr_load_count == 0)
+		return 0; /* Fast path */
+	maxphyaddr = cpuid_maxphyaddr(vcpu);
+	if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
+					VM_EXIT_MSR_LOAD_ADDR, maxphyaddr) ||
+	    nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
+					VM_EXIT_MSR_STORE_ADDR, maxphyaddr) ||
+	    nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
+					VM_ENTRY_MSR_LOAD_ADDR, maxphyaddr))
+		return -EINVAL;
+	return 0;
+}
+
+static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
+				       struct vmx_msr_entry *e)
+{
+	/* x2APIC MSR accesses are not allowed */
+	if (apic_x2apic_mode(vcpu->arch.apic) && e->index >> 8 == 0x8)
+		return -EINVAL;
+	if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
+	    e->index == MSR_IA32_UCODE_REV)
+		return -EINVAL;
+	if (e->reserved != 0)
 		return -EINVAL;
 	return 0;
 }
 
-static inline int nested_vmx_load_msr_check(struct vmx_msr_entry *e)
+static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
+				     struct vmx_msr_entry *e)
 {
 	if (e->index == MSR_FS_BASE ||
 	    e->index == MSR_GS_BASE ||
-	    nested_vmx_msr_check_common(e))
+	    e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
+	    nested_vmx_msr_check_common(vcpu, e))
+		return -EINVAL;
+	return 0;
+}
+
+static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
+				      struct vmx_msr_entry *e)
+{
+	if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
+	    nested_vmx_msr_check_common(vcpu, e))
 		return -EINVAL;
 	return 0;
 }
@@ -8321,13 +8383,27 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 
 	msr.host_initiated = false;
 	for (i = 0; i < count; i++) {
-		kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e), &e, sizeof(e));
-		if (nested_vmx_load_msr_check(&e))
+		if (kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
+				   &e, sizeof(e))) {
+			pr_warn_ratelimited(
+				"%s cannot read MSR entry (%u, 0x%08llx)\n",
+				__func__, i, gpa + i * sizeof(e));
 			goto fail;
+		}
+		if (nested_vmx_load_msr_check(vcpu, &e)) {
+			pr_warn_ratelimited(
+				"%s check failed (%u, 0x%x, 0x%x)\n",
+				__func__, i, e.index, e.reserved);
+			goto fail;
+		}
 		msr.index = e.index;
 		msr.data = e.value;
-		if (kvm_set_msr(vcpu, &msr))
+		if (kvm_set_msr(vcpu, &msr)) {
+			pr_warn_ratelimited(
+				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
+				__func__, i, e.index, e.value);
 			goto fail;
+		}
 	}
 	return 0;
 fail:
@@ -8340,16 +8416,35 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	struct vmx_msr_entry e;
 
 	for (i = 0; i < count; i++) {
-		kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
-			       &e, 2 * sizeof(u32));
-		if (nested_vmx_msr_check_common(&e))
+		if (kvm_read_guest(vcpu->kvm,
+				   gpa + i * sizeof(e),
+				   &e, 2 * sizeof(u32))) {
+			pr_warn_ratelimited(
+				"%s cannot read MSR entry (%u, 0x%08llx)\n",
+				__func__, i, gpa + i * sizeof(e));
 			return -EINVAL;
-		if (kvm_get_msr(vcpu, e.index, &e.value))
+		}
+		if (nested_vmx_store_msr_check(vcpu, &e)) {
+			pr_warn_ratelimited(
+				"%s check failed (%u, 0x%x, 0x%x)\n",
+				__func__, i, e.index, e.reserved);
 			return -EINVAL;
-		kvm_write_guest(vcpu->kvm,
-				gpa + i * sizeof(e) +
+		}
+		if (kvm_get_msr(vcpu, e.index, &e.value)) {
+			pr_warn_ratelimited(
+				"%s cannot read MSR (%u, 0x%x)\n",
+				__func__, i, e.index);
+			return -EINVAL;
+		}
+		if (kvm_write_guest(vcpu->kvm,
+				    gpa + i * sizeof(e) +
 				    offsetof(struct vmx_msr_entry, value),
-				&e.value, sizeof(e.value));
+				    &e.value, sizeof(e.value))) {
+			pr_warn_ratelimited(
+				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
+				__func__, i, e.index, e.value);
+			return -EINVAL;
+		}
 	}
 	return 0;
 }
@@ -8698,6 +8793,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		return 1;
 	}
 
+	if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) {
+		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+		return 1;
+	}
+
 	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
 				nested_vmx_true_procbased_ctls_low,
 				nested_vmx_procbased_ctls_high) ||
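
Note on the new checks: nested_vmx_check_msr_switch() validates each VM-entry/VM-exit MSR switch area against the 16-byte entry format the SDM defines (MSR index in bits 31:0, reserved bits 63:32, data in bits 127:64), which struct vmx_msr_entry mirrors. The sketch below is a minimal, self-contained illustration of that bounds check, not kernel code; the names msr_entry_sketch and msr_area_is_valid are hypothetical, and maxphyaddr stands in for the guest physical-address width that cpuid_maxphyaddr() returns.

/*
 * Standalone sketch (illustrative only) of the range check applied to one
 * MSR switch area: 16-byte alignment, and both the first and last byte of
 * the area must fit below the guest's physical-address limit.
 */
#include <stdbool.h>
#include <stdint.h>

struct msr_entry_sketch {
	uint32_t index;     /* MSR number, bits 31:0 of the entry */
	uint32_t reserved;  /* bits 63:32, must be zero */
	uint64_t value;     /* MSR data, bits 127:64 */
};                          /* 16 bytes, like struct vmx_msr_entry */

static bool msr_area_is_valid(uint64_t addr, uint64_t count, int maxphyaddr)
{
	uint64_t last;

	if (count == 0)
		return true;            /* empty list: nothing to switch */
	if (addr & 15)
		return false;           /* area must be 16-byte aligned */
	if (addr >> maxphyaddr)
		return false;           /* start lies above the guest physical range */
	last = addr + count * sizeof(struct msr_entry_sketch) - 1;
	return !(last >> maxphyaddr);   /* last byte must be addressable too */
}

The "- 1" in the last-byte computation matters: an area ending exactly at the 1 << maxphyaddr boundary is still accepted, while one that crosses it is rejected, matching the check in the patch.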