@@ -808,6 +808,11 @@ static inline int pi_test_sn(struct pi_desc *pi_desc)
 			(unsigned long *)&pi_desc->control);
 }
 
+struct vmx_msrs {
+	unsigned int		nr;
+	struct vmx_msr_entry	val[NR_AUTOLOAD_MSRS];
+};
+
 struct vcpu_vmx {
 	struct kvm_vcpu       vcpu;
 	unsigned long         host_rsp;
@@ -841,9 +846,8 @@ struct vcpu_vmx {
 	struct loaded_vmcs   *loaded_vmcs;
 	bool                  __launched; /* temporary, used in vmx_vcpu_run */
 	struct msr_autoload {
-		unsigned nr;
-		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
-		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+		struct vmx_msrs guest;
+		struct vmx_msrs host;
 	} msr_autoload;
 	struct {
 		int           loaded;
@@ -2440,18 +2444,18 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 		}
 		break;
 	}
-
-	for (i = 0; i < m->nr; ++i)
-		if (m->guest[i].index == msr)
+	for (i = 0; i < m->guest.nr; ++i)
+		if (m->guest.val[i].index == msr)
 			break;
 
-	if (i == m->nr)
+	if (i == m->guest.nr)
 		return;
-	--m->nr;
-	m->guest[i] = m->guest[m->nr];
-	m->host[i] = m->host[m->nr];
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+	--m->guest.nr;
+	--m->host.nr;
+	m->guest.val[i] = m->guest.val[m->guest.nr];
+	m->host.val[i] = m->host.val[m->host.nr];
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 }
 
 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
@@ -2503,24 +2507,25 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
 	}
 
-	for (i = 0; i < m->nr; ++i)
-		if (m->guest[i].index == msr)
+	for (i = 0; i < m->guest.nr; ++i)
+		if (m->guest.val[i].index == msr)
 			break;
 
 	if (i == NR_AUTOLOAD_MSRS) {
 		printk_once(KERN_WARNING "Not enough msr switch entries. "
 				"Can't add msr %x\n", msr);
 		return;
-	} else if (i == m->nr) {
-		++m->nr;
-		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+	} else if (i == m->guest.nr) {
+		++m->guest.nr;
+		++m->host.nr;
+		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 	}
 
-	m->guest[i].index = msr;
-	m->guest[i].value = guest_val;
-	m->host[i].index = msr;
-	m->host[i].value = host_val;
+	m->guest.val[i].index = msr;
+	m->guest.val[i].value = guest_val;
+	m->host.val[i].index = msr;
+	m->host.val[i].value = host_val;
 }
 
 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
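For orientation, here is a minimal user-space sketch of the bookkeeping the hunks above implement after the split: each of the guest and host autoload lists now carries its own nr next to its val[] array, and the add/clear paths keep the two counters in step (they still move together here, but can diverge in later changes). The struct vmx_msrs layout and the add/clear logic mirror the patch; the simplified vmx_msr_entry, the NR_AUTOLOAD_MSRS value, and the main() driver are illustrative stand-ins, not kernel code.

#include <stdio.h>

#define NR_AUTOLOAD_MSRS 8		/* small fixed bound, as vmx.c uses (value illustrative) */

struct vmx_msr_entry {			/* simplified stand-in for the kernel's vmx_msr_entry */
	unsigned int index;
	unsigned long long value;
};

struct vmx_msrs {			/* as introduced by this patch: count + entries per list */
	unsigned int nr;
	struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
};

struct msr_autoload {
	struct vmx_msrs guest;
	struct vmx_msrs host;
};

/* Mirrors add_atomic_switch_msr(): reuse the slot if the MSR is present, else append. */
static void add_msr(struct msr_autoload *m, unsigned int msr,
		    unsigned long long guest_val, unsigned long long host_val)
{
	unsigned int i;

	for (i = 0; i < m->guest.nr; ++i)
		if (m->guest.val[i].index == msr)
			break;

	if (i == NR_AUTOLOAD_MSRS) {
		fprintf(stderr, "Not enough msr switch entries. Can't add msr %x\n", msr);
		return;
	} else if (i == m->guest.nr) {
		++m->guest.nr;
		++m->host.nr;
	}

	m->guest.val[i].index = msr;
	m->guest.val[i].value = guest_val;
	m->host.val[i].index = msr;
	m->host.val[i].value = host_val;
}

/* Mirrors clear_atomic_switch_msr(): swap the last entry into the freed slot. */
static void clear_msr(struct msr_autoload *m, unsigned int msr)
{
	unsigned int i;

	for (i = 0; i < m->guest.nr; ++i)
		if (m->guest.val[i].index == msr)
			break;

	if (i == m->guest.nr)
		return;
	--m->guest.nr;
	--m->host.nr;
	m->guest.val[i] = m->guest.val[m->guest.nr];
	m->host.val[i] = m->host.val[m->host.nr];
}

int main(void)
{
	struct msr_autoload m = { 0 };

	add_msr(&m, 0x1d9, 0, 1);	/* MSR indices and values are arbitrary examples */
	add_msr(&m, 0x277, 2, 3);
	clear_msr(&m, 0x1d9);
	printf("guest.nr=%u host.nr=%u\n", m.guest.nr, m.host.nr);	/* prints: guest.nr=1 host.nr=1 */
	return 0;
}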
@@ -6290,9 +6295,9 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
-	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
-	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
 	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
@@ -11350,10 +11355,10 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	 * Set the MSR load/store lists to match L0's settings.
 	 */
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
 	set_cr4_guest_host_mask(vmx);
 
@@ -12457,8 +12462,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	vmx_segment_cache_clear(vmx);
 
 	/* Update any VMCS fields that might have changed while L2 ran */
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
 	if (vmx->hv_deadline_tsc == -1)
 		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
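The last three hunks keep each VMCS field paired with the right list: the VM-entry MSR-load count and address now come from the guest list, and the VM-exit MSR-load count and address from the host list. As a reading aid only, the pairing amounts to something like the following in-tree-context sketch; the helper name is hypothetical and not part of the patch, while the vmcs_write32()/vmcs_write64() calls, field names, and struct members are exactly those used in the hunks above.

/* Hypothetical helper, for illustration only: how the autoload VMCS fields
 * pair with the two lists after this patch. */
static void vmx_write_autoload_lists(struct vcpu_vmx *vmx)
{
	/* MSRs loaded on VM entry come from the guest list. */
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
	/* MSRs loaded on VM exit come from the host list. */
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
}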