
Commit 33966dd

konradwilk authored and KAGA-KOKO committed
x86/KVM/VMX: Split the VMX MSR LOAD structures to have host/guest numbers
There is no semantic change, but this change allows an unbalanced number of MSRs to be loaded on VMEXIT and VMENTER, i.e. the number of MSRs to save or restore on VMEXIT or VMENTER may differ.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
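The payoff of the split is that the VMENTER (guest) and VMEXIT (host) autoload lists get independent counters, so one list can grow without the other. A minimal sketch of what that permits, with a hypothetical helper (add_host_only_msr() is not part of this commit):

	/* Hypothetical, for illustration only: queue an MSR restore on
	 * VMEXIT without a matching VMENTER load. Only possible now that
	 * the host list carries its own nr. */
	static void add_host_only_msr(struct vmx_msrs *host, u32 msr, u64 host_val)
	{
		if (host->nr >= NR_AUTOLOAD_MSRS)
			return;	/* full; mirrors the check in add_atomic_switch_msr() */

		host->val[host->nr].index = msr;
		host->val[host->nr].value = host_val;
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, ++host->nr);
	}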
1 parent c595cee commit 33966dd

1 file changed: 35 additions, 30 deletions

arch/x86/kvm/vmx.c

@@ -808,6 +808,11 @@ static inline int pi_test_sn(struct pi_desc *pi_desc)
 		(unsigned long *)&pi_desc->control);
 }
 
+struct vmx_msrs {
+	unsigned int		nr;
+	struct vmx_msr_entry	val[NR_AUTOLOAD_MSRS];
+};
+
 struct vcpu_vmx {
 	struct kvm_vcpu		vcpu;
 	unsigned long		host_rsp;
@@ -841,9 +846,8 @@ struct vcpu_vmx {
 	struct loaded_vmcs	*loaded_vmcs;
 	bool			__launched; /* temporary, used in vmx_vcpu_run */
 	struct msr_autoload {
-		unsigned nr;
-		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
-		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+		struct vmx_msrs guest;
+		struct vmx_msrs host;
 	} msr_autoload;
 	struct {
 		int		loaded;
@@ -2440,18 +2444,18 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 		}
 		break;
 	}
-
-	for (i = 0; i < m->nr; ++i)
-		if (m->guest[i].index == msr)
+	for (i = 0; i < m->guest.nr; ++i)
+		if (m->guest.val[i].index == msr)
 			break;
 
-	if (i == m->nr)
+	if (i == m->guest.nr)
 		return;
-	--m->nr;
-	m->guest[i] = m->guest[m->nr];
-	m->host[i] = m->host[m->nr];
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+	--m->guest.nr;
+	--m->host.nr;
+	m->guest.val[i] = m->guest.val[m->guest.nr];
+	m->host.val[i] = m->host.val[m->host.nr];
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 }
 
 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
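The removal above is the classic swap-with-last idiom: entry order does not matter to the hardware, so the deleted slot is overwritten by the final entry and both counters shrink by one, keeping deletion O(1). A self-contained restatement (illustrative userspace C, not kernel code):

	#include <stdio.h>

	struct entry { unsigned int index; unsigned long long value; };
	struct list  { unsigned int nr; struct entry val[8]; };

	static void remove_entry(struct list *guest, struct list *host,
				 unsigned int idx)
	{
		unsigned int i;

		for (i = 0; i < guest->nr; i++)
			if (guest->val[i].index == idx)
				break;
		if (i == guest->nr)
			return;				/* not found */
		--guest->nr;
		--host->nr;
		guest->val[i] = guest->val[guest->nr];	/* move last entry into the hole */
		host->val[i] = host->val[host->nr];
	}

	int main(void)
	{
		struct list g = { 2, { { 0x10, 1 }, { 0x20, 2 } } };
		struct list h = g;

		remove_entry(&g, &h, 0x10);
		printf("nr=%u first=%#x\n", g.nr, g.val[0].index);	/* nr=1 first=0x20 */
		return 0;
	}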
@@ -2503,24 +2507,25 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
 	}
 
-	for (i = 0; i < m->nr; ++i)
-		if (m->guest[i].index == msr)
+	for (i = 0; i < m->guest.nr; ++i)
+		if (m->guest.val[i].index == msr)
 			break;
 
 	if (i == NR_AUTOLOAD_MSRS) {
 		printk_once(KERN_WARNING "Not enough msr switch entries. "
 				"Can't add msr %x\n", msr);
 		return;
-	} else if (i == m->nr) {
-		++m->nr;
-		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+	} else if (i == m->guest.nr) {
+		++m->guest.nr;
+		++m->host.nr;
+		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 	}
 
-	m->guest[i].index = msr;
-	m->guest[i].value = guest_val;
-	m->host[i].index = msr;
-	m->host[i].value = host_val;
+	m->guest.val[i].index = msr;
+	m->guest.val[i].value = guest_val;
+	m->host.val[i].index = msr;
+	m->host.val[i].value = host_val;
 }
 
 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
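Callers of this find-or-append path still supply a matched guest/host pair, so the two lists stay index-aligned even though their counters are now independent. For instance, the pre-existing EFER switch in update_transition_efer() makes a call of this shape:

	add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);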
@@ -6290,9 +6295,9 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
-	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
-	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
 	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
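Note the .val in the __pa() arguments: struct vmx_msrs puts the counter in front of the entries, so the array no longer starts at the wrapper's own address and the VMCS load-area pointers must be taken from the .val member. A quick offset check (illustrative userspace C; the 8-slot array stands in for NR_AUTOLOAD_MSRS):

	#include <stdio.h>
	#include <stddef.h>

	struct vmx_msr_entry { unsigned int index, reserved; unsigned long long value; };

	struct vmx_msrs {
		unsigned int nr;
		struct vmx_msr_entry val[8];
	};

	int main(void)
	{
		/* Prints a nonzero offset: &msrs != msrs.val, hence
		 * __pa(...host.val) rather than __pa(...host) above. */
		printf("val starts %zu bytes into the struct\n",
		       offsetof(struct vmx_msrs, val));
		return 0;
	}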
@@ -11350,10 +11355,10 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 	 * Set the MSR load/store lists to match L0's settings.
 	 */
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
 	set_cr4_guest_host_mask(vmx);
 
@@ -12457,8 +12462,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	vmx_segment_cache_clear(vmx);
 
 	/* Update any VMCS fields that might have changed while L2 ran */
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.nr);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
 	vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
 	if (vmx->hv_deadline_tsc == -1)
 		vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
