
Commit e9ac033

ekorenevsky authored and bonzini committed
KVM: nVMX: Improve nested msr switch checking
This patch improves the checks required by the Intel Software Developer Manual:
- SMM MSRs are not allowed.
- Microcode MSRs are not allowed.
- x2APIC MSRs are checked only when the LAPIC is in x2APIC mode.
- MSR switch areas must be aligned to 16 bytes.
- The addresses of the first and last byte in an MSR switch area must not set any bits beyond the processor's physical-address width.

It also adds warning messages on failures during MSR switching. These messages are useful for people debugging their VMMs in nVMX.

Signed-off-by: Eugene Korenevsky <ekorenevsky@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent ff651cb · commit e9ac033
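As an aside for readers following the checks, below is a minimal standalone C sketch of the MSR switch-area validation the message describes. It is not the kernel helper itself; struct vmx_msr_entry here mirrors the real 16-byte layout, and maxphyaddr stands in for cpuid_maxphyaddr(). It shows why, for example, a 512-entry area at 0xFFFFFF000 fails on a CPU with a 36-bit physical-address width: its last byte lands at 0x1000000FFF, above 2^36 - 1.

/*
 * Standalone sketch of the MSR-switch-area validity check described above.
 * NOT the kernel code; it only mirrors the documented rules:
 *   - a non-empty area must be 16-byte aligned;
 *   - neither the first nor the last byte of the area may set bits
 *     beyond the processor's physical-address width (maxphyaddr).
 * Each entry is 16 bytes, matching struct vmx_msr_entry (index, reserved, value).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct vmx_msr_entry {
	uint32_t index;
	uint32_t reserved;
	uint64_t value;
};

static bool msr_switch_area_valid(uint64_t addr, uint64_t count, int maxphyaddr)
{
	uint64_t last;

	if (count == 0)
		return true;		/* nothing to switch, nothing to check */
	if (addr & 0xf)
		return false;		/* not 16-byte aligned */
	if (addr >> maxphyaddr)
		return false;		/* first byte beyond maxphyaddr */
	last = addr + count * sizeof(struct vmx_msr_entry) - 1;
	if (last >> maxphyaddr)
		return false;		/* last byte beyond maxphyaddr */
	return true;
}

int main(void)
{
	/* Examples with a 36-bit physical-address width (maxphyaddr = 36). */
	printf("%d\n", msr_switch_area_valid(0x1000, 4, 36));           /* 1: valid */
	printf("%d\n", msr_switch_area_valid(0x1008, 4, 36));           /* 0: misaligned */
	printf("%d\n", msr_switch_area_valid(0xFFFFFF000ULL, 512, 36)); /* 0: last byte crosses 2^36 */
	return 0;
}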

File tree

2 files changed: +117 additions, -14 deletions

arch/x86/include/uapi/asm/msr-index.h
3 additions & 0 deletions

@@ -356,6 +356,9 @@
 #define MSR_IA32_UCODE_WRITE		0x00000079
 #define MSR_IA32_UCODE_REV		0x0000008b
 
+#define MSR_IA32_SMM_MONITOR_CTL	0x0000009b
+#define MSR_IA32_SMBASE			0x0000009e
+
 #define MSR_IA32_PERF_STATUS		0x00000198
 #define MSR_IA32_PERF_CTL		0x00000199
 #define MSR_AMD_PSTATE_DEF_BASE		0xc0010064

arch/x86/kvm/vmx.c
114 additions & 14 deletions

@@ -8293,18 +8293,80 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
 		     ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
 }
 
-static inline int nested_vmx_msr_check_common(struct vmx_msr_entry *e)
+static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
+				       unsigned long count_field,
+				       unsigned long addr_field,
+				       int maxphyaddr)
 {
-	if (e->index >> 8 == 0x8 || e->reserved != 0)
+	u64 count, addr;
+
+	if (vmcs12_read_any(vcpu, count_field, &count) ||
+	    vmcs12_read_any(vcpu, addr_field, &addr)) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	if (count == 0)
+		return 0;
+	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
+	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
+		pr_warn_ratelimited(
+			"nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
+			addr_field, maxphyaddr, count, addr);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
+						struct vmcs12 *vmcs12)
+{
+	int maxphyaddr;
+
+	if (vmcs12->vm_exit_msr_load_count == 0 &&
+	    vmcs12->vm_exit_msr_store_count == 0 &&
+	    vmcs12->vm_entry_msr_load_count == 0)
+		return 0; /* Fast path */
+	maxphyaddr = cpuid_maxphyaddr(vcpu);
+	if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
+					VM_EXIT_MSR_LOAD_ADDR, maxphyaddr) ||
+	    nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
+					VM_EXIT_MSR_STORE_ADDR, maxphyaddr) ||
+	    nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
+					VM_ENTRY_MSR_LOAD_ADDR, maxphyaddr))
+		return -EINVAL;
+	return 0;
+}
+
+static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
+				       struct vmx_msr_entry *e)
+{
+	/* x2APIC MSR accesses are not allowed */
+	if (apic_x2apic_mode(vcpu->arch.apic) && e->index >> 8 == 0x8)
+		return -EINVAL;
+	if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
+	    e->index == MSR_IA32_UCODE_REV)
+		return -EINVAL;
+	if (e->reserved != 0)
 		return -EINVAL;
 	return 0;
 }
 
-static inline int nested_vmx_load_msr_check(struct vmx_msr_entry *e)
+static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
+				     struct vmx_msr_entry *e)
 {
 	if (e->index == MSR_FS_BASE ||
 	    e->index == MSR_GS_BASE ||
-	    nested_vmx_msr_check_common(e))
+	    e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
+	    nested_vmx_msr_check_common(vcpu, e))
+		return -EINVAL;
+	return 0;
+}
+
+static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
+				      struct vmx_msr_entry *e)
+{
+	if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
+	    nested_vmx_msr_check_common(vcpu, e))
 		return -EINVAL;
 	return 0;
 }
@@ -8321,13 +8383,27 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 
 	msr.host_initiated = false;
 	for (i = 0; i < count; i++) {
-		kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e), &e, sizeof(e));
-		if (nested_vmx_load_msr_check(&e))
+		if (kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
+				   &e, sizeof(e))) {
+			pr_warn_ratelimited(
+				"%s cannot read MSR entry (%u, 0x%08llx)\n",
+				__func__, i, gpa + i * sizeof(e));
 			goto fail;
+		}
+		if (nested_vmx_load_msr_check(vcpu, &e)) {
+			pr_warn_ratelimited(
+				"%s check failed (%u, 0x%x, 0x%x)\n",
+				__func__, i, e.index, e.reserved);
+			goto fail;
+		}
 		msr.index = e.index;
 		msr.data = e.value;
-		if (kvm_set_msr(vcpu, &msr))
+		if (kvm_set_msr(vcpu, &msr)) {
+			pr_warn_ratelimited(
+				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
+				__func__, i, e.index, e.value);
 			goto fail;
+		}
 	}
 	return 0;
 fail:
@@ -8340,16 +8416,35 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	struct vmx_msr_entry e;
 
 	for (i = 0; i < count; i++) {
-		kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
-			       &e, 2 * sizeof(u32));
-		if (nested_vmx_msr_check_common(&e))
+		if (kvm_read_guest(vcpu->kvm,
+				   gpa + i * sizeof(e),
+				   &e, 2 * sizeof(u32))) {
+			pr_warn_ratelimited(
+				"%s cannot read MSR entry (%u, 0x%08llx)\n",
+				__func__, i, gpa + i * sizeof(e));
 			return -EINVAL;
-		if (kvm_get_msr(vcpu, e.index, &e.value))
+		}
+		if (nested_vmx_store_msr_check(vcpu, &e)) {
+			pr_warn_ratelimited(
+				"%s check failed (%u, 0x%x, 0x%x)\n",
+				__func__, i, e.index, e.reserved);
 			return -EINVAL;
-		kvm_write_guest(vcpu->kvm,
-				gpa + i * sizeof(e) +
+		}
+		if (kvm_get_msr(vcpu, e.index, &e.value)) {
+			pr_warn_ratelimited(
+				"%s cannot read MSR (%u, 0x%x)\n",
+				__func__, i, e.index);
+			return -EINVAL;
+		}
+		if (kvm_write_guest(vcpu->kvm,
+				    gpa + i * sizeof(e) +
 				    offsetof(struct vmx_msr_entry, value),
-				&e.value, sizeof(e.value));
+				    &e.value, sizeof(e.value))) {
+			pr_warn_ratelimited(
+				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
+				__func__, i, e.index, e.value);
+			return -EINVAL;
+		}
 	}
 	return 0;
 }
@@ -8698,6 +8793,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		return 1;
 	}
 
+	if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) {
+		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+		return 1;
+	}
+
 	if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
 				nested_vmx_true_procbased_ctls_low,
 				nested_vmx_procbased_ctls_high) ||
