Skip to content

Commit ff651cb

Browse files
Wincy Van authored and Paolo Bonzini committed
KVM: nVMX: Add nested msr load/restore algorithm
Several hypervisors need the MSR auto load/restore feature. We read MSRs from the VM-entry MSR-load area specified by L1, and load them via kvm_set_msr in the nested entry. When a nested exit occurs, we get MSRs via kvm_get_msr, writing them to L1's MSR-store area. After this, we read MSRs from the VM-exit MSR-load area, and load them via kvm_set_msr. Signed-off-by: Wincy Van <fanwenyi0529@gmail.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent b1940cd commit ff651cb

File tree

4 files changed

+97
-11
lines changed

4 files changed

+97
-11
lines changed

arch/x86/include/uapi/asm/vmx.h

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,7 @@
5656
#define EXIT_REASON_MSR_READ 31
5757
#define EXIT_REASON_MSR_WRITE 32
5858
#define EXIT_REASON_INVALID_STATE 33
59+
#define EXIT_REASON_MSR_LOAD_FAIL 34
5960
#define EXIT_REASON_MWAIT_INSTRUCTION 36
6061
#define EXIT_REASON_MONITOR_INSTRUCTION 39
6162
#define EXIT_REASON_PAUSE_INSTRUCTION 40
@@ -116,10 +117,14 @@
116117
{ EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \
117118
{ EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
118119
{ EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
120+
{ EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \
119121
{ EXIT_REASON_INVD, "INVD" }, \
120122
{ EXIT_REASON_INVVPID, "INVVPID" }, \
121123
{ EXIT_REASON_INVPCID, "INVPCID" }, \
122124
{ EXIT_REASON_XSAVES, "XSAVES" }, \
123125
{ EXIT_REASON_XRSTORS, "XRSTORS" }
124126

127+
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
128+
#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
129+
125130
#endif /* _UAPIVMX_H */

arch/x86/kvm/vmx.c

Lines changed: 90 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -6143,6 +6143,13 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
61436143
*/
61446144
}
61456145

6146+
/*
 * Emulate a VMX abort with the given abort indicator.  Architecturally a
 * VMX abort shuts the logical processor down; here it is approximated by
 * requesting a triple fault, which resets the guest.
 */
static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	pr_warn("kvm: nested vmx abort, indicator %d\n", indicator);
	/* TODO: not to reset guest simply here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}
6152+
61466153
static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
61476154
{
61486155
struct vcpu_vmx *vmx =
@@ -8286,6 +8293,67 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
82868293
ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
82878294
}
82888295

8296+
static inline int nested_vmx_msr_check_common(struct vmx_msr_entry *e)
8297+
{
8298+
if (e->index >> 8 == 0x8 || e->reserved != 0)
8299+
return -EINVAL;
8300+
return 0;
8301+
}
8302+
8303+
static inline int nested_vmx_load_msr_check(struct vmx_msr_entry *e)
8304+
{
8305+
if (e->index == MSR_FS_BASE ||
8306+
e->index == MSR_GS_BASE ||
8307+
nested_vmx_msr_check_common(e))
8308+
return -EINVAL;
8309+
return 0;
8310+
}
8311+
8312+
/*
8313+
* Load guest's/host's msr at nested entry/exit.
8314+
* return 0 for success, entry index for failure.
8315+
*/
8316+
/*
 * Load guest's/host's msr at nested entry/exit.
 *
 * @gpa:   guest-physical address of the MSR load area, an array of
 *         struct vmx_msr_entry.
 * @count: number of entries in the area.
 *
 * Return 0 for success, or the 1-based index of the failing entry
 * (the format the MSR-load-failure exit qualification expects).
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	struct msr_data msr;

	msr.host_initiated = false;
	for (i = 0; i < count; i++) {
		/*
		 * If the guest memory read fails, 'e' is left
		 * uninitialized; treat an unreadable entry as a load
		 * failure at this index instead of consuming garbage.
		 */
		if (kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
				   &e, sizeof(e)))
			goto fail;
		if (nested_vmx_load_msr_check(&e))
			goto fail;
		msr.index = e.index;
		msr.data = e.value;
		if (kvm_set_msr(vcpu, &msr))
			goto fail;
	}
	return 0;
fail:
	return i + 1;
}
8336+
8337+
/*
 * Store the guest's MSRs into the VM-exit MSR-store area at @gpa
 * (an array of @count struct vmx_msr_entry) on nested VM-exit.
 *
 * Returns 0 on success, -EINVAL if any entry is invalid, cannot be
 * read from or written back to guest memory, or names an unreadable MSR.
 */
static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;

	for (i = 0; i < count; i++) {
		/*
		 * Only index+reserved (the first two u32s) are read from
		 * the guest; value is filled in below and written back.
		 * An unchecked read would leave 'e' uninitialized.
		 */
		if (kvm_read_guest(vcpu->kvm, gpa + i * sizeof(e),
				   &e, 2 * sizeof(u32)))
			return -EINVAL;
		if (nested_vmx_msr_check_common(&e))
			return -EINVAL;
		if (kvm_get_msr(vcpu, e.index, &e.value))
			return -EINVAL;
		/* Propagate write failures so the caller can raise a VMX abort. */
		if (kvm_write_guest(vcpu->kvm,
				    gpa + i * sizeof(e) +
					offsetof(struct vmx_msr_entry, value),
				    &e.value, sizeof(e.value)))
			return -EINVAL;
	}
	return 0;
}
8356+
82898357
/*
82908358
* prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
82918359
* L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -8582,6 +8650,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
85828650
int cpu;
85838651
struct loaded_vmcs *vmcs02;
85848652
bool ia32e;
8653+
u32 msr_entry_idx;
85858654

85868655
if (!nested_vmx_check_permission(vcpu) ||
85878656
!nested_vmx_check_vmcs12(vcpu))
@@ -8629,15 +8698,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
86298698
return 1;
86308699
}
86318700

8632-
if (vmcs12->vm_entry_msr_load_count > 0 ||
8633-
vmcs12->vm_exit_msr_load_count > 0 ||
8634-
vmcs12->vm_exit_msr_store_count > 0) {
8635-
pr_warn_ratelimited("%s: VMCS MSR_{LOAD,STORE} unsupported\n",
8636-
__func__);
8637-
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
8638-
return 1;
8639-
}
8640-
86418701
if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
86428702
nested_vmx_true_procbased_ctls_low,
86438703
nested_vmx_procbased_ctls_high) ||
@@ -8739,10 +8799,21 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
87398799

87408800
vmx_segment_cache_clear(vmx);
87418801

8742-
vmcs12->launch_state = 1;
8743-
87448802
prepare_vmcs02(vcpu, vmcs12);
87458803

8804+
msr_entry_idx = nested_vmx_load_msr(vcpu,
8805+
vmcs12->vm_entry_msr_load_addr,
8806+
vmcs12->vm_entry_msr_load_count);
8807+
if (msr_entry_idx) {
8808+
leave_guest_mode(vcpu);
8809+
vmx_load_vmcs01(vcpu);
8810+
nested_vmx_entry_failure(vcpu, vmcs12,
8811+
EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
8812+
return 1;
8813+
}
8814+
8815+
vmcs12->launch_state = 1;
8816+
87468817
if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
87478818
return kvm_emulate_halt(vcpu);
87488819

@@ -9172,6 +9243,10 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
91729243

91739244
kvm_set_dr(vcpu, 7, 0x400);
91749245
vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
9246+
9247+
if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
9248+
vmcs12->vm_exit_msr_load_count))
9249+
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
91759250
}
91769251

91779252
/*
@@ -9193,6 +9268,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
91939268
prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
91949269
exit_qualification);
91959270

9271+
if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
9272+
vmcs12->vm_exit_msr_store_count))
9273+
nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
9274+
91969275
vmx_load_vmcs01(vcpu);
91979276

91989277
if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)

arch/x86/kvm/x86.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2324,6 +2324,7 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
23242324
{
23252325
return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
23262326
}
2327+
EXPORT_SYMBOL_GPL(kvm_get_msr);
23272328

23282329
static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
23292330
{

virt/kvm/kvm_main.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1593,6 +1593,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
15931593
}
15941594
return 0;
15951595
}
1596+
EXPORT_SYMBOL_GPL(kvm_write_guest);
15961597

15971598
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
15981599
gpa_t gpa, unsigned long len)

0 commit comments

Comments
 (0)