Skip to content

Commit 44883f0

Browse files
committed
KVM: x86: ensure all MSRs can always be KVM_GET/SET_MSR'd
Some of the MSRs returned by GET_MSR_INDEX_LIST currently cannot be sent back to KVM_GET_MSR and/or KVM_SET_MSR; either they can never be sent back, or they are only accepted under special conditions. This makes the API a pain to use. To avoid this pain, this patch makes it so that the result of the get-list ioctl can always be used for host-initiated get and set. Since we don't have a separate way to check for read-only MSRs, this means some Hyper-V MSRs are ignored when written. Arguably they should not even be in the result of GET_MSR_INDEX_LIST, but I am leaving them there in case userspace is using the outcome of GET_MSR_INDEX_LIST to derive the support for the corresponding Hyper-V feature. Cc: stable@vger.kernel.org Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent cf81a7e commit 44883f0

File tree

3 files changed

+30
-14
lines changed

3 files changed

+30
-14
lines changed

arch/x86/kvm/hyperv.c

Lines changed: 20 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -235,7 +235,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
235235
struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
236236
int ret;
237237

238-
if (!synic->active)
238+
if (!synic->active && !host)
239239
return 1;
240240

241241
trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
@@ -295,11 +295,12 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
295295
return ret;
296296
}
297297

298-
static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
298+
static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
299+
bool host)
299300
{
300301
int ret;
301302

302-
if (!synic->active)
303+
if (!synic->active && !host)
303304
return 1;
304305

305306
ret = 0;
@@ -1014,6 +1015,11 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
10141015
case HV_X64_MSR_TSC_EMULATION_STATUS:
10151016
hv->hv_tsc_emulation_status = data;
10161017
break;
1018+
case HV_X64_MSR_TIME_REF_COUNT:
1019+
/* read-only, but still ignore it if host-initiated */
1020+
if (!host)
1021+
return 1;
1022+
break;
10171023
default:
10181024
vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
10191025
msr, data);
@@ -1101,6 +1107,12 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
11011107
return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
11021108
data, host);
11031109
}
1110+
case HV_X64_MSR_TSC_FREQUENCY:
1111+
case HV_X64_MSR_APIC_FREQUENCY:
1112+
/* read-only, but still ignore it if host-initiated */
1113+
if (!host)
1114+
return 1;
1115+
break;
11041116
default:
11051117
vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
11061118
msr, data);
@@ -1156,7 +1168,8 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
11561168
return 0;
11571169
}
11581170

1159-
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1171+
static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1172+
bool host)
11601173
{
11611174
u64 data = 0;
11621175
struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
@@ -1183,7 +1196,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
11831196
case HV_X64_MSR_SIMP:
11841197
case HV_X64_MSR_EOM:
11851198
case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1186-
return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
1199+
return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
11871200
case HV_X64_MSR_STIMER0_CONFIG:
11881201
case HV_X64_MSR_STIMER1_CONFIG:
11891202
case HV_X64_MSR_STIMER2_CONFIG:
@@ -1229,7 +1242,7 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
12291242
return kvm_hv_set_msr(vcpu, msr, data, host);
12301243
}
12311244

1232-
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1245+
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
12331246
{
12341247
if (kvm_hv_msr_partition_wide(msr)) {
12351248
int r;
@@ -1239,7 +1252,7 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
12391252
mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
12401253
return r;
12411254
} else
1242-
return kvm_hv_get_msr(vcpu, msr, pdata);
1255+
return kvm_hv_get_msr(vcpu, msr, pdata, host);
12431256
}
12441257

12451258
static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no)

arch/x86/kvm/hyperv.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ static inline struct kvm_vcpu *synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
4848
}
4949

5050
int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
51-
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
51+
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);
5252

5353
bool kvm_hv_hypercall_enabled(struct kvm *kvm);
5454
int kvm_hv_hypercall(struct kvm_vcpu *vcpu);

arch/x86/kvm/x86.c

Lines changed: 9 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2160,10 +2160,11 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
21602160
vcpu->arch.mcg_status = data;
21612161
break;
21622162
case MSR_IA32_MCG_CTL:
2163-
if (!(mcg_cap & MCG_CTL_P))
2163+
if (!(mcg_cap & MCG_CTL_P) &&
2164+
(data || !msr_info->host_initiated))
21642165
return 1;
21652166
if (data != 0 && data != ~(u64)0)
2166-
return -1;
2167+
return 1;
21672168
vcpu->arch.mcg_ctl = data;
21682169
break;
21692170
default:
@@ -2551,7 +2552,7 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
25512552
}
25522553
EXPORT_SYMBOL_GPL(kvm_get_msr);
25532554

2554-
static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
2555+
static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
25552556
{
25562557
u64 data;
25572558
u64 mcg_cap = vcpu->arch.mcg_cap;
@@ -2566,7 +2567,7 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
25662567
data = vcpu->arch.mcg_cap;
25672568
break;
25682569
case MSR_IA32_MCG_CTL:
2569-
if (!(mcg_cap & MCG_CTL_P))
2570+
if (!(mcg_cap & MCG_CTL_P) && !host)
25702571
return 1;
25712572
data = vcpu->arch.mcg_ctl;
25722573
break;
@@ -2699,7 +2700,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
26992700
case MSR_IA32_MCG_CTL:
27002701
case MSR_IA32_MCG_STATUS:
27012702
case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
2702-
return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
2703+
return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
2704+
msr_info->host_initiated);
27032705
case MSR_K7_CLK_CTL:
27042706
/*
27052707
* Provide expected ramp-up count for K7. All other
@@ -2720,7 +2722,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
27202722
case HV_X64_MSR_TSC_EMULATION_CONTROL:
27212723
case HV_X64_MSR_TSC_EMULATION_STATUS:
27222724
return kvm_hv_get_msr_common(vcpu,
2723-
msr_info->index, &msr_info->data);
2725+
msr_info->index, &msr_info->data,
2726+
msr_info->host_initiated);
27242727
break;
27252728
case MSR_IA32_BBL_CR_CTL3:
27262729
/* This legacy MSR exists but isn't fully documented in current

0 commit comments

Comments
 (0)