Skip to content

Commit e73a317

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:

 - A series of fixes to the MTRR emulation, tested in the BZ by several
   users so they should be safe this late

 - A fix for a division by zero

 - Two very simple ARM and PPC fixes

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Reload pit counters for all channels when restoring state
  KVM: MTRR: treat memory as writeback if MTRR is disabled in guest CPUID
  KVM: MTRR: observe maxphyaddr from guest CPUID, not host
  KVM: MTRR: fix fixed MTRR segment look up
  KVM: VMX: Fix host initiated access to guest MSR_TSC_AUX
  KVM: arm/arm64: vgic: Fix kvm_vgic_map_is_active's dist check
  kvm: x86: move tracepoints outside extended quiescent state
  KVM: PPC: Book3S HV: Prohibit setting illegal transaction state in MSR
2 parents: ad3d1ab + 0185604 · commit e73a317

File tree

7 files changed

+48
-16
lines changed

7 files changed

+48
-16
lines changed

arch/powerpc/kvm/book3s_hv.c

Lines changed: 6 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -224,6 +224,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
224224

225225
static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
226226
{
227+
/*
228+
* Check for illegal transactional state bit combination
229+
* and if we find it, force the TS field to a safe state.
230+
*/
231+
if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
232+
msr &= ~MSR_TS_MASK;
227233
vcpu->arch.shregs.msr = msr;
228234
kvmppc_end_cede(vcpu);
229235
}

arch/x86/kvm/cpuid.h

Lines changed: 8 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -38,6 +38,14 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
3838
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
3939
}
4040

41+
static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
42+
{
43+
struct kvm_cpuid_entry2 *best;
44+
45+
best = kvm_find_cpuid_entry(vcpu, 1, 0);
46+
return best && (best->edx & bit(X86_FEATURE_MTRR));
47+
}
48+
4149
static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
4250
{
4351
struct kvm_cpuid_entry2 *best;

arch/x86/kvm/mtrr.c

Lines changed: 19 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -120,14 +120,22 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
120120
return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
121121
}
122122

123-
static u8 mtrr_disabled_type(void)
123+
static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
124124
{
125125
/*
126126
* Intel SDM 11.11.2.2: all MTRRs are disabled when
127127
* IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
128128
* memory type is applied to all of physical memory.
129+
*
130+
* However, virtual machines can be run with CPUID such that
131+
* there are no MTRRs. In that case, the firmware will never
132+
* enable MTRRs and it is obviously undesirable to run the
133+
* guest entirely with UC memory and we use WB.
129134
*/
130-
return MTRR_TYPE_UNCACHABLE;
135+
if (guest_cpuid_has_mtrr(vcpu))
136+
return MTRR_TYPE_UNCACHABLE;
137+
else
138+
return MTRR_TYPE_WRBACK;
131139
}
132140

133141
/*
@@ -267,7 +275,7 @@ static int fixed_mtrr_addr_to_seg(u64 addr)
267275

268276
for (seg = 0; seg < seg_num; seg++) {
269277
mtrr_seg = &fixed_seg_table[seg];
270-
if (mtrr_seg->start >= addr && addr < mtrr_seg->end)
278+
if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
271279
return seg;
272280
}
273281

@@ -300,7 +308,6 @@ static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
300308
*start = range->base & PAGE_MASK;
301309

302310
mask = range->mask & PAGE_MASK;
303-
mask |= ~0ULL << boot_cpu_data.x86_phys_bits;
304311

305312
/* This cannot overflow because writing to the reserved bits of
306313
* variable MTRRs causes a #GP.
@@ -356,10 +363,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
356363
if (var_mtrr_range_is_valid(cur))
357364
list_del(&mtrr_state->var_ranges[index].node);
358365

366+
/* Extend the mask with all 1 bits to the left, since those
367+
* bits must implicitly be 0. The bits are then cleared
368+
* when reading them.
369+
*/
359370
if (!is_mtrr_mask)
360371
cur->base = data;
361372
else
362-
cur->mask = data;
373+
cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));
363374

364375
/* add it to the list if it's enabled. */
365376
if (var_mtrr_range_is_valid(cur)) {
@@ -426,6 +437,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
426437
*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
427438
else
428439
*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
440+
441+
*pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
429442
}
430443

431444
return 0;
@@ -670,7 +683,7 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
670683
}
671684

672685
if (iter.mtrr_disabled)
673-
return mtrr_disabled_type();
686+
return mtrr_disabled_type(vcpu);
674687

675688
/* not contained in any MTRRs. */
676689
if (type == -1)

arch/x86/kvm/svm.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -3422,6 +3422,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
34223422
struct kvm_run *kvm_run = vcpu->run;
34233423
u32 exit_code = svm->vmcb->control.exit_code;
34243424

3425+
trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
3426+
34253427
if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
34263428
vcpu->arch.cr0 = svm->vmcb->save.cr0;
34273429
if (npt_enabled)
@@ -3892,8 +3894,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
38923894
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
38933895
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
38943896

3895-
trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
3896-
38973897
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
38983898
kvm_before_handle_nmi(&svm->vcpu);
38993899

arch/x86/kvm/vmx.c

Lines changed: 4 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -2803,7 +2803,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
28032803
msr_info->data = vcpu->arch.ia32_xss;
28042804
break;
28052805
case MSR_TSC_AUX:
2806-
if (!guest_cpuid_has_rdtscp(vcpu))
2806+
if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
28072807
return 1;
28082808
/* Otherwise falls through */
28092809
default:
@@ -2909,7 +2909,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
29092909
clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
29102910
break;
29112911
case MSR_TSC_AUX:
2912-
if (!guest_cpuid_has_rdtscp(vcpu))
2912+
if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
29132913
return 1;
29142914
/* Check reserved bit, higher 32 bits should be zero */
29152915
if ((data >> 32) != 0)
@@ -8042,6 +8042,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
80428042
u32 exit_reason = vmx->exit_reason;
80438043
u32 vectoring_info = vmx->idt_vectoring_info;
80448044

8045+
trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
8046+
80458047
/*
80468048
* Flush logged GPAs PML buffer, this will make dirty_bitmap more
80478049
* updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@@ -8668,7 +8670,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
86688670
vmx->loaded_vmcs->launched = 1;
86698671

86708672
vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
8671-
trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
86728673

86738674
/*
86748675
* the KVM_REQ_EVENT optimization bit is only on for one entry, and if

arch/x86/kvm/x86.c

Lines changed: 8 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -3572,9 +3572,11 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
35723572

35733573
static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
35743574
{
3575+
int i;
35753576
mutex_lock(&kvm->arch.vpit->pit_state.lock);
35763577
memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
3577-
kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
3578+
for (i = 0; i < 3; i++)
3579+
kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
35783580
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
35793581
return 0;
35803582
}
@@ -3593,6 +3595,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
35933595
static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
35943596
{
35953597
int start = 0;
3598+
int i;
35963599
u32 prev_legacy, cur_legacy;
35973600
mutex_lock(&kvm->arch.vpit->pit_state.lock);
35983601
prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
@@ -3602,7 +3605,8 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
36023605
memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
36033606
sizeof(kvm->arch.vpit->pit_state.channels));
36043607
kvm->arch.vpit->pit_state.flags = ps->flags;
3605-
kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
3608+
for (i = 0; i < 3; i++)
3609+
kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start);
36063610
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
36073611
return 0;
36083612
}
@@ -6515,6 +6519,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
65156519
if (req_immediate_exit)
65166520
smp_send_reschedule(vcpu->cpu);
65176521

6522+
trace_kvm_entry(vcpu->vcpu_id);
6523+
wait_lapic_expire(vcpu);
65186524
__kvm_guest_enter();
65196525

65206526
if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -6527,8 +6533,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
65276533
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
65286534
}
65296535

6530-
trace_kvm_entry(vcpu->vcpu_id);
6531-
wait_lapic_expire(vcpu);
65326536
kvm_x86_ops->run(vcpu);
65336537

65346538
/*

virt/kvm/arm/vgic.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1114,7 +1114,7 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
11141114
return true;
11151115
}
11161116

1117-
return dist_active_irq(vcpu);
1117+
return vgic_irq_is_active(vcpu, map->virt_irq);
11181118
}
11191119

11201120
/*

Comments (0)