Skip to content

Commit 5e1b59a

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Radim Krčmář: "ARM fixes: - Wrong indentation in the PMU code from the merge window - A long-time bug occurring with running ntpd on the host, candidate for stable - Properly handle (and warn about) the unsupported configuration of running on systems with less than 40 bits of PA space - More fixes to the PM and hotplug notifier stuff from the merge window x86: - leak of guest xcr0 (typically shows up as SIGILL) - new maintainer (who is sending the pull request too) - fix for merge window regression - fix for guest CPUID" Paolo Bonzini points out: "For the record, this tag is signed by me because I prepared the pull request. Further pull requests for 4.6 will be signed and sent out by Radim directly" * tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: KVM: x86: mask CPUID(0xD,0x1).EAX against host value kvm: x86: do not leak guest xcr0 into host interrupt handlers KVM: MMU: fix permission_fault() KVM: new maintainer on the block arm64: KVM: unregister notifiers in hyp mode teardown path arm64: KVM: Warn when PARange is less than 40 bits KVM: arm/arm64: Handle forward time correction gracefully arm64: KVM: Add braces to multi-line if statement in virtual PMU code
2 parents 1c74a7f + 316314c commit 5e1b59a

File tree

12 files changed

+106
-37
lines changed

12 files changed

+106
-37
lines changed

MAINTAINERS

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6252,8 +6252,8 @@ S: Maintained
62526252
F: tools/testing/selftests
62536253

62546254
KERNEL VIRTUAL MACHINE (KVM)
6255-
M: Gleb Natapov <gleb@kernel.org>
62566255
M: Paolo Bonzini <pbonzini@redhat.com>
6256+
M: Radim Krčmář <rkrcmar@redhat.com>
62576257
L: kvm@vger.kernel.org
62586258
W: http://www.linux-kvm.org
62596259
T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git

arch/arm/kvm/arm.c

Lines changed: 10 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1112,10 +1112,17 @@ static void __init hyp_cpu_pm_init(void)
11121112
{
11131113
cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
11141114
}
1115+
static void __init hyp_cpu_pm_exit(void)
1116+
{
1117+
cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
1118+
}
11151119
#else
11161120
static inline void hyp_cpu_pm_init(void)
11171121
{
11181122
}
1123+
static inline void hyp_cpu_pm_exit(void)
1124+
{
1125+
}
11191126
#endif
11201127

11211128
static void teardown_common_resources(void)
@@ -1141,9 +1148,7 @@ static int init_subsystems(void)
11411148
/*
11421149
* Register CPU Hotplug notifier
11431150
*/
1144-
cpu_notifier_register_begin();
1145-
err = __register_cpu_notifier(&hyp_init_cpu_nb);
1146-
cpu_notifier_register_done();
1151+
err = register_cpu_notifier(&hyp_init_cpu_nb);
11471152
if (err) {
11481153
kvm_err("Cannot register KVM init CPU notifier (%d)\n", err);
11491154
return err;
@@ -1193,6 +1198,8 @@ static void teardown_hyp_mode(void)
11931198
free_hyp_pgds();
11941199
for_each_possible_cpu(cpu)
11951200
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
1201+
unregister_cpu_notifier(&hyp_init_cpu_nb);
1202+
hyp_cpu_pm_exit();
11961203
}
11971204

11981205
static int init_vhe_mode(void)

arch/arm64/include/asm/kvm_arm.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -151,8 +151,7 @@
151151
*/
152152
#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
153153
VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
154-
VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
155-
VTCR_EL2_RES1)
154+
VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
156155
#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
157156
#else
158157
/*
@@ -163,8 +162,7 @@
163162
*/
164163
#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
165164
VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
166-
VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
167-
VTCR_EL2_RES1)
165+
VTCR_EL2_SL0_LVL1 | VTCR_EL2_RES1)
168166
#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
169167
#endif
170168

arch/arm64/include/asm/kvm_asm.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ extern void __vgic_v3_init_lrs(void);
5454

5555
extern u32 __kvm_get_mdcr_el2(void);
5656

57-
extern void __init_stage2_translation(void);
57+
extern u32 __init_stage2_translation(void);
5858

5959
#endif
6060

arch/arm64/include/asm/kvm_host.h

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -369,11 +369,12 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
369369
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
370370
struct kvm_device_attr *attr);
371371

372-
/* #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) */
373-
374372
static inline void __cpu_init_stage2(void)
375373
{
376-
kvm_call_hyp(__init_stage2_translation);
374+
u32 parange = kvm_call_hyp(__init_stage2_translation);
375+
376+
WARN_ONCE(parange < 40,
377+
"PARange is %d bits, unsupported configuration!", parange);
377378
}
378379

379380
#endif /* __ARM64_KVM_HOST_H__ */

arch/arm64/kvm/hyp/s2-setup.c

Lines changed: 37 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -20,17 +20,50 @@
2020
#include <asm/kvm_asm.h>
2121
#include <asm/kvm_hyp.h>
2222

23-
void __hyp_text __init_stage2_translation(void)
23+
u32 __hyp_text __init_stage2_translation(void)
2424
{
2525
u64 val = VTCR_EL2_FLAGS;
26+
u64 parange;
2627
u64 tmp;
2728

2829
/*
2930
* Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS
3031
* bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while
3132
* PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
3233
*/
33-
val |= (read_sysreg(id_aa64mmfr0_el1) & 7) << 16;
34+
parange = read_sysreg(id_aa64mmfr0_el1) & 7;
35+
val |= parange << 16;
36+
37+
/* Compute the actual PARange... */
38+
switch (parange) {
39+
case 0:
40+
parange = 32;
41+
break;
42+
case 1:
43+
parange = 36;
44+
break;
45+
case 2:
46+
parange = 40;
47+
break;
48+
case 3:
49+
parange = 42;
50+
break;
51+
case 4:
52+
parange = 44;
53+
break;
54+
case 5:
55+
default:
56+
parange = 48;
57+
break;
58+
}
59+
60+
/*
61+
* ... and clamp it to 40 bits, unless we have some braindead
62+
* HW that implements less than that. In all cases, we'll
63+
* return that value for the rest of the kernel to decide what
64+
* to do.
65+
*/
66+
val |= 64 - (parange > 40 ? 40 : parange);
3467

3568
/*
3669
* Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
@@ -42,4 +75,6 @@ void __hyp_text __init_stage2_translation(void)
4275
VTCR_EL2_VS_8BIT;
4376

4477
write_sysreg(val, vtcr_el2);
78+
79+
return parange;
4580
}

arch/x86/kvm/cpuid.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -534,6 +534,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
534534
do_cpuid_1_ent(&entry[i], function, idx);
535535
if (idx == 1) {
536536
entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
537+
cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
537538
entry[i].ebx = 0;
538539
if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
539540
entry[i].ebx =

arch/x86/kvm/mmu.h

Lines changed: 4 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -173,10 +173,9 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
173173
int index = (pfec >> 1) +
174174
(smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
175175
bool fault = (mmu->permissions[index] >> pte_access) & 1;
176+
u32 errcode = PFERR_PRESENT_MASK;
176177

177178
WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
178-
pfec |= PFERR_PRESENT_MASK;
179-
180179
if (unlikely(mmu->pkru_mask)) {
181180
u32 pkru_bits, offset;
182181

@@ -189,15 +188,15 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
189188
pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
190189

191190
/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
192-
offset = pfec - 1 +
191+
offset = (pfec & ~1) +
193192
((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));
194193

195194
pkru_bits &= mmu->pkru_mask >> offset;
196-
pfec |= -pkru_bits & PFERR_PK_MASK;
195+
errcode |= -pkru_bits & PFERR_PK_MASK;
197196
fault |= (pkru_bits != 0);
198197
}
199198

200-
return -(uint32_t)fault & pfec;
199+
return -(u32)fault & errcode;
201200
}
202201

203202
void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);

arch/x86/kvm/paging_tmpl.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -360,7 +360,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
360360
goto error;
361361

362362
if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
363-
errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
363+
errcode = PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
364364
goto error;
365365
}
366366

arch/x86/kvm/x86.c

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -700,7 +700,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
700700
if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
701701
return 1;
702702
}
703-
kvm_put_guest_xcr0(vcpu);
704703
vcpu->arch.xcr0 = xcr0;
705704

706705
if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
@@ -6590,8 +6589,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
65906589
kvm_x86_ops->prepare_guest_switch(vcpu);
65916590
if (vcpu->fpu_active)
65926591
kvm_load_guest_fpu(vcpu);
6593-
kvm_load_guest_xcr0(vcpu);
6594-
65956592
vcpu->mode = IN_GUEST_MODE;
65966593

65976594
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6618,6 +6615,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
66186615
goto cancel_injection;
66196616
}
66206617

6618+
kvm_load_guest_xcr0(vcpu);
6619+
66216620
if (req_immediate_exit)
66226621
smp_send_reschedule(vcpu->cpu);
66236622

@@ -6667,6 +6666,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
66676666
vcpu->mode = OUTSIDE_GUEST_MODE;
66686667
smp_wmb();
66696668

6669+
kvm_put_guest_xcr0(vcpu);
6670+
66706671
/* Interrupt is enabled by handle_external_intr() */
66716672
kvm_x86_ops->handle_external_intr(vcpu);
66726673

@@ -7314,7 +7315,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
73147315
* and assume host would use all available bits.
73157316
* Guest xcr0 would be loaded later.
73167317
*/
7317-
kvm_put_guest_xcr0(vcpu);
73187318
vcpu->guest_fpu_loaded = 1;
73197319
__kernel_fpu_begin();
73207320
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
@@ -7323,8 +7323,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
73237323

73247324
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
73257325
{
7326-
kvm_put_guest_xcr0(vcpu);
7327-
73287326
if (!vcpu->guest_fpu_loaded) {
73297327
vcpu->fpu_counter = 0;
73307328
return;

virt/kvm/arm/arch_timer.c

Lines changed: 39 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -91,17 +91,55 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
9191
vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
9292
vcpu->arch.timer_cpu.armed = false;
9393

94+
WARN_ON(!kvm_timer_should_fire(vcpu));
95+
9496
/*
9597
* If the vcpu is blocked we want to wake it up so that it will see
9698
* the timer has expired when entering the guest.
9799
*/
98100
kvm_vcpu_kick(vcpu);
99101
}
100102

103+
static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
104+
{
105+
cycle_t cval, now;
106+
107+
cval = vcpu->arch.timer_cpu.cntv_cval;
108+
now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
109+
110+
if (now < cval) {
111+
u64 ns;
112+
113+
ns = cyclecounter_cyc2ns(timecounter->cc,
114+
cval - now,
115+
timecounter->mask,
116+
&timecounter->frac);
117+
return ns;
118+
}
119+
120+
return 0;
121+
}
122+
101123
static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
102124
{
103125
struct arch_timer_cpu *timer;
126+
struct kvm_vcpu *vcpu;
127+
u64 ns;
128+
104129
timer = container_of(hrt, struct arch_timer_cpu, timer);
130+
vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
131+
132+
/*
133+
* Check that the timer has really expired from the guest's
134+
* PoV (NTP on the host may have forced it to expire
135+
* early). If we should have slept longer, restart it.
136+
*/
137+
ns = kvm_timer_compute_delta(vcpu);
138+
if (unlikely(ns)) {
139+
hrtimer_forward_now(hrt, ns_to_ktime(ns));
140+
return HRTIMER_RESTART;
141+
}
142+
105143
queue_work(wqueue, &timer->expired);
106144
return HRTIMER_NORESTART;
107145
}
@@ -176,8 +214,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
176214
void kvm_timer_schedule(struct kvm_vcpu *vcpu)
177215
{
178216
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
179-
u64 ns;
180-
cycle_t cval, now;
181217

182218
BUG_ON(timer_is_armed(timer));
183219

@@ -197,14 +233,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
197233
return;
198234

199235
/* The timer has not yet expired, schedule a background timer */
200-
cval = timer->cntv_cval;
201-
now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
202-
203-
ns = cyclecounter_cyc2ns(timecounter->cc,
204-
cval - now,
205-
timecounter->mask,
206-
&timecounter->frac);
207-
timer_arm(timer, ns);
236+
timer_arm(timer, kvm_timer_compute_delta(vcpu));
208237
}
209238

210239
void kvm_timer_unschedule(struct kvm_vcpu *vcpu)

virt/kvm/arm/pmu.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -193,11 +193,12 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
193193
{
194194
u64 reg = 0;
195195

196-
if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
196+
if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
197197
reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
198198
reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
199199
reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
200200
reg &= kvm_pmu_valid_counter_mask(vcpu);
201+
}
201202

202203
return reg;
203204
}

0 commit comments

Comments
 (0)