Skip to content

Commit e37a07e

Browse files
committed
Merge tag 'kvm-4.13-2' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more KVM updates from Radim Krčmář: "Second batch of KVM updates for v4.13 Common: - add uevents for VM creation/destruction - annotate and properly access RCU-protected objects s390: - rename IOCTL added in the first v4.13 merge x86: - emulate VMLOAD VMSAVE feature in SVM - support paravirtual asynchronous page fault while nested - add Hyper-V userspace interfaces for better migration - improve master clock corner cases - extend internal error reporting after EPT misconfig - correct single-stepping of emulated instructions in SVM - handle MCE during VM entry - fix nVMX VM entry checks and nVMX VMCS shadowing" * tag 'kvm-4.13-2' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (28 commits) kvm: x86: hyperv: make VP_INDEX managed by userspace KVM: async_pf: Let guest support delivery of async_pf from guest mode KVM: async_pf: Force a nested vmexit if the injected #PF is async_pf KVM: async_pf: Add L1 guest async_pf #PF vmexit handler KVM: x86: Simplify kvm_x86_ops->queue_exception parameter list kvm: x86: hyperv: add KVM_CAP_HYPERV_SYNIC2 KVM: x86: make backwards_tsc_observed a per-VM variable KVM: trigger uevents when creating or destroying a VM KVM: SVM: Enable Virtual VMLOAD VMSAVE feature KVM: SVM: Add Virtual VMLOAD VMSAVE feature definition KVM: SVM: Rename lbr_ctl field in the vmcb control area KVM: SVM: Prepare for new bit definition in lbr_ctl KVM: SVM: handle singlestep exception when skipping emulated instructions KVM: x86: take slots_lock in kvm_free_pit KVM: s390: Fix KVM_S390_GET_CMMA_BITS ioctl definition kvm: vmx: Properly handle machine check during VM-entry KVM: x86: update master clock before computing kvmclock_offset kvm: nVMX: Shadow "high" parts of shadowed 64-bit VMCS fields kvm: nVMX: Fix nested_vmx_check_msr_bitmap_controls kvm: nVMX: Validate the I/O bitmaps on nested VM-entry ...
2 parents a80099a + d3457c8 commit e37a07e

File tree

21 files changed

+470
-207
lines changed

21 files changed

+470
-207
lines changed

Documentation/virtual/kvm/api.txt

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4329,3 +4329,21 @@ Querying this capability returns a bitmap indicating the possible
43294329
virtual SMT modes that can be set using KVM_CAP_PPC_SMT. If bit N
43304330
(counting from the right) is set, then a virtual SMT mode of 2^N is
43314331
available.
4332+
4333+
8.11 KVM_CAP_HYPERV_SYNIC2
4334+
4335+
Architectures: x86
4336+
4337+
This capability enables a newer version of Hyper-V Synthetic interrupt
4338+
controller (SynIC). The only difference with KVM_CAP_HYPERV_SYNIC is that KVM
4339+
doesn't clear SynIC message and event flags pages when they are enabled by
4340+
writing to the respective MSRs.
4341+
4342+
8.12 KVM_CAP_HYPERV_VP_INDEX
4343+
4344+
Architectures: x86
4345+
4346+
This capability indicates that userspace can load HV_X64_MSR_VP_INDEX msr. Its
4347+
value is used to denote the target vcpu for a SynIC interrupt. For
4348+
compatibility, KVM initializes this msr to KVM's internal vcpu index. When this
4349+
capability is absent, userspace can still query this msr's value.

Documentation/virtual/kvm/msr.txt

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -166,10 +166,11 @@ MSR_KVM_SYSTEM_TIME: 0x12
166166
MSR_KVM_ASYNC_PF_EN: 0x4b564d02
167167
data: Bits 63-6 hold 64-byte aligned physical address of a
168168
64 byte memory area which must be in guest RAM and must be
169-
zeroed. Bits 5-2 are reserved and should be zero. Bit 0 is 1
169+
zeroed. Bits 5-3 are reserved and should be zero. Bit 0 is 1
170170
when asynchronous page faults are enabled on the vcpu 0 when
171171
disabled. Bit 1 is 1 if asynchronous page faults can be injected
172-
when vcpu is in cpl == 0.
172+
when vcpu is in cpl == 0. Bit 2 is 1 if asynchronous page faults
173+
are delivered to L1 as #PF vmexits.
173174

174175
First 4 byte of 64 byte memory location will be written to by
175176
the hypervisor at the time of asynchronous page fault (APF)

arch/x86/include/asm/cpufeatures.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -286,6 +286,7 @@
286286
#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
287287
#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
288288
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
289+
#define X86_FEATURE_VIRTUAL_VMLOAD_VMSAVE (15*32+15) /* Virtual VMLOAD VMSAVE */
289290

290291
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
291292
#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/

arch/x86/include/asm/kvm_emulate.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ struct x86_exception {
2323
u16 error_code;
2424
bool nested_page_fault;
2525
u64 address; /* cr2 or nested page fault gpa */
26+
u8 async_page_fault;
2627
};
2728

2829
/*

arch/x86/include/asm/kvm_host.h

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -462,10 +462,12 @@ struct kvm_vcpu_hv_synic {
462462
DECLARE_BITMAP(auto_eoi_bitmap, 256);
463463
DECLARE_BITMAP(vec_bitmap, 256);
464464
bool active;
465+
bool dont_zero_synic_pages;
465466
};
466467

467468
/* Hyper-V per vcpu emulation context */
468469
struct kvm_vcpu_hv {
470+
u32 vp_index;
469471
u64 hv_vapic;
470472
s64 runtime_offset;
471473
struct kvm_vcpu_hv_synic synic;
@@ -549,6 +551,7 @@ struct kvm_vcpu_arch {
549551
bool reinject;
550552
u8 nr;
551553
u32 error_code;
554+
u8 nested_apf;
552555
} exception;
553556

554557
struct kvm_queued_interrupt {
@@ -649,6 +652,9 @@ struct kvm_vcpu_arch {
649652
u64 msr_val;
650653
u32 id;
651654
bool send_user_only;
655+
u32 host_apf_reason;
656+
unsigned long nested_apf_token;
657+
bool delivery_as_pf_vmexit;
652658
} apf;
653659

654660
/* OSVW MSRs (AMD only) */
@@ -803,6 +809,7 @@ struct kvm_arch {
803809
int audit_point;
804810
#endif
805811

812+
bool backwards_tsc_observed;
806813
bool boot_vcpu_runs_old_kvmclock;
807814
u32 bsp_vcpu_id;
808815

@@ -952,9 +959,7 @@ struct kvm_x86_ops {
952959
unsigned char *hypercall_addr);
953960
void (*set_irq)(struct kvm_vcpu *vcpu);
954961
void (*set_nmi)(struct kvm_vcpu *vcpu);
955-
void (*queue_exception)(struct kvm_vcpu *vcpu, unsigned nr,
956-
bool has_error_code, u32 error_code,
957-
bool reinject);
962+
void (*queue_exception)(struct kvm_vcpu *vcpu);
958963
void (*cancel_injection)(struct kvm_vcpu *vcpu);
959964
int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
960965
int (*nmi_allowed)(struct kvm_vcpu *vcpu);

arch/x86/include/asm/svm.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
8383
u32 event_inj;
8484
u32 event_inj_err;
8585
u64 nested_cr3;
86-
u64 lbr_ctl;
86+
u64 virt_ext;
8787
u32 clean;
8888
u32 reserved_5;
8989
u64 next_rip;
@@ -119,6 +119,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
119119
#define AVIC_ENABLE_SHIFT 31
120120
#define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
121121

122+
#define LBR_CTL_ENABLE_MASK BIT_ULL(0)
123+
#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)
124+
122125
#define SVM_INTERRUPT_SHADOW_MASK 1
123126

124127
#define SVM_IOIO_STR_SHIFT 2

arch/x86/include/uapi/asm/kvm_para.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,7 @@ struct kvm_clock_pairing {
6767

6868
#define KVM_ASYNC_PF_ENABLED (1 << 0)
6969
#define KVM_ASYNC_PF_SEND_ALWAYS (1 << 1)
70+
#define KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT (1 << 2)
7071

7172
/* Operations for KVM_HC_MMU_OP */
7273
#define KVM_MMU_OP_WRITE_PTE 1

arch/x86/kernel/kvm.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -330,7 +330,12 @@ static void kvm_guest_cpu_init(void)
330330
#ifdef CONFIG_PREEMPT
331331
pa |= KVM_ASYNC_PF_SEND_ALWAYS;
332332
#endif
333-
wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
333+
pa |= KVM_ASYNC_PF_ENABLED;
334+
335+
/* Async page fault support for L1 hypervisor is optional */
336+
if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
337+
(pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
338+
wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
334339
__this_cpu_write(apf_reason.enabled, 1);
335340
printk(KERN_INFO"KVM setup async PF for cpu %d\n",
336341
smp_processor_id());

arch/x86/kvm/hyperv.c

Lines changed: 44 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -106,14 +106,27 @@ static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
106106
return 0;
107107
}
108108

109-
static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vcpu_id)
109+
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
110+
{
111+
struct kvm_vcpu *vcpu = NULL;
112+
int i;
113+
114+
if (vpidx < KVM_MAX_VCPUS)
115+
vcpu = kvm_get_vcpu(kvm, vpidx);
116+
if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
117+
return vcpu;
118+
kvm_for_each_vcpu(i, vcpu, kvm)
119+
if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
120+
return vcpu;
121+
return NULL;
122+
}
123+
124+
static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
110125
{
111126
struct kvm_vcpu *vcpu;
112127
struct kvm_vcpu_hv_synic *synic;
113128

114-
if (vcpu_id >= atomic_read(&kvm->online_vcpus))
115-
return NULL;
116-
vcpu = kvm_get_vcpu(kvm, vcpu_id);
129+
vcpu = get_vcpu_by_vpidx(kvm, vpidx);
117130
if (!vcpu)
118131
return NULL;
119132
synic = vcpu_to_synic(vcpu);
@@ -221,7 +234,8 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
221234
synic->version = data;
222235
break;
223236
case HV_X64_MSR_SIEFP:
224-
if (data & HV_SYNIC_SIEFP_ENABLE)
237+
if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
238+
!synic->dont_zero_synic_pages)
225239
if (kvm_clear_guest(vcpu->kvm,
226240
data & PAGE_MASK, PAGE_SIZE)) {
227241
ret = 1;
@@ -232,7 +246,8 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
232246
synic_exit(synic, msr);
233247
break;
234248
case HV_X64_MSR_SIMP:
235-
if (data & HV_SYNIC_SIMP_ENABLE)
249+
if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
250+
!synic->dont_zero_synic_pages)
236251
if (kvm_clear_guest(vcpu->kvm,
237252
data & PAGE_MASK, PAGE_SIZE)) {
238253
ret = 1;
@@ -318,11 +333,11 @@ static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
318333
return ret;
319334
}
320335

321-
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint)
336+
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
322337
{
323338
struct kvm_vcpu_hv_synic *synic;
324339

325-
synic = synic_get(kvm, vcpu_id);
340+
synic = synic_get(kvm, vpidx);
326341
if (!synic)
327342
return -EINVAL;
328343

@@ -341,11 +356,11 @@ void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
341356
kvm_hv_notify_acked_sint(vcpu, i);
342357
}
343358

344-
static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vcpu_id, u32 sint, int gsi)
359+
static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
345360
{
346361
struct kvm_vcpu_hv_synic *synic;
347362

348-
synic = synic_get(kvm, vcpu_id);
363+
synic = synic_get(kvm, vpidx);
349364
if (!synic)
350365
return -EINVAL;
351366

@@ -687,14 +702,24 @@ void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
687702
stimer_init(&hv_vcpu->stimer[i], i);
688703
}
689704

690-
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu)
705+
void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
706+
{
707+
struct kvm_vcpu_hv *hv_vcpu = vcpu_to_hv_vcpu(vcpu);
708+
709+
hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu);
710+
}
711+
712+
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
691713
{
714+
struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
715+
692716
/*
693717
* Hyper-V SynIC auto EOI SINT's are
694718
* not compatible with APICV, so deactivate APICV
695719
*/
696720
kvm_vcpu_deactivate_apicv(vcpu);
697-
vcpu_to_synic(vcpu)->active = true;
721+
synic->active = true;
722+
synic->dont_zero_synic_pages = dont_zero_synic_pages;
698723
return 0;
699724
}
700725

@@ -978,6 +1003,11 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
9781003
struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
9791004

9801005
switch (msr) {
1006+
case HV_X64_MSR_VP_INDEX:
1007+
if (!host)
1008+
return 1;
1009+
hv->vp_index = (u32)data;
1010+
break;
9811011
case HV_X64_MSR_APIC_ASSIST_PAGE: {
9821012
u64 gfn;
9831013
unsigned long addr;
@@ -1089,18 +1119,9 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
10891119
struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
10901120

10911121
switch (msr) {
1092-
case HV_X64_MSR_VP_INDEX: {
1093-
int r;
1094-
struct kvm_vcpu *v;
1095-
1096-
kvm_for_each_vcpu(r, v, vcpu->kvm) {
1097-
if (v == vcpu) {
1098-
data = r;
1099-
break;
1100-
}
1101-
}
1122+
case HV_X64_MSR_VP_INDEX:
1123+
data = hv->vp_index;
11021124
break;
1103-
}
11041125
case HV_X64_MSR_EOI:
11051126
return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
11061127
case HV_X64_MSR_ICR:

arch/x86/kvm/hyperv.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,9 +56,10 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
5656
void kvm_hv_irq_routing_update(struct kvm *kvm);
5757
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
5858
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
59-
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu);
59+
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);
6060

6161
void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
62+
void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
6263
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
6364

6465
static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,

arch/x86/kvm/i8254.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -724,8 +724,10 @@ void kvm_free_pit(struct kvm *kvm)
724724
struct kvm_pit *pit = kvm->arch.vpit;
725725

726726
if (pit) {
727+
mutex_lock(&kvm->slots_lock);
727728
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
728729
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
730+
mutex_unlock(&kvm->slots_lock);
729731
kvm_pit_set_reinject(pit, false);
730732
hrtimer_cancel(&pit->pit_state.timer);
731733
kthread_destroy_worker(pit->worker);

arch/x86/kvm/mmu.c

Lines changed: 34 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@
4646
#include <asm/io.h>
4747
#include <asm/vmx.h>
4848
#include <asm/kvm_page_track.h>
49+
#include "trace.h"
4950

5051
/*
5152
* When setting this variable to true it enables Two-Dimensional-Paging
@@ -3748,7 +3749,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
37483749
kvm_event_needs_reinjection(vcpu)))
37493750
return false;
37503751

3751-
if (is_guest_mode(vcpu))
3752+
if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu))
37523753
return false;
37533754

37543755
return kvm_x86_ops->interrupt_allowed(vcpu);
@@ -3780,6 +3781,38 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
37803781
return false;
37813782
}
37823783

3784+
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
3785+
u64 fault_address, char *insn, int insn_len,
3786+
bool need_unprotect)
3787+
{
3788+
int r = 1;
3789+
3790+
switch (vcpu->arch.apf.host_apf_reason) {
3791+
default:
3792+
trace_kvm_page_fault(fault_address, error_code);
3793+
3794+
if (need_unprotect && kvm_event_needs_reinjection(vcpu))
3795+
kvm_mmu_unprotect_page_virt(vcpu, fault_address);
3796+
r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
3797+
insn_len);
3798+
break;
3799+
case KVM_PV_REASON_PAGE_NOT_PRESENT:
3800+
vcpu->arch.apf.host_apf_reason = 0;
3801+
local_irq_disable();
3802+
kvm_async_pf_task_wait(fault_address);
3803+
local_irq_enable();
3804+
break;
3805+
case KVM_PV_REASON_PAGE_READY:
3806+
vcpu->arch.apf.host_apf_reason = 0;
3807+
local_irq_disable();
3808+
kvm_async_pf_task_wake(fault_address);
3809+
local_irq_enable();
3810+
break;
3811+
}
3812+
return r;
3813+
}
3814+
EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
3815+
37833816
static bool
37843817
check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
37853818
{

arch/x86/kvm/mmu.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,6 +77,9 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
7777
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
7878
bool accessed_dirty);
7979
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
80+
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
81+
u64 fault_address, char *insn, int insn_len,
82+
bool need_unprotect);
8083

8184
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
8285
{

0 commit comments

Comments
 (0)