Skip to content

Commit e6c67d8

Browse files
Liran Alon authored and Paolo Bonzini committed
KVM: nVMX: Wake blocked vCPU in guest-mode if pending interrupt in virtual APICv
In case L1 does not intercept L2 HLT, or enters L2 in HLT activity-state, it is possible for a vCPU to be blocked while it is in guest-mode. According to Intel SDM 26.6.5 Interrupt-Window Exiting and Virtual-Interrupt Delivery: "These events wake the logical processor if it just entered the HLT state because of a VM entry". Therefore, if L1 enters L2 in HLT activity-state and L2 has a pending deliverable interrupt in vmcs12->guest_intr_status.RVI, then the vCPU should be woken from the HLT state and injected with the interrupt. In addition, if while the vCPU is blocked (while it is in guest-mode), it receives a nested posted-interrupt, then the vCPU should also be woken and injected with the posted interrupt. To handle these cases, this patch enhances kvm_vcpu_has_events() to also check if there is a pending interrupt in the L2 virtual APICv provided by L1. That is, it evaluates whether there is a pending virtual interrupt for L2 by checking RVI[7:4] > VPPR[7:4] as specified in Intel SDM 29.2.1 Evaluation of Pending Interrupts. Note that this also handles the case of a nested posted-interrupt, because RVI is updated in vmx_complete_nested_posted_interrupt(), which is called from kvm_vcpu_check_block() -> kvm_arch_vcpu_runnable() -> kvm_vcpu_running() -> vmx_check_nested_events() -> vmx_complete_nested_posted_interrupt(). Reviewed-by: Nikita Leshenko <nikita.leshchenko@oracle.com> Reviewed-by: Darren Kenny <darren.kenny@oracle.com> Signed-off-by: Liran Alon <liran.alon@oracle.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 5bea512 commit e6c67d8

File tree

3 files changed

+32
-1
lines changed

3 files changed

+32
-1
lines changed

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1022,6 +1022,7 @@ struct kvm_x86_ops {
10221022
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
10231023
void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
10241024
void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
1025+
bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
10251026
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
10261027
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
10271028
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);

arch/x86/kvm/vmx.c

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6189,6 +6189,27 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
61896189
nested_mark_vmcs12_pages_dirty(vcpu);
61906190
}
61916191

6192+
static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
6193+
{
6194+
struct vcpu_vmx *vmx = to_vmx(vcpu);
6195+
void *vapic_page;
6196+
u32 vppr;
6197+
int rvi;
6198+
6199+
if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
6200+
!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
6201+
WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
6202+
return false;
6203+
6204+
rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff;
6205+
6206+
vapic_page = kmap(vmx->nested.virtual_apic_page);
6207+
vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
6208+
kunmap(vmx->nested.virtual_apic_page);
6209+
6210+
return ((rvi & 0xf0) > (vppr & 0xf0));
6211+
}
6212+
61926213
static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
61936214
bool nested)
61946215
{
@@ -14129,6 +14150,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
1412914150
.apicv_post_state_restore = vmx_apicv_post_state_restore,
1413014151
.hwapic_irr_update = vmx_hwapic_irr_update,
1413114152
.hwapic_isr_update = vmx_hwapic_isr_update,
14153+
.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
1413214154
.sync_pir_to_irr = vmx_sync_pir_to_irr,
1413314155
.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
1413414156

arch/x86/kvm/x86.c

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9206,6 +9206,13 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
92069206
kvm_page_track_flush_slot(kvm, slot);
92079207
}
92089208

9209+
static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
9210+
{
9211+
return (is_guest_mode(vcpu) &&
9212+
kvm_x86_ops->guest_apic_has_interrupt &&
9213+
kvm_x86_ops->guest_apic_has_interrupt(vcpu));
9214+
}
9215+
92099216
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
92109217
{
92119218
if (!list_empty_careful(&vcpu->async_pf.done))
@@ -9230,7 +9237,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
92309237
return true;
92319238

92329239
if (kvm_arch_interrupt_allowed(vcpu) &&
9233-
kvm_cpu_has_interrupt(vcpu))
9240+
(kvm_cpu_has_interrupt(vcpu) ||
9241+
kvm_guest_apic_has_interrupt(vcpu)))
92349242
return true;
92359243

92369244
if (kvm_hv_has_stimer_pending(vcpu))

0 commit comments

Comments
 (0)