Skip to content

Commit e08d8d2

Browse files
Julien Thierry authored and christofferdall-arm committed
KVM: arm/arm64: vgic: Make vgic_cpu->ap_list_lock a raw_spinlock
vgic_cpu->ap_list_lock must always be taken with interrupts disabled as it is used in interrupt context. For configurations such as PREEMPT_RT_FULL, this means that it should be a raw_spinlock since RT spinlocks are interruptible. Signed-off-by: Julien Thierry <julien.thierry@arm.com> Acked-by: Christoffer Dall <christoffer.dall@arm.com> Acked-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
1 parent fc3bc47 commit e08d8d2

File tree

3 files changed

+21
-20
lines changed

3 files changed

+21
-20
lines changed

include/kvm/arm_vgic.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -307,7 +307,7 @@ struct vgic_cpu {
307307
unsigned int used_lrs;
308308
struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
309309

310-
spinlock_t ap_list_lock; /* Protects the ap_list */
310+
raw_spinlock_t ap_list_lock; /* Protects the ap_list */
311311

312312
/*
313313
* List of IRQs that this VCPU should consider because they are either

virt/kvm/arm/vgic/vgic-init.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
206206
vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;
207207

208208
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
209-
spin_lock_init(&vgic_cpu->ap_list_lock);
209+
raw_spin_lock_init(&vgic_cpu->ap_list_lock);
210210

211211
/*
212212
* Enable and configure all SGIs to be edge-triggered and

virt/kvm/arm/vgic/vgic.c

Lines changed: 19 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
5454
* When taking more than one ap_list_lock at the same time, always take the
5555
* lowest numbered VCPU's ap_list_lock first, so:
5656
* vcpuX->vcpu_id < vcpuY->vcpu_id:
57-
* spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
58-
* spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
57+
* raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
58+
* raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
5959
*
6060
* Since the VGIC must support injecting virtual interrupts from ISRs, we have
61-
* to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
61+
* to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
6262
* spinlocks for any lock that may be taken while injecting an interrupt.
6363
*/
6464

@@ -351,7 +351,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
351351

352352
/* someone can do stuff here, which we re-check below */
353353

354-
spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
354+
raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
355355
raw_spin_lock(&irq->irq_lock);
356356

357357
/*
@@ -368,7 +368,8 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
368368

369369
if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
370370
raw_spin_unlock(&irq->irq_lock);
371-
spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
371+
raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
372+
flags);
372373

373374
raw_spin_lock_irqsave(&irq->irq_lock, flags);
374375
goto retry;
@@ -383,7 +384,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
383384
irq->vcpu = vcpu;
384385

385386
raw_spin_unlock(&irq->irq_lock);
386-
spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
387+
raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
387388

388389
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
389390
kvm_vcpu_kick(vcpu);
@@ -597,7 +598,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
597598
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
598599

599600
retry:
600-
spin_lock(&vgic_cpu->ap_list_lock);
601+
raw_spin_lock(&vgic_cpu->ap_list_lock);
601602

602603
list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
603604
struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
@@ -638,7 +639,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
638639
/* This interrupt looks like it has to be migrated. */
639640

640641
raw_spin_unlock(&irq->irq_lock);
641-
spin_unlock(&vgic_cpu->ap_list_lock);
642+
raw_spin_unlock(&vgic_cpu->ap_list_lock);
642643

643644
/*
644645
* Ensure locking order by always locking the smallest
@@ -652,9 +653,9 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
652653
vcpuB = vcpu;
653654
}
654655

655-
spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
656-
spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
657-
SINGLE_DEPTH_NESTING);
656+
raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
657+
raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
658+
SINGLE_DEPTH_NESTING);
658659
raw_spin_lock(&irq->irq_lock);
659660

660661
/*
@@ -676,8 +677,8 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
676677
}
677678

678679
raw_spin_unlock(&irq->irq_lock);
679-
spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
680-
spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
680+
raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
681+
raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
681682

682683
if (target_vcpu_needs_kick) {
683684
kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -687,7 +688,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
687688
goto retry;
688689
}
689690

690-
spin_unlock(&vgic_cpu->ap_list_lock);
691+
raw_spin_unlock(&vgic_cpu->ap_list_lock);
691692
}
692693

693694
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
872873

873874
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
874875

875-
spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
876+
raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
876877
vgic_flush_lr_state(vcpu);
877-
spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
878+
raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
878879

879880
if (can_access_vgic_from_kernel())
880881
vgic_restore_state(vcpu);
@@ -918,7 +919,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
918919

919920
vgic_get_vmcr(vcpu, &vmcr);
920921

921-
spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
922+
raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
922923

923924
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
924925
raw_spin_lock(&irq->irq_lock);
@@ -931,7 +932,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
931932
break;
932933
}
933934

934-
spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
935+
raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
935936

936937
return pending;
937938
}

0 commit comments

Comments (0)