@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
  * When taking more than one ap_list_lock at the same time, always take the
  * lowest numbered VCPU's ap_list_lock first, so:
  *   vcpuX->vcpu_id < vcpuY->vcpu_id:
- *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
- *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
  *
  * Since the VGIC must support injecting virtual interrupts from ISRs, we have
- * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
+ * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
  * spinlocks for any lock that may be taken while injecting an interrupt.
  */
 
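The comment above states the two rules the rest of this patch follows: nested ap_list_locks are always taken in ascending vcpu_id order, and any lock that can be taken while injecting an interrupt from an ISR must use the raw, IRQ-disabling primitives. A minimal sketch of how the ordering rule looks in code, using a hypothetical helper that is not part of this patch (it assumes interrupts are already disabled by the caller, as vgic_prune_ap_list() below asserts):

#include <linux/kvm_host.h>

/* Hypothetical helper: lock two VCPUs' ap_list_locks in the documented order. */
static void vgic_lock_both_ap_lists(struct kvm_vcpu *x, struct kvm_vcpu *y)
{
        struct kvm_vcpu *first  = x->vcpu_id < y->vcpu_id ? x : y;
        struct kvm_vcpu *second = x->vcpu_id < y->vcpu_id ? y : x;

        /* Lowest vcpu_id first, so two concurrent callers cannot deadlock. */
        raw_spin_lock(&first->arch.vgic_cpu.ap_list_lock);
        /* Both locks share a lock class; tell lockdep the nesting is intended. */
        raw_spin_lock_nested(&second->arch.vgic_cpu.ap_list_lock,
                             SINGLE_DEPTH_NESTING);
}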
@@ -351,7 +351,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
 
         /* someone can do stuff here, which we re-check below */
 
-        spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+        raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
         raw_spin_lock(&irq->irq_lock);
 
         /*
@@ -368,7 +368,8 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
 
         if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
                 raw_spin_unlock(&irq->irq_lock);
-                spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+                raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
+                                           flags);
 
                 raw_spin_lock_irqsave(&irq->irq_lock, flags);
                 goto retry;
@@ -383,7 +384,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
         irq->vcpu = vcpu;
 
         raw_spin_unlock(&irq->irq_lock);
-        spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+        raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 
         kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
         kvm_vcpu_kick(vcpu);
@@ -597,7 +598,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
         DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
 retry:
-        spin_lock(&vgic_cpu->ap_list_lock);
+        raw_spin_lock(&vgic_cpu->ap_list_lock);
 
         list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                 struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
@@ -638,7 +639,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
                 /* This interrupt looks like it has to be migrated. */
 
                 raw_spin_unlock(&irq->irq_lock);
-                spin_unlock(&vgic_cpu->ap_list_lock);
+                raw_spin_unlock(&vgic_cpu->ap_list_lock);
 
                 /*
                  * Ensure locking order by always locking the smallest
@@ -652,9 +653,9 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
                         vcpuB = vcpu;
                 }
 
-                spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
-                spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
-                                 SINGLE_DEPTH_NESTING);
+                raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+                raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
+                                     SINGLE_DEPTH_NESTING);
                 raw_spin_lock(&irq->irq_lock);
 
                 /*
@@ -676,8 +677,8 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
                 }
 
                 raw_spin_unlock(&irq->irq_lock);
-                spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
-                spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+                raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
+                raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
 
                 if (target_vcpu_needs_kick) {
                         kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -687,7 +688,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
                 goto retry;
         }
 
-        spin_unlock(&vgic_cpu->ap_list_lock);
+        raw_spin_unlock(&vgic_cpu->ap_list_lock);
 }
 
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
         DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-        spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+        raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
         vgic_flush_lr_state(vcpu);
-        spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+        raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
 
         if (can_access_vgic_from_kernel())
                 vgic_restore_state(vcpu);
@@ -918,7 +919,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 
         vgic_get_vmcr(vcpu, &vmcr);
 
-        spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+        raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 
         list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                 raw_spin_lock(&irq->irq_lock);
@@ -931,7 +932,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
                         break;
         }
 
-        spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+        raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 
         return pending;
 }
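These hunks convert only the call sites; for them to build, the lock itself must be declared as a raw_spinlock_t and initialized with the raw initializer, which happens outside the lines shown here. The broader motivation for such conversions is that on PREEMPT_RT a spinlock_t may sleep, which is not acceptable for a lock taken with interrupts disabled or while injecting interrupts from ISR context. A self-contained sketch of the declaration side, using a stand-in struct name (field names taken from the call sites above; everything else is assumed, not part of this diff):

#include <linux/list.h>
#include <linux/spinlock.h>

/* Stand-in for the real per-VCPU vgic state; only the relevant fields shown. */
struct vgic_cpu_sketch {
        raw_spinlock_t ap_list_lock;    /* previously a sleeping-capable spinlock_t */
        struct list_head ap_list_head;
};

/* Assumed init-time counterpart of the call-site changes above. */
static void vgic_cpu_sketch_init(struct vgic_cpu_sketch *vgic_cpu)
{
        raw_spin_lock_init(&vgic_cpu->ap_list_lock);
        INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
}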