
Commit 8fa3adb

Julien Thierry authored and christofferdall-arm committed
KVM: arm/arm64: vgic: Make vgic_irq->irq_lock a raw_spinlock
vgic_irq->irq_lock must always be taken with interrupts disabled as
it is used in interrupt context.

For configurations such as PREEMPT_RT_FULL, this means that it should
be a raw_spinlock since RT spinlocks are interruptible.

Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
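The conversion is mechanical at every call site: the field becomes a raw_spinlock_t and each spin_lock*/spin_unlock* call becomes its raw_ counterpart, keeping the irqsave/irqrestore discipline. Below is a minimal standalone sketch of that pattern; it is not code from this patch, and the demo_irq struct and functions are invented for illustration.

#include <linux/types.h>
#include <linux/spinlock.h>

/* Hypothetical example struct, mirroring vgic_irq's use of a raw lock. */
struct demo_irq {
	raw_spinlock_t lock;	/* was: spinlock_t lock; */
	bool pending;
};

static void demo_irq_init(struct demo_irq *irq)
{
	raw_spin_lock_init(&irq->lock);	/* was: spin_lock_init() */
	irq->pending = false;
}

static void demo_irq_set_pending(struct demo_irq *irq)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() always busy-waits with interrupts off,
	 * so it is safe to take from interrupt context.
	 */
	raw_spin_lock_irqsave(&irq->lock, flags);
	irq->pending = true;
	raw_spin_unlock_irqrestore(&irq->lock, flags);
}

On PREEMPT_RT, spinlock_t is backed by a sleeping lock and no longer disables interrupts, while raw_spinlock_t keeps the classic spin-with-interrupts-off behaviour; that is what the commit message means by RT spinlocks being interruptible.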
1 parent 49a5785 commit 8fa3adb

10 files changed: +83 / -84 lines

include/kvm/arm_vgic.h
Lines changed: 1 addition & 1 deletion

@@ -100,7 +100,7 @@ enum vgic_irq_config {
 };

 struct vgic_irq {
-	spinlock_t irq_lock;		/* Protects the content of the struct */
+	raw_spinlock_t irq_lock;	/* Protects the content of the struct */
 	struct list_head lpi_list;	/* Used to link all LPIs together */
 	struct list_head ap_list;

virt/kvm/arm/vgic/vgic-debug.c
Lines changed: 2 additions & 2 deletions

@@ -251,9 +251,9 @@ static int vgic_debug_show(struct seq_file *s, void *v)
 		return 0;
 	}

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	print_irq_state(s, irq, vcpu);
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

 	vgic_put_irq(kvm, irq);
 	return 0;

virt/kvm/arm/vgic/vgic-init.c
Lines changed: 2 additions & 2 deletions

@@ -171,7 +171,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)

 		irq->intid = i + VGIC_NR_PRIVATE_IRQS;
 		INIT_LIST_HEAD(&irq->ap_list);
-		spin_lock_init(&irq->irq_lock);
+		raw_spin_lock_init(&irq->irq_lock);
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu0;
 		kref_init(&irq->refcount);

@@ -216,7 +216,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 		struct vgic_irq *irq = &vgic_cpu->private_irqs[i];

 		INIT_LIST_HEAD(&irq->ap_list);
-		spin_lock_init(&irq->irq_lock);
+		raw_spin_lock_init(&irq->irq_lock);
 		irq->intid = i;
 		irq->vcpu = NULL;
 		irq->target_vcpu = vcpu;

virt/kvm/arm/vgic/vgic-its.c
Lines changed: 7 additions & 7 deletions

@@ -65,7 +65,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,

 	INIT_LIST_HEAD(&irq->lpi_list);
 	INIT_LIST_HEAD(&irq->ap_list);
-	spin_lock_init(&irq->irq_lock);
+	raw_spin_lock_init(&irq->irq_lock);

 	irq->config = VGIC_CONFIG_EDGE;
 	kref_init(&irq->refcount);

@@ -287,7 +287,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 	if (ret)
 		return ret;

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);

 	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
 		irq->priority = LPI_PROP_PRIORITY(prop);

@@ -299,7 +299,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
 		}
 	}

-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

 	if (irq->hw)
 		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);

@@ -352,9 +352,9 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
 	int ret = 0;
 	unsigned long flags;

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->target_vcpu = vcpu;
-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

 	if (irq->hw) {
 		struct its_vlpi_map map;

@@ -455,7 +455,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
 		}

 		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->pending_latch = pendmask & (1U << bit_nr);
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		vgic_put_irq(vcpu->kvm, irq);

@@ -612,7 +612,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
 		return irq_set_irqchip_state(irq->host_irq,
 					     IRQCHIP_STATE_PENDING, true);

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	irq->pending_latch = true;
 	vgic_queue_irq_unlock(kvm, irq, flags);

virt/kvm/arm/vgic/vgic-mmio-v2.c
Lines changed: 7 additions & 7 deletions

@@ -147,7 +147,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,

 		irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->pending_latch = true;
 		irq->source |= 1U << source_vcpu->vcpu_id;

@@ -191,13 +191,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i);
 		int target;

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);

 		irq->targets = (val >> (i * 8)) & cpu_mask;
 		target = irq->targets ? __ffs(irq->targets) : 0;
 		irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }

@@ -230,13 +230,13 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);

 		irq->source &= ~((val >> (i * 8)) & 0xff);
 		if (!irq->source)
 			irq->pending_latch = false;

-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }

@@ -252,15 +252,15 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);

 		irq->source |= (val >> (i * 8)) & 0xff;

 		if (irq->source) {
 			irq->pending_latch = true;
 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		} else {
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		}
 		vgic_put_irq(vcpu->kvm, irq);
 	}
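
Several of the converted sites (vgic_mmio_write_sgipends above, and the pending-state writers in vgic-its.c and vgic-mmio-v3.c) share one shape: take the lock with the raw irqsave variant, and on the path that makes the interrupt pending let vgic_queue_irq_unlock() queue it and drop irq_lock itself, so only the non-queueing branch unlocks explicitly. A schematic sketch of that shape follows; demo_write_pending is an invented name, and the snippet assumes it sits next to the vgic sources so it can use their private header.

/* Schematic only; would live alongside virt/kvm/arm/vgic/ and include its
 * private "vgic.h" for vgic_queue_irq_unlock() and struct vgic_irq. */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include "vgic.h"

static void demo_write_pending(struct kvm *kvm, struct vgic_irq *irq, bool set)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (set) {
		irq->pending_latch = true;
		/* Queues the IRQ on a vcpu's ap_list and releases irq_lock. */
		vgic_queue_irq_unlock(kvm, irq, flags);
	} else {
		irq->pending_latch = false;
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	}
}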

virt/kvm/arm/vgic/vgic-mmio-v3.c
Lines changed: 6 additions & 6 deletions

@@ -169,13 +169,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
 	if (!irq)
 		return;

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);

 	/* We only care about and preserve Aff0, Aff1 and Aff2. */
 	irq->mpidr = val & GENMASK(23, 0);
 	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

-	spin_unlock_irqrestore(&irq->irq_lock, flags);
+	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	vgic_put_irq(vcpu->kvm, irq);
 }

@@ -281,7 +281,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		if (test_bit(i, &val)) {
 			/*
 			 * pending_latch is set irrespective of irq type

@@ -292,7 +292,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		} else {
 			irq->pending_latch = false;
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		}

 		vgic_put_irq(vcpu->kvm, irq);

@@ -957,7 +957,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)

 		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);

 		/*
 		 * An access targetting Group0 SGIs can only generate

@@ -968,7 +968,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
 			irq->pending_latch = true;
 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		} else {
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		}

 		vgic_put_irq(vcpu->kvm, irq);

virt/kvm/arm/vgic/vgic-mmio.c
Lines changed: 17 additions & 17 deletions

@@ -77,7 +77,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
 	for (i = 0; i < len * 8; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->group = !!(val & BIT(i));
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

@@ -120,7 +120,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->enabled = true;
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

@@ -139,11 +139,11 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);

 		irq->enabled = false;

-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }

@@ -160,10 +160,10 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 		unsigned long flags;

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		if (irq_is_pending(irq))
 			value |= (1U << i);
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

 		vgic_put_irq(vcpu->kvm, irq);
 	}

@@ -215,7 +215,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		if (irq->hw)
 			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
 		else

@@ -262,14 +262,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
 	for_each_set_bit(i, &val, len * 8) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);

 		if (irq->hw)
 			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
 		else
 			irq->pending_latch = false;

-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }

@@ -311,7 +311,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	unsigned long flags;
 	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);

 	if (irq->hw) {
 		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);

@@ -342,7 +342,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 	if (irq->active)
 		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 	else
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 }

 /*

@@ -485,10 +485,10 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
 	for (i = 0; i < len; i++) {
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		/* Narrow the priority range to what we actually support */
 		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

 		vgic_put_irq(vcpu->kvm, irq);
 	}

@@ -534,14 +534,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
 			continue;

 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);

 		if (test_bit(i * 2 + 1, &val))
 			irq->config = VGIC_CONFIG_EDGE;
 		else
 			irq->config = VGIC_CONFIG_LEVEL;

-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 }

@@ -590,12 +590,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
 		 * restore irq config before line level.
 		 */
 		new_level = !!(val & (1U << i));
-		spin_lock_irqsave(&irq->irq_lock, flags);
+		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		irq->line_level = new_level;
 		if (new_level)
 			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
 		else
-			spin_unlock_irqrestore(&irq->irq_lock, flags);
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

 		vgic_put_irq(vcpu->kvm, irq);
 	}

virt/kvm/arm/vgic/vgic-v2.c
Lines changed: 2 additions & 2 deletions

@@ -84,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)

 		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);

 		/* Always preserve the active bit */
 		irq->active = !!(val & GICH_LR_ACTIVE_BIT);

@@ -127,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 			vgic_irq_set_phys_active(irq, false);
 		}

-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 		vgic_put_irq(vcpu->kvm, irq);
 	}

virt/kvm/arm/vgic/vgic-v3.c
Lines changed: 4 additions & 4 deletions

@@ -76,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 		if (!irq)	/* An LPI could have been unmapped. */
 			continue;

-		spin_lock(&irq->irq_lock);
+		raw_spin_lock(&irq->irq_lock);

 		/* Always preserve the active bit */
 		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

@@ -119,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 			vgic_irq_set_phys_active(irq, false);
 		}

-		spin_unlock(&irq->irq_lock);
+		raw_spin_unlock(&irq->irq_lock);
 		vgic_put_irq(vcpu->kvm, irq);
 	}

@@ -347,9 +347,9 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)

 	status = val & (1 << bit_nr);

-	spin_lock_irqsave(&irq->irq_lock, flags);
+	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 	if (irq->target_vcpu != vcpu) {
-		spin_unlock_irqrestore(&irq->irq_lock, flags);
+		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		goto retry;
 	}
 	irq->pending_latch = status;
