Commit 59120e0

westeri authored and davem330 committed
thunderbolt: Use spinlock in NHI serialization
This is needed because ring polling functionality can be called from atomic contexts when networking and other high-speed traffic is transferred over a Thunderbolt cable.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Reviewed-by: Michael Jamet <michael.jamet@intel.com>
Reviewed-by: Yehezkel Bernat <yehezkel.bernat@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 22b7de1 commit 59120e0
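For context on the change: mutex_lock() may sleep, which is forbidden in the atomic (softirq/IRQ) contexts from which ring polling can now be invoked, so the NHI lock has to become a spinlock. Below is a minimal, hypothetical sketch of that constraint; the names my_nhi_state and my_frame_poll are illustrative only and are not part of this commit or the driver's API.

#include <linux/spinlock.h>
#include <linux/types.h>

struct my_nhi_state {
        spinlock_t lock;        /* was a struct mutex before this change */
        bool going_away;
};

/* Hypothetical poll helper called from softirq/atomic context while
 * Thunderbolt networking is transferring frames. */
static bool my_frame_poll(struct my_nhi_state *nhi)
{
        unsigned long flags;
        bool alive;

        /* mutex_lock() may sleep and is not allowed here; a spinlock
         * taken with interrupts disabled is safe in atomic context. */
        spin_lock_irqsave(&nhi->lock, flags);
        alive = !nhi->going_away;
        spin_unlock_irqrestore(&nhi->lock, flags);

        return alive;
}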

2 files changed: +42, -35 lines

drivers/thunderbolt/nhi.c

Lines changed: 41 additions & 34 deletions
@@ -327,21 +327,9 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
 	if (transmit && hop == RING_E2E_UNUSED_HOPID)
 		return NULL;
 
-	mutex_lock(&nhi->lock);
-	if (hop >= nhi->hop_count) {
-		dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
-		goto err;
-	}
-	if (transmit && nhi->tx_rings[hop]) {
-		dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
-		goto err;
-	} else if (!transmit && nhi->rx_rings[hop]) {
-		dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
-		goto err;
-	}
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring)
-		goto err;
+		return NULL;
 
 	spin_lock_init(&ring->lock);
 	INIT_LIST_HEAD(&ring->queue);
@@ -359,25 +347,45 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
 	ring->tail = 0;
 	ring->running = false;
 
-	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
-		goto err;
-
 	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
 			size * sizeof(*ring->descriptors),
 			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
 	if (!ring->descriptors)
-		goto err;
+		goto err_free_ring;
 
+	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
+		goto err_free_descs;
+
+	spin_lock_irq(&nhi->lock);
+	if (hop >= nhi->hop_count) {
+		dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
+		goto err_release_msix;
+	}
+	if (transmit && nhi->tx_rings[hop]) {
+		dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
+		goto err_release_msix;
+	} else if (!transmit && nhi->rx_rings[hop]) {
+		dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
+		goto err_release_msix;
+	}
 	if (transmit)
 		nhi->tx_rings[hop] = ring;
 	else
 		nhi->rx_rings[hop] = ring;
-	mutex_unlock(&nhi->lock);
+	spin_unlock_irq(&nhi->lock);
+
 	return ring;
 
-err:
+err_release_msix:
+	spin_unlock_irq(&nhi->lock);
+	ring_release_msix(ring);
+err_free_descs:
+	dma_free_coherent(&ring->nhi->pdev->dev,
+			ring->size * sizeof(*ring->descriptors),
+			ring->descriptors, ring->descriptors_dma);
+err_free_ring:
 	kfree(ring);
-	mutex_unlock(&nhi->lock);
+
 	return NULL;
 }
 
@@ -421,8 +429,8 @@ void tb_ring_start(struct tb_ring *ring)
 	u16 frame_size;
 	u32 flags;
 
-	mutex_lock(&ring->nhi->lock);
-	spin_lock_irq(&ring->lock);
+	spin_lock_irq(&ring->nhi->lock);
+	spin_lock(&ring->lock);
 	if (ring->nhi->going_away)
 		goto err;
 	if (ring->running) {
@@ -469,8 +477,8 @@ void tb_ring_start(struct tb_ring *ring)
 	ring_interrupt_active(ring, true);
 	ring->running = true;
 err:
-	spin_unlock_irq(&ring->lock);
-	mutex_unlock(&ring->nhi->lock);
+	spin_unlock(&ring->lock);
+	spin_unlock_irq(&ring->nhi->lock);
 }
 EXPORT_SYMBOL_GPL(tb_ring_start);
 
@@ -489,8 +497,8 @@ EXPORT_SYMBOL_GPL(tb_ring_start);
  */
 void tb_ring_stop(struct tb_ring *ring)
 {
-	mutex_lock(&ring->nhi->lock);
-	spin_lock_irq(&ring->lock);
+	spin_lock_irq(&ring->nhi->lock);
+	spin_lock(&ring->lock);
 	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
 		 RING_TYPE(ring), ring->hop);
 	if (ring->nhi->going_away)
@@ -511,8 +519,8 @@ void tb_ring_stop(struct tb_ring *ring)
 	ring->running = false;
 
 err:
-	spin_unlock_irq(&ring->lock);
-	mutex_unlock(&ring->nhi->lock);
+	spin_unlock(&ring->lock);
+	spin_unlock_irq(&ring->nhi->lock);
 
 	/*
 	 * schedule ring->work to invoke callbacks on all remaining frames.
@@ -534,7 +542,7 @@ EXPORT_SYMBOL_GPL(tb_ring_stop);
  */
 void tb_ring_free(struct tb_ring *ring)
 {
-	mutex_lock(&ring->nhi->lock);
+	spin_lock_irq(&ring->nhi->lock);
 	/*
 	 * Dissociate the ring from the NHI. This also ensures that
 	 * nhi_interrupt_work cannot reschedule ring->work.
@@ -564,7 +572,7 @@ void tb_ring_free(struct tb_ring *ring)
 		 RING_TYPE(ring),
 		 ring->hop);
 
-	mutex_unlock(&ring->nhi->lock);
+	spin_unlock_irq(&ring->nhi->lock);
 	/**
 	 * ring->work can no longer be scheduled (it is scheduled only
 	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
@@ -639,7 +647,7 @@ static void nhi_interrupt_work(struct work_struct *work)
 	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
 	struct tb_ring *ring;
 
-	mutex_lock(&nhi->lock);
+	spin_lock_irq(&nhi->lock);
 
 	/*
 	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
@@ -677,7 +685,7 @@ static void nhi_interrupt_work(struct work_struct *work)
 		/* we do not check ring->running, this is done in ring->work */
 		schedule_work(&ring->work);
 	}
-	mutex_unlock(&nhi->lock);
+	spin_unlock_irq(&nhi->lock);
 }
 
 static irqreturn_t nhi_msi(int irq, void *data)
@@ -767,7 +775,6 @@ static void nhi_shutdown(struct tb_nhi *nhi)
 		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
 		flush_work(&nhi->interrupt_work);
 	}
-	mutex_destroy(&nhi->lock);
 	ida_destroy(&nhi->msix_ida);
 }
 
@@ -856,7 +863,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		return res;
 	}
 
-	mutex_init(&nhi->lock);
+	spin_lock_init(&nhi->lock);
 
 	pci_set_master(pdev);
 
include/linux/thunderbolt.h

Lines changed: 1 addition & 1 deletion
@@ -415,7 +415,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc)
  * @hop_count: Number of rings (end point hops) supported by NHI.
  */
 struct tb_nhi {
-	struct mutex lock;
+	spinlock_t lock;
 	struct pci_dev *pdev;
 	void __iomem *iobase;
 	struct tb_ring **tx_rings;
