Skip to content

Commit d1156b4

Browse files
khoroshilov authored and davem330 committed
net: adaptec: starfire: add checks for dma mapping errors
init_ring(), refill_rx_ring() and start_tx() don't check whether mapping DMA memory succeeds. The patch adds the checks and failure handling. Found by the Linux Driver Verification project (linuxtesting.org). Signed-off-by: Alexey Khoroshilov <khoroshilov@ispras.ru> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent cf626c3 commit d1156b4

File tree

1 file changed

+43
-2
lines changed

1 file changed

+43
-2
lines changed

drivers/net/ethernet/adaptec/starfire.c

Lines changed: 43 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
11521152
if (skb == NULL)
11531153
break;
11541154
np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1155+
if (pci_dma_mapping_error(np->pci_dev,
1156+
np->rx_info[i].mapping)) {
1157+
dev_kfree_skb(skb);
1158+
np->rx_info[i].skb = NULL;
1159+
break;
1160+
}
11551161
/* Grrr, we cannot offset to correctly align the IP header. */
11561162
np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
11571163
}
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
11821188
{
11831189
struct netdev_private *np = netdev_priv(dev);
11841190
unsigned int entry;
1191+
unsigned int prev_tx;
11851192
u32 status;
1186-
int i;
1193+
int i, j;
11871194

11881195
/*
11891196
* be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
12011208
}
12021209
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
12031210

1211+
prev_tx = np->cur_tx;
12041212
entry = np->cur_tx % TX_RING_SIZE;
12051213
for (i = 0; i < skb_num_frags(skb); i++) {
12061214
int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
12341242
skb_frag_size(this_frag),
12351243
PCI_DMA_TODEVICE);
12361244
}
1245+
if (pci_dma_mapping_error(np->pci_dev,
1246+
np->tx_info[entry].mapping)) {
1247+
dev->stats.tx_dropped++;
1248+
goto err_out;
1249+
}
12371250

12381251
np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
12391252
np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
12681281
netif_stop_queue(dev);
12691282

12701283
return NETDEV_TX_OK;
1271-
}
12721284

1285+
err_out:
1286+
entry = prev_tx % TX_RING_SIZE;
1287+
np->tx_info[entry].skb = NULL;
1288+
if (i > 0) {
1289+
pci_unmap_single(np->pci_dev,
1290+
np->tx_info[entry].mapping,
1291+
skb_first_frag_len(skb),
1292+
PCI_DMA_TODEVICE);
1293+
np->tx_info[entry].mapping = 0;
1294+
entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
1295+
for (j = 1; j < i; j++) {
1296+
pci_unmap_single(np->pci_dev,
1297+
np->tx_info[entry].mapping,
1298+
skb_frag_size(
1299+
&skb_shinfo(skb)->frags[j-1]),
1300+
PCI_DMA_TODEVICE);
1301+
entry++;
1302+
}
1303+
}
1304+
dev_kfree_skb_any(skb);
1305+
np->cur_tx = prev_tx;
1306+
return NETDEV_TX_OK;
1307+
}
12731308

12741309
/* The interrupt handler does all of the Rx thread work and cleans up
12751310
after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
15691604
break; /* Better luck next round. */
15701605
np->rx_info[entry].mapping =
15711606
pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1607+
if (pci_dma_mapping_error(np->pci_dev,
1608+
np->rx_info[entry].mapping)) {
1609+
dev_kfree_skb(skb);
1610+
np->rx_info[entry].skb = NULL;
1611+
break;
1612+
}
15721613
np->rx_ring[entry].rxaddr =
15731614
cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
15741615
}

0 commit comments

Comments (0)