Commit ec1f127

sunhme: Add DMA mapping error checks.
Reported-by: Meelis Roos <mroos@linux.ee>
Tested-by: Meelis Roos <mroos@linux.ee>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 9f93567
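
For readers unfamiliar with the API, the pattern added throughout the driver is the standard DMA-API error check: the address returned by dma_map_single() (or skb_frag_dma_map()) must be tested with dma_mapping_error() before it is written into a descriptor. Below is a minimal sketch of that pattern; it is generic driver code, not taken from sunhme.c, and the function name example_map_rx_buf and its parameters are placeholders.

/* Minimal sketch of the DMA-API error-check pattern (placeholder names,
 * not sunhme.c code): map a buffer, verify the mapping, and release the
 * buffer instead of handing a bad address to the hardware.
 */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int example_map_rx_buf(struct device *dev, struct sk_buff *skb,
                              unsigned int buf_len, dma_addr_t *out)
{
        dma_addr_t mapping;

        mapping = dma_map_single(dev, skb->data, buf_len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, mapping)) {
                dev_kfree_skb_any(skb);      /* drop the buffer on failure */
                return -ENOMEM;
        }

        *out = mapping;                      /* safe to post to the ring */
        return 0;
}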

File tree

1 file changed: +57 −5 lines changed

drivers/net/ethernet/sun/sunhme.c

Lines changed: 57 additions & 5 deletions
@@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
         HMD(("init rxring, "));
         for (i = 0; i < RX_RING_SIZE; i++) {
                 struct sk_buff *skb;
+                u32 mapping;
 
                 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
                 if (!skb) {
@@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp)
 
                 /* Because we reserve afterwards. */
                 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+                mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
+                                         DMA_FROM_DEVICE);
+                if (dma_mapping_error(hp->dma_dev, mapping)) {
+                        dev_kfree_skb_any(skb);
+                        hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
+                        continue;
+                }
                 hme_write_rxd(hp, &hb->happy_meal_rxd[i],
                               (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
-                              dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
-                                             DMA_FROM_DEVICE));
+                              mapping);
                 skb_reserve(skb, RX_OFFSET);
         }
 
@@ -2020,20 +2027,29 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
                 skb = hp->rx_skbs[elem];
                 if (len > RX_COPY_THRESHOLD) {
                         struct sk_buff *new_skb;
+                        u32 mapping;
 
                         /* Now refill the entry, if we can. */
                         new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
                         if (new_skb == NULL) {
                                 drops++;
                                 goto drop_it;
                         }
+                        skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+                        mapping = dma_map_single(hp->dma_dev, new_skb->data,
+                                                 RX_BUF_ALLOC_SIZE,
+                                                 DMA_FROM_DEVICE);
+                        if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+                                dev_kfree_skb_any(new_skb);
+                                drops++;
+                                goto drop_it;
+                        }
+
                         dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
                         hp->rx_skbs[elem] = new_skb;
-                        skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
                         hme_write_rxd(hp, this,
                                       (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
-                                      dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE,
-                                                     DMA_FROM_DEVICE));
+                                      mapping);
                         skb_reserve(new_skb, RX_OFFSET);
 
                         /* Trim the original skb for the netif. */
@@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev)
         netif_wake_queue(dev);
 }
 
+static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
+                                 u32 first_len, u32 first_entry, u32 entry)
+{
+        struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
+
+        dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
+
+        first_entry = NEXT_TX(first_entry);
+        while (first_entry != entry) {
+                struct happy_meal_txd *this = &txbase[first_entry];
+                u32 addr, len;
+
+                addr = hme_read_desc32(hp, &this->tx_addr);
+                len = hme_read_desc32(hp, &this->tx_flags);
+                len &= TXFLAG_SIZE;
+                dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
+        }
+}
+
 static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
                                          struct net_device *dev)
 {
@@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 
                 len = skb->len;
                 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
+                if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
+                        goto out_dma_error;
                 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
                 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
                               (tx_flags | (len & TXFLAG_SIZE)),
@@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
                 first_len = skb_headlen(skb);
                 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
                                                DMA_TO_DEVICE);
+                if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
+                        goto out_dma_error;
                 entry = NEXT_TX(entry);
 
                 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
                         len = skb_frag_size(this_frag);
                         mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
                                                    0, len, DMA_TO_DEVICE);
+                        if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+                                unmap_partial_tx_skb(hp, first_mapping, first_len,
+                                                     first_entry, entry);
+                                goto out_dma_error;
+                        }
                         this_txflags = tx_flags;
                         if (frag == skb_shinfo(skb)->nr_frags - 1)
                                 this_txflags |= TXFLAG_EOP;
@@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 
         tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
         return NETDEV_TX_OK;
+
+out_dma_error:
+        hp->tx_skbs[hp->tx_new] = NULL;
+        spin_unlock_irq(&hp->happy_lock);
+
+        dev_kfree_skb_any(skb);
+        dev->stats.tx_dropped++;
+        return NETDEV_TX_OK;
 }
 
 static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
