Commit d8b4891

Sergei Shtylyov authored and davem330 committed
ravb: fix ring memory allocation
The driver is written as if it could adapt to a low-memory situation by allocating fewer RX skbs and TX aligned buffers than the respective RX/TX ring sizes. In reality, the driver would malfunction in that case. Stop being overly smart and just fail in such a situation; this is achieved by moving the memory allocation from ravb_ring_format() to ravb_ring_init(). The dma_map_single() calls are left in place, but their failure is made non-fatal by marking the corresponding RX descriptors with a zero data size, which should prevent DMA to an invalid address.

Signed-off-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
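Condensed from the hunks below, the convention the patch introduces looks like this (all names are the driver's own; the producer half appears in both ravb_ring_format() and the ravb_rx() refill path, the consumer half in the ravb_rx() receive loop):

	/* Producer side: arm the descriptor, but give it a zero data size
	 * if the DMA mapping failed, so the hardware never writes through
	 * the bogus address.
	 */
	rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
	dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
				  ALIGN(PKT_BUF_SZ, 16), DMA_FROM_DEVICE);
	if (dma_mapping_error(&ndev->dev, dma_addr))
		rx_desc->ds_cc = cpu_to_le16(0);
	rx_desc->dptr = cpu_to_le32(dma_addr);
	rx_desc->die_dt = DT_FEMPTY;

	/* Consumer side: a completed descriptor with a zero-length payload
	 * can only be such a marker, so skip it.
	 */
	if (!pkt_len)
		continue;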
1 parent a46fa26 commit d8b4891

File tree

1 file changed: +34 -25


drivers/net/ethernet/renesas/ravb_main.c

@@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	struct ravb_desc *desc = NULL;
 	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
-	struct sk_buff *skb;
 	dma_addr_t dma_addr;
-	void *buffer;
 	int i;
 
 	priv->cur_rx[q] = 0;
@@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	memset(priv->rx_ring[q], 0, rx_ring_size);
 	/* Build RX ring buffer */
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		priv->rx_skb[q][i] = NULL;
-		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
-		if (!skb)
-			break;
-		ravb_set_buffer_align(skb);
 		/* RX descriptor */
 		rx_desc = &priv->rx_ring[q][i];
 		/* The size of the buffer should be on 16-byte boundary. */
 		rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
-		dma_addr = dma_map_single(&ndev->dev, skb->data,
+		dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
 					  ALIGN(PKT_BUF_SZ, 16),
 					  DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, dma_addr)) {
-			dev_kfree_skb(skb);
-			break;
-		}
-		priv->rx_skb[q][i] = skb;
+		/* We just set the data size to 0 for a failed mapping which
+		 * should prevent DMA from happening...
+		 */
+		if (dma_mapping_error(&ndev->dev, dma_addr))
+			rx_desc->ds_cc = cpu_to_le16(0);
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
 	}
 	rx_desc = &priv->rx_ring[q][i];
 	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 	rx_desc->die_dt = DT_LINKFIX; /* type */
-	priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
 
 	memset(priv->tx_ring[q], 0, tx_ring_size);
 	/* Build TX ring buffer */
 	for (i = 0; i < priv->num_tx_ring[q]; i++) {
-		priv->tx_skb[q][i] = NULL;
-		priv->tx_buffers[q][i] = NULL;
-		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
-		if (!buffer)
-			break;
-		/* Aligned TX buffer */
-		priv->tx_buffers[q][i] = buffer;
 		tx_desc = &priv->tx_ring[q][i];
 		tx_desc->die_dt = DT_EEMPTY;
 	}
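Note the dropped line priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]); above. It compensated for a short allocation: had the old loop stopped after, say, 14 skbs on a 16-entry ring, dirty_rx would become (u32)(14 - 16) == (u32)-2, presumably so that the refill logic in ravb_rx() would see two outstanding entries and try to top the ring up. Since the loop now always completes with i == priv->num_rx_ring[q], the expression is always 0 and the assignment is redundant (dirty_rx is presumably already zeroed alongside cur_rx near the top of the function).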
@@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 static int ravb_ring_init(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	struct sk_buff *skb;
 	int ring_size;
+	void *buffer;
+	int i;
 
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	if (!priv->rx_skb[q] || !priv->tx_skb[q])
 		goto error;
 
+	for (i = 0; i < priv->num_rx_ring[q]; i++) {
+		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+		if (!skb)
+			goto error;
+		ravb_set_buffer_align(skb);
+		priv->rx_skb[q][i] = skb;
+	}
+
 	/* Allocate rings for the aligned buffers */
 	priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
 				      sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
 	if (!priv->tx_buffers[q])
 		goto error;
 
+	for (i = 0; i < priv->num_tx_ring[q]; i++) {
+		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
+		if (!buffer)
+			goto error;
+		/* Aligned TX buffer */
+		priv->tx_buffers[q][i] = buffer;
+	}
+
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
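All of the new allocation failures funnel into goto error. The label itself is outside these hunks; a sketch of the assumed tail of ravb_ring_init(), with ravb_ring_free() as the driver's existing cleanup helper (an assumption, not shown in this diff):

	error:
		/* Assumed: ravb_ring_free() tolerates partially built rings
		 * and frees only what was actually allocated, so failing
		 * midway through either loop above is safe.
		 */
		ravb_ring_free(ndev, q);

		return -ENOMEM;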
@@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 		if (--boguscnt < 0)
 			break;
 
+		/* We use 0-byte descriptors to mark the DMA mapping errors */
+		if (!pkt_len)
+			continue;
+
 		if (desc_status & MSC_MC)
 			stats->multicast++;
 
@@ -587,10 +595,11 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 					       le16_to_cpu(desc->ds_cc),
 					       DMA_FROM_DEVICE);
 			skb_checksum_none_assert(skb);
-			if (dma_mapping_error(&ndev->dev, dma_addr)) {
-				dev_kfree_skb_any(skb);
-				break;
-			}
+			/* We just set the data size to 0 for a failed mapping
+			 * which should prevent DMA from happening...
+			 */
+			if (dma_mapping_error(&ndev->dev, dma_addr))
+				desc->ds_cc = cpu_to_le16(0);
 			desc->dptr = cpu_to_le32(dma_addr);
 			priv->rx_skb[q][entry] = skb;
 		}
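Both RX allocation paths pad the skb by RAVB_ALIGN - 1 bytes and then call ravb_set_buffer_align(), which is not part of this diff. Presumably it just advances skb->data to the next RAVB_ALIGN boundary inside that slack, along these lines (a sketch, not the diffed code):

	static void ravb_set_buffer_align(struct sk_buff *skb)
	{
		u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);

		/* The RAVB_ALIGN - 1 slack bytes allocated with the skb
		 * guarantee this reserve never overruns the buffer.
		 */
		if (reserve)
			skb_reserve(skb, RAVB_ALIGN - reserve);
	}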
