@@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	struct ravb_desc *desc = NULL;
 	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
 	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
-	struct sk_buff *skb;
 	dma_addr_t dma_addr;
-	void *buffer;
 	int i;
 
 	priv->cur_rx[q] = 0;
@@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	memset(priv->rx_ring[q], 0, rx_ring_size);
 	/* Build RX ring buffer */
 	for (i = 0; i < priv->num_rx_ring[q]; i++) {
-		priv->rx_skb[q][i] = NULL;
-		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
-		if (!skb)
-			break;
-		ravb_set_buffer_align(skb);
 		/* RX descriptor */
 		rx_desc = &priv->rx_ring[q][i];
 		/* The size of the buffer should be on 16-byte boundary. */
 		rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
-		dma_addr = dma_map_single(&ndev->dev, skb->data,
+		dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
					  ALIGN(PKT_BUF_SZ, 16),
					  DMA_FROM_DEVICE);
-		if (dma_mapping_error(&ndev->dev, dma_addr)) {
-			dev_kfree_skb(skb);
-			break;
-		}
-		priv->rx_skb[q][i] = skb;
+		/* We just set the data size to 0 for a failed mapping which
+		 * should prevent DMA from happening...
+		 */
+		if (dma_mapping_error(&ndev->dev, dma_addr))
+			rx_desc->ds_cc = cpu_to_le16(0);
 		rx_desc->dptr = cpu_to_le32(dma_addr);
 		rx_desc->die_dt = DT_FEMPTY;
 	}
 	rx_desc = &priv->rx_ring[q][i];
 	rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
 	rx_desc->die_dt = DT_LINKFIX; /* type */
-	priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
 
 	memset(priv->tx_ring[q], 0, tx_ring_size);
 	/* Build TX ring buffer */
 	for (i = 0; i < priv->num_tx_ring[q]; i++) {
-		priv->tx_skb[q][i] = NULL;
-		priv->tx_buffers[q][i] = NULL;
-		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
-		if (!buffer)
-			break;
-		/* Aligned TX buffer */
-		priv->tx_buffers[q][i] = buffer;
 		tx_desc = &priv->tx_ring[q][i];
 		tx_desc->die_dt = DT_EEMPTY;
 	}
@@ -298,7 +283,10 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 static int ravb_ring_init(struct net_device *ndev, int q)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
+	struct sk_buff *skb;
 	int ring_size;
+	void *buffer;
+	int i;
 
 	/* Allocate RX and TX skb rings */
 	priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	if (!priv->rx_skb[q] || !priv->tx_skb[q])
 		goto error;
 
+	for (i = 0; i < priv->num_rx_ring[q]; i++) {
+		skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+		if (!skb)
+			goto error;
+		ravb_set_buffer_align(skb);
+		priv->rx_skb[q][i] = skb;
+	}
+
 	/* Allocate rings for the aligned buffers */
 	priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
				      sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
 	if (!priv->tx_buffers[q])
 		goto error;
 
+	for (i = 0; i < priv->num_tx_ring[q]; i++) {
+		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
+		if (!buffer)
+			goto error;
+		/* Aligned TX buffer */
+		priv->tx_buffers[q][i] = buffer;
+	}
+
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
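Note: the RX skbs allocated above are run through ravb_set_buffer_align() before being stored in priv->rx_skb[]. That helper is not part of this diff; the sketch below is only an assumed shape, inferred from how the function is used here (reserve headroom so skb->data lands on a RAVB_ALIGN-byte boundary), not a line taken from the driver:

	/* Assumed sketch of the alignment helper referenced above; the real
	 * ravb_set_buffer_align() in the driver may differ.
	 */
	static void ravb_set_buffer_align(struct sk_buff *skb)
	{
		u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);

		if (reserve)
			skb_reserve(skb, RAVB_ALIGN - reserve);
	}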
@@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 		if (--boguscnt < 0)
 			break;
 
+		/* We use 0-byte descriptors to mark the DMA mapping errors */
+		if (!pkt_len)
+			continue;
+
 		if (desc_status & MSC_MC)
 			stats->multicast++;
 
@@ -587,10 +595,11 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
						  le16_to_cpu(desc->ds_cc),
						  DMA_FROM_DEVICE);
			skb_checksum_none_assert(skb);
-			if (dma_mapping_error(&ndev->dev, dma_addr)) {
-				dev_kfree_skb_any(skb);
-				break;
-			}
+			/* We just set the data size to 0 for a failed mapping
+			 * which should prevent DMA from happening...
+			 */
+			if (dma_mapping_error(&ndev->dev, dma_addr))
+				desc->ds_cc = cpu_to_le16(0);
 			desc->dptr = cpu_to_le32(dma_addr);
 			priv->rx_skb[q][entry] = skb;
 		}
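Both mapping-error hunks follow the usual DMA-API rule: every dma_map_single() result must be checked with dma_mapping_error() before the address is programmed into a descriptor. Rather than freeing the skb and breaking out of the loop, the driver now parks the descriptor with a zero data size, which the RX poll loop skips (see the !pkt_len check above). A minimal, generic sketch of the idiom, with dev, buf and len as placeholder names rather than identifiers from this patch:

	/* Generic sketch of the DMA mapping-error idiom this patch relies on;
	 * dev, buf and len are placeholders, not driver identifiers.
	 */
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_addr))
		len = 0;	/* mark the buffer so the hardware never DMAs into it */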