@@ -195,12 +195,8 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	priv->tx_skb[q] = NULL;
 
 	/* Free aligned TX buffers */
-	if (priv->tx_buffers[q]) {
-		for (i = 0; i < priv->num_tx_ring[q]; i++)
-			kfree(priv->tx_buffers[q][i]);
-	}
-	kfree(priv->tx_buffers[q]);
-	priv->tx_buffers[q] = NULL;
+	kfree(priv->tx_align[q]);
+	priv->tx_align[q] = NULL;
 
 	if (priv->rx_ring[q]) {
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -212,7 +208,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 
 	if (priv->tx_ring[q]) {
 		ring_size = sizeof(struct ravb_tx_desc) *
-			    (priv->num_tx_ring[q] + 1);
+			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 		dma_free_coherent(NULL, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
@@ -227,7 +223,8 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 	struct ravb_tx_desc *tx_desc;
 	struct ravb_desc *desc;
 	int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
-	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
+	int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
+			   NUM_TX_DESC;
 	dma_addr_t dma_addr;
 	int i;
 
@@ -260,11 +257,12 @@ static void ravb_ring_format(struct net_device *ndev, int q)
 
 	memset(priv->tx_ring[q], 0, tx_ring_size);
 	/* Build TX ring buffer */
-	for (i = 0; i < priv->num_tx_ring[q]; i++) {
-		tx_desc = &priv->tx_ring[q][i];
+	for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
+	     i++, tx_desc++) {
+		tx_desc->die_dt = DT_EEMPTY;
+		tx_desc++;
 		tx_desc->die_dt = DT_EEMPTY;
 	}
-	tx_desc = &priv->tx_ring[q][i];
 	tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
 	tx_desc->die_dt = DT_LINKFIX; /* type */
 
@@ -285,7 +283,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	struct ravb_private *priv = netdev_priv(ndev);
 	struct sk_buff *skb;
 	int ring_size;
-	void *buffer;
 	int i;
 
 	/* Allocate RX and TX skb rings */
@@ -305,19 +302,11 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	}
 
 	/* Allocate rings for the aligned buffers */
-	priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
-				      sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
-	if (!priv->tx_buffers[q])
+	priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
+				    DPTR_ALIGN - 1, GFP_KERNEL);
+	if (!priv->tx_align[q])
 		goto error;
 
-	for (i = 0; i < priv->num_tx_ring[q]; i++) {
-		buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
-		if (!buffer)
-			goto error;
-		/* Aligned TX buffer */
-		priv->tx_buffers[q][i] = buffer;
-	}
-
 	/* Allocate all RX descriptors. */
 	ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
 	priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -329,7 +318,8 @@ static int ravb_ring_init(struct net_device *ndev, int q)
 	priv->dirty_rx[q] = 0;
 
 	/* Allocate all TX descriptors. */
-	ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] + 1);
+	ring_size = sizeof(struct ravb_tx_desc) *
+		    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 	priv->tx_ring[q] = dma_alloc_coherent(NULL, ring_size,
 					      &priv->tx_desc_dma[q],
 					      GFP_KERNEL);
@@ -443,22 +433,27 @@ static int ravb_tx_free(struct net_device *ndev, int q)
 	u32 size;
 
 	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-		entry = priv->dirty_tx[q] % priv->num_tx_ring[q];
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     NUM_TX_DESC);
 		desc = &priv->tx_ring[q][entry];
 		if (desc->die_dt != DT_FEMPTY)
 			break;
 		/* Descriptor type must be checked before all other reads */
 		dma_rmb();
 		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
 		/* Free the original skb. */
-		if (priv->tx_skb[q][entry]) {
+		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
 			dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
 					 size, DMA_TO_DEVICE);
-			dev_kfree_skb_any(priv->tx_skb[q][entry]);
-			priv->tx_skb[q][entry] = NULL;
+			/* Last packet descriptor? */
+			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+				entry /= NUM_TX_DESC;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				stats->tx_packets++;
+			}
 			free_num++;
 		}
-		stats->tx_packets++;
 		stats->tx_bytes += size;
 		desc->die_dt = DT_EEMPTY;
 	}
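(Aside, not part of the patch: a minimal standalone C sketch of the index arithmetic the reworked ravb_tx_free() relies on, assuming NUM_TX_DESC consecutive descriptors per packet, taken here to be 2 as this change uses. The helper names skb_slot and is_last_desc are illustrative only.)

#include <stdbool.h>

#define NUM_TX_DESC	2	/* descriptors per packet assumed for this sketch */

/* All descriptors of one packet share a single tx_skb[] slot. */
static inline unsigned int skb_slot(unsigned int entry)
{
	return entry / NUM_TX_DESC;
}

/* Only the last descriptor of a packet frees the skb and counts a TX packet. */
static inline bool is_last_desc(unsigned int entry)
{
	return entry % NUM_TX_DESC == NUM_TX_DESC - 1;
}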
@@ -1284,37 +1279,53 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	u32 dma_addr;
 	void *buffer;
 	u32 entry;
+	u32 len;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
+	if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
+	    NUM_TX_DESC) {
 		netif_err(priv, tx_queued, ndev,
 			  "still transmitting with the full ring!\n");
 		netif_stop_subqueue(ndev, q);
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
-	entry = priv->cur_tx[q] % priv->num_tx_ring[q];
-	priv->tx_skb[q][entry] = skb;
+	entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * NUM_TX_DESC);
+	priv->tx_skb[q][entry / NUM_TX_DESC] = skb;
 
 	if (skb_put_padto(skb, ETH_ZLEN))
 		goto drop;
 
-	buffer = PTR_ALIGN(priv->tx_buffers[q][entry], RAVB_ALIGN);
-	memcpy(buffer, skb->data, skb->len);
-	desc = &priv->tx_ring[q][entry];
-	desc->ds_tagl = cpu_to_le16(skb->len);
-	dma_addr = dma_map_single(&ndev->dev, buffer, skb->len, DMA_TO_DEVICE);
+	buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
+		 entry / NUM_TX_DESC * DPTR_ALIGN;
+	len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+	memcpy(buffer, skb->data, len);
+	dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(&ndev->dev, dma_addr))
 		goto drop;
+
+	desc = &priv->tx_ring[q][entry];
+	desc->ds_tagl = cpu_to_le16(len);
+	desc->dptr = cpu_to_le32(dma_addr);
+
+	buffer = skb->data + len;
+	len = skb->len - len;
+	dma_addr = dma_map_single(&ndev->dev, buffer, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&ndev->dev, dma_addr))
+		goto unmap;
+
+	desc++;
+	desc->ds_tagl = cpu_to_le16(len);
 	desc->dptr = cpu_to_le32(dma_addr);
 
 	/* TX timestamp required */
 	if (q == RAVB_NC) {
 		ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
 		if (!ts_skb) {
-			dma_unmap_single(&ndev->dev, dma_addr, skb->len,
+			desc--;
+			dma_unmap_single(&ndev->dev, dma_addr, len,
 					 DMA_TO_DEVICE);
-			goto drop;
+			goto unmap;
 		}
 		ts_skb->skb = skb;
 		ts_skb->tag = priv->ts_skb_tag++;
@@ -1330,23 +1341,28 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	/* Descriptor type must be set after all the above writes */
 	dma_wmb();
-	desc->die_dt = DT_FSINGLE;
+	desc->die_dt = DT_FEND;
+	desc--;
+	desc->die_dt = DT_FSTART;
 
 	ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
 
-	priv->cur_tx[q]++;
-	if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
-	    !ravb_tx_free(ndev, q))
+	priv->cur_tx[q] += NUM_TX_DESC;
+	if (priv->cur_tx[q] - priv->dirty_tx[q] >
+	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
 		netif_stop_subqueue(ndev, q);
 
 exit:
 	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return NETDEV_TX_OK;
 
+unmap:
+	dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
+			 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE);
 drop:
 	dev_kfree_skb_any(skb);
-	priv->tx_skb[q][entry] = NULL;
+	priv->tx_skb[q][entry / NUM_TX_DESC] = NULL;
 	goto exit;
 }
 
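(Aside, not part of the patch: a standalone C sketch of the head/tail split the reworked ravb_start_xmit() performs, so only the bytes before the first DPTR_ALIGN boundary are copied into the per-ring bounce area while the remaining, now aligned, skb data is DMA-mapped in place. PTR_ALIGN is re-implemented locally and the DPTR_ALIGN value of 4 is an assumption for illustration.)

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define DPTR_ALIGN	4	/* assumed descriptor pointer alignment */

/* Round a pointer up to the next 'align' boundary (align is a power of two). */
static inline char *ptr_align(char *p, uintptr_t align)
{
	return (char *)(((uintptr_t)p + align - 1) & ~(align - 1));
}

struct tx_split {
	size_t head_len;	/* copied into the aligned bounce buffer, 1st descriptor */
	size_t tail_len;	/* mapped in place, starts aligned, 2nd descriptor */
};

static struct tx_split split_tx(char *data, size_t len, char *bounce)
{
	struct tx_split s;

	/* bytes needed to bring data up to the next DPTR_ALIGN boundary */
	s.head_len = ptr_align(data, DPTR_ALIGN) - data;
	s.tail_len = len - s.head_len;
	memcpy(bounce, data, s.head_len);
	return s;
}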