@@ -2200,6 +2200,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ring_desc *start_tx;
 	struct ring_desc *prev_tx;
 	struct nv_skb_map *prev_tx_ctx;
+	struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
 	unsigned long flags;
 
 	/* add fragments to entries count */
@@ -2261,12 +2262,31 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		do {
 			prev_tx = put_tx;
 			prev_tx_ctx = np->put_tx_ctx;
+			if (!start_tx_ctx)
+				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
+
 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
 			np->put_tx_ctx->dma = skb_frag_dma_map(
 							&np->pci_dev->dev,
 							frag, offset,
 							bcnt,
 							DMA_TO_DEVICE);
+			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+
+				/* Unwind the mapped fragments */
+				do {
+					nv_unmap_txskb(np, start_tx_ctx);
+					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
+						tmp_tx_ctx = np->first_tx_ctx;
+				} while (tmp_tx_ctx != np->put_tx_ctx);
+				kfree_skb(skb);
+				np->put_tx_ctx = start_tx_ctx;
+				u64_stats_update_begin(&np->swstats_tx_syncp);
+				np->stat_tx_dropped++;
+				u64_stats_update_end(&np->swstats_tx_syncp);
+				return NETDEV_TX_OK;
+			}
+
 			np->put_tx_ctx->dma_len = bcnt;
 			np->put_tx_ctx->dma_single = 0;
 			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
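
The unwind loop above works because np->put_tx_ctx walks a circular array of nv_skb_map slots bounded by np->first_tx_ctx and np->last_tx_ctx: on a mapping failure the code steps from the slot where this skb's fragments began up to the slot that failed, wrapping at the end of the array, and then rewinds the producer pointer (np->put_tx_ctx = start_tx_ctx) so the slots can be reused. A minimal standalone sketch of that idea follows; the names (tx_ring, tx_slot, unmap_slot, unwind_mapped_frags) are hypothetical stand-ins for the driver's nv_skb_map ring and nv_unmap_txskb(), not the driver's own code.

/* Hypothetical, simplified model of a circular TX-context array like the
 * one the patch walks (first_tx_ctx .. last_tx_ctx, with put_tx_ctx as
 * the producer pointer).  None of these names are the driver's own. */
struct tx_slot {
	unsigned long dma;	/* stand-in for a DMA handle; 0 = unmapped */
};

struct tx_ring {
	struct tx_slot *first;	/* first slot of the circular array */
	struct tx_slot *last;	/* last slot of the circular array */
	struct tx_slot *put;	/* next slot the xmit path would fill */
};

/* Stub standing in for nv_unmap_txskb()/dma_unmap_page(). */
static void unmap_slot(struct tx_slot *slot)
{
	slot->dma = 0;
}

/* Undo every mapping made for the current packet: walk from the slot
 * where its fragments started up to, but not including, the slot whose
 * mapping failed (ring->put), wrapping at the end of the array, then
 * rewind the producer pointer so the slots can be reused.  Assumes at
 * least one slot was mapped before the failure (start != ring->put),
 * mirroring the patch's do/while. */
static void unwind_mapped_frags(struct tx_ring *ring, struct tx_slot *start)
{
	struct tx_slot *tmp = start;

	do {
		unmap_slot(tmp);
		if (tmp++ == ring->last)
			tmp = ring->first;
	} while (tmp != ring->put);

	ring->put = start;
}
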
@@ -2327,7 +2347,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 	struct ring_desc_ex *start_tx;
 	struct ring_desc_ex *prev_tx;
 	struct nv_skb_map *prev_tx_ctx;
-	struct nv_skb_map *start_tx_ctx;
+	struct nv_skb_map *start_tx_ctx = NULL;
+	struct nv_skb_map *tmp_tx_ctx = NULL;
 	unsigned long flags;
 
 	/* add fragments to entries count */
@@ -2392,11 +2413,29 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 			prev_tx = put_tx;
 			prev_tx_ctx = np->put_tx_ctx;
 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
+			if (!start_tx_ctx)
+				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
 			np->put_tx_ctx->dma = skb_frag_dma_map(
 							&np->pci_dev->dev,
 							frag, offset,
 							bcnt,
 							DMA_TO_DEVICE);
+
+			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+
+				/* Unwind the mapped fragments */
+				do {
+					nv_unmap_txskb(np, start_tx_ctx);
+					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
+						tmp_tx_ctx = np->first_tx_ctx;
+				} while (tmp_tx_ctx != np->put_tx_ctx);
+				kfree_skb(skb);
+				np->put_tx_ctx = start_tx_ctx;
+				u64_stats_update_begin(&np->swstats_tx_syncp);
+				np->stat_tx_dropped++;
+				u64_stats_update_end(&np->swstats_tx_syncp);
+				return NETDEV_TX_OK;
+			}
 			np->put_tx_ctx->dma_len = bcnt;
 			np->put_tx_ctx->dma_single = 0;
 			put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
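
Both hunks follow the same transmit-path rule: a handle returned by skb_frag_dma_map() (or dma_map_single()) has to be checked with dma_mapping_error() before it is handed to the hardware, and on failure the skb is freed, a drop counter is bumped under the u64_stats seqcount, and NETDEV_TX_OK is returned so the stack does not keep requeueing a packet that cannot be mapped. A hedged, self-contained sketch of that check-then-drop shape is below; my_priv and my_xmit_one are illustrative names, and only the kernel calls used (dma_map_single(), dma_mapping_error(), dev_kfree_skb_any(), skb_headlen()) are real APIs.

#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical private state; just enough for the sketch. */
struct my_priv {
	struct device *dev;	/* DMA-capable device, e.g. &pdev->dev */
	unsigned long tx_dropped;
};

/* Map the linear part of an skb and bail out cleanly if the mapping
 * fails.  Dropping (free + NETDEV_TX_OK) rather than returning
 * NETDEV_TX_BUSY avoids requeueing a packet that can never be mapped. */
static netdev_tx_t my_xmit_one(struct my_priv *priv, struct sk_buff *skb)
{
	dma_addr_t dma;

	dma = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->dev, dma)) {
		dev_kfree_skb_any(skb);
		priv->tx_dropped++;
		return NETDEV_TX_OK;	/* drop, do not requeue */
	}

	/* A real driver would post 'dma' to a descriptor ring here and
	 * unmap it later on TX completion; the sketch stops at the
	 * error handling. */
	return NETDEV_TX_OK;
}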