@@ -1817,8 +1817,8 @@ static int nv_alloc_rx(struct net_device *dev)
                                                      skb->data,
                                                      skb_tailroom(skb),
                                                      DMA_FROM_DEVICE);
-                if (dma_mapping_error(&np->pci_dev->dev,
-                                      np->put_rx_ctx->dma)) {
+                if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+                                               np->put_rx_ctx->dma))) {
                         kfree_skb(skb);
                         goto packet_dropped;
                 }
@@ -1858,8 +1858,8 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
                                                      skb->data,
                                                      skb_tailroom(skb),
                                                      DMA_FROM_DEVICE);
-                if (dma_mapping_error(&np->pci_dev->dev,
-                                      np->put_rx_ctx->dma)) {
+                if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+                                               np->put_rx_ctx->dma))) {
                         kfree_skb(skb);
                         goto packet_dropped;
                 }
@@ -2227,8 +2227,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
                                                       skb->data + offset, bcnt,
                                                       DMA_TO_DEVICE);
-                if (dma_mapping_error(&np->pci_dev->dev,
-                                      np->put_tx_ctx->dma)) {
+                if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+                                               np->put_tx_ctx->dma))) {
                         /* on DMA mapping error - drop the packet */
                         dev_kfree_skb_any(skb);
                         u64_stats_update_begin(&np->swstats_tx_syncp);
@@ -2268,7 +2268,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                                 frag, offset,
                                                 bcnt,
                                                 DMA_TO_DEVICE);
-                if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+                if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+                                               np->put_tx_ctx->dma))) {

                         /* Unwind the mapped fragments */
                         do {
@@ -2377,8 +2378,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
                 np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
                                                       skb->data + offset, bcnt,
                                                       DMA_TO_DEVICE);
-                if (dma_mapping_error(&np->pci_dev->dev,
-                                      np->put_tx_ctx->dma)) {
+                if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+                                               np->put_tx_ctx->dma))) {
                         /* on DMA mapping error - drop the packet */
                         dev_kfree_skb_any(skb);
                         u64_stats_update_begin(&np->swstats_tx_syncp);
@@ -2419,7 +2420,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
                                                 bcnt,
                                                 DMA_TO_DEVICE);

-                if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+                if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+                                               np->put_tx_ctx->dma))) {

                         /* Unwind the mapped fragments */
                         do {
@@ -5075,8 +5077,8 @@ static int nv_loopback_test(struct net_device *dev)
         test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data,
                                        skb_tailroom(tx_skb),
                                        DMA_FROM_DEVICE);
-        if (dma_mapping_error(&np->pci_dev->dev,
-                              test_dma_addr)) {
+        if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+                                       test_dma_addr))) {
                 dev_kfree_skb_any(tx_skb);
                 goto out;
         }
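Note: every hunk above makes the same change. A failed dma_mapping_error() check is a rarely-taken error path, so the test is wrapped in unlikely() to tell the compiler the condition is expected to be false and to keep the drop/unwind code off the hot path. As a rough, self-contained illustration of the pattern only (a userspace sketch, not driver code: unlikely() is shown with its usual __builtin_expect() definition, and fake_mapping_error() is a made-up stand-in for dma_mapping_error()):

#include <stdio.h>
#include <stdlib.h>

/* In kernel sources, unlikely(x) expands to __builtin_expect(!!(x), 0),
 * which tells the compiler the condition is expected to be false so the
 * error path can be laid out away from the common path. */
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Hypothetical stand-in for dma_mapping_error(): nonzero means failure. */
static int fake_mapping_error(const void *addr)
{
        return addr == NULL;
}

int main(void)
{
        void *buf = malloc(64);        /* stand-in for a mapped buffer */

        if (unlikely(fake_mapping_error(buf))) {
                /* rarely taken: clean up and bail out, mirroring the
                 * driver's kfree_skb()/dev_kfree_skb_any() error paths */
                fprintf(stderr, "mapping failed\n");
                return 1;
        }

        puts("mapping ok");
        free(buf);
        return 0;
}

unlikely() is only a layout/prediction hint; if the hint turns out wrong, behavior is unchanged, which is why adding it to these checks is safe.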