
Commit 39e50d9

Zhu Yanjun authored and davem330 committed
forcedeth: optimize the xmit/rx with unlikely
In the xmit/rx fastpath, the function dma_map_single rarely fails.
Therefore, add an unlikely() optimization to this error check conditional.

Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 1f8d31d commit 39e50d9
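
For context, unlikely() is the kernel's branch-prediction hint from include/linux/compiler.h; it expands to __builtin_expect(!!(x), 0), telling the compiler the condition is almost always false so the error path can be laid out off the hot path. Below is a minimal userspace sketch of the same pattern; map_buffer() is a hypothetical stand-in for the driver's dma_map_single()/dma_mapping_error() pair and is not part of this commit:

#include <stdio.h>

/* Branch-prediction hints as defined in include/linux/compiler.h. */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Hypothetical stand-in for a mapping step that almost never fails. */
static int map_buffer(const void *buf)
{
	return buf == NULL;	/* non-zero means "mapping failed" */
}

int main(void)
{
	char data[64];

	/*
	 * Hot path: the error check is wrapped in unlikely() so the
	 * compiler keeps the drop/cleanup code out of the fast path.
	 */
	if (unlikely(map_buffer(data))) {
		puts("mapping failed - drop the packet");
		return 1;
	}

	puts("mapping ok - hand the packet to the hardware");
	return 0;
}

Built with optimization, the compiler typically emits the failure branch out of line, which is the effect the commit is after in the rx-refill and xmit hot loops.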

File tree: 1 file changed, +14 -12 lines

drivers/net/ethernet/nvidia/forcedeth.c

Lines changed: 14 additions & 12 deletions
@@ -1817,8 +1817,8 @@ static int nv_alloc_rx(struct net_device *dev)
 							     skb->data,
 							     skb_tailroom(skb),
 							     DMA_FROM_DEVICE);
-			if (dma_mapping_error(&np->pci_dev->dev,
-					      np->put_rx_ctx->dma)) {
+			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+						       np->put_rx_ctx->dma))) {
 				kfree_skb(skb);
 				goto packet_dropped;
 			}
@@ -1858,8 +1858,8 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
 							     skb->data,
 							     skb_tailroom(skb),
 							     DMA_FROM_DEVICE);
-			if (dma_mapping_error(&np->pci_dev->dev,
-					      np->put_rx_ctx->dma)) {
+			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+						       np->put_rx_ctx->dma))) {
 				kfree_skb(skb);
 				goto packet_dropped;
 			}
@@ -2227,8 +2227,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
 						     skb->data + offset, bcnt,
 						     DMA_TO_DEVICE);
-		if (dma_mapping_error(&np->pci_dev->dev,
-				      np->put_tx_ctx->dma)) {
+		if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+					       np->put_tx_ctx->dma))) {
 			/* on DMA mapping error - drop the packet */
 			dev_kfree_skb_any(skb);
 			u64_stats_update_begin(&np->swstats_tx_syncp);
@@ -2268,7 +2268,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 							frag, offset,
 							bcnt,
 							DMA_TO_DEVICE);
-			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+			if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+						       np->put_tx_ctx->dma))) {

 				/* Unwind the mapped fragments */
 				do {
@@ -2377,8 +2378,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
 						     skb->data + offset, bcnt,
 						     DMA_TO_DEVICE);
-		if (dma_mapping_error(&np->pci_dev->dev,
-				      np->put_tx_ctx->dma)) {
+		if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+					       np->put_tx_ctx->dma))) {
 			/* on DMA mapping error - drop the packet */
 			dev_kfree_skb_any(skb);
 			u64_stats_update_begin(&np->swstats_tx_syncp);
@@ -2419,7 +2420,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 							bcnt,
 							DMA_TO_DEVICE);

-		if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+		if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+					       np->put_tx_ctx->dma))) {

 			/* Unwind the mapped fragments */
 			do {
@@ -5075,8 +5077,8 @@ static int nv_loopback_test(struct net_device *dev)
 	test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data,
 				       skb_tailroom(tx_skb),
 				       DMA_FROM_DEVICE);
-	if (dma_mapping_error(&np->pci_dev->dev,
-			      test_dma_addr)) {
+	if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+				       test_dma_addr))) {
 		dev_kfree_skb_any(tx_skb);
 		goto out;
 	}
