@@ -1073,7 +1073,8 @@ static int init_dma_desc_rings(struct net_device *dev)
 		else
 			p = priv->dma_tx + i;
 		p->des2 = 0;
-		priv->tx_skbuff_dma[i] = 0;
+		priv->tx_skbuff_dma[i].buf = 0;
+		priv->tx_skbuff_dma[i].map_as_page = false;
 		priv->tx_skbuff[i] = NULL;
 	}
 
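The hunks in this patch switch tx_skbuff_dma[] from a flat array of dma_addr_t to an array of small per-descriptor bookkeeping entries, so the unmap paths can tell linear buffers (dma_unmap_single) apart from page fragments (dma_unmap_page). The entry type itself is declared in stmmac.h and is not part of the hunks shown here; the sketch below is only an inference from the .buf and .map_as_page accesses, and the struct name is illustrative.

/* Assumed shape of a tx_skbuff_dma[] element; the real definition lives
 * in stmmac.h, outside this diff, and may differ in name.
 */
struct stmmac_tx_info {
	dma_addr_t buf;		/* DMA handle returned by the map call */
	bool map_as_page;	/* true: release with dma_unmap_page(),
				 * false: release with dma_unmap_single() */
};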
@@ -1112,17 +1113,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
 		else
 			p = priv->dma_tx + i;
 
-		if (priv->tx_skbuff_dma[i]) {
-			dma_unmap_single(priv->device,
-					 priv->tx_skbuff_dma[i],
-					 priv->hw->desc->get_tx_len(p),
-					 DMA_TO_DEVICE);
-			priv->tx_skbuff_dma[i] = 0;
+		if (priv->tx_skbuff_dma[i].buf) {
+			if (priv->tx_skbuff_dma[i].map_as_page)
+				dma_unmap_page(priv->device,
+					       priv->tx_skbuff_dma[i].buf,
+					       priv->hw->desc->get_tx_len(p),
+					       DMA_TO_DEVICE);
+			else
+				dma_unmap_single(priv->device,
+						 priv->tx_skbuff_dma[i].buf,
+						 priv->hw->desc->get_tx_len(p),
+						 DMA_TO_DEVICE);
 		}
 
 		if (priv->tx_skbuff[i] != NULL) {
 			dev_kfree_skb_any(priv->tx_skbuff[i]);
 			priv->tx_skbuff[i] = NULL;
+			priv->tx_skbuff_dma[i].buf = 0;
+			priv->tx_skbuff_dma[i].map_as_page = false;
 		}
 	}
 }
@@ -1143,7 +1151,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
 	if (!priv->rx_skbuff)
 		goto err_rx_skbuff;
 
-	priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+	priv->tx_skbuff_dma = kmalloc_array(txsize,
+					    sizeof(*priv->tx_skbuff_dma),
 					    GFP_KERNEL);
 	if (!priv->tx_skbuff_dma)
 		goto err_tx_skbuff_dma;
@@ -1305,12 +1314,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
 			pr_debug("%s: curr %d, dirty %d\n", __func__,
 				 priv->cur_tx, priv->dirty_tx);
 
-		if (likely(priv->tx_skbuff_dma[entry])) {
-			dma_unmap_single(priv->device,
-					 priv->tx_skbuff_dma[entry],
-					 priv->hw->desc->get_tx_len(p),
-					 DMA_TO_DEVICE);
-			priv->tx_skbuff_dma[entry] = 0;
+		if (likely(priv->tx_skbuff_dma[entry].buf)) {
+			if (priv->tx_skbuff_dma[entry].map_as_page)
+				dma_unmap_page(priv->device,
+					       priv->tx_skbuff_dma[entry].buf,
+					       priv->hw->desc->get_tx_len(p),
+					       DMA_TO_DEVICE);
+			else
+				dma_unmap_single(priv->device,
+						 priv->tx_skbuff_dma[entry].buf,
+						 priv->hw->desc->get_tx_len(p),
+						 DMA_TO_DEVICE);
+			priv->tx_skbuff_dma[entry].buf = 0;
+			priv->tx_skbuff_dma[entry].map_as_page = false;
 		}
 		priv->hw->mode->clean_desc3(priv, p);
 
@@ -1905,12 +1921,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (likely(!is_jumbo)) {
 		desc->des2 = dma_map_single(priv->device, skb->data,
 					    nopaged_len, DMA_TO_DEVICE);
-		priv->tx_skbuff_dma[entry] = desc->des2;
+		if (dma_mapping_error(priv->device, desc->des2))
+			goto dma_map_err;
+		priv->tx_skbuff_dma[entry].buf = desc->des2;
 		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
 						csum_insertion, priv->mode);
 	} else {
 		desc = first;
 		entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+		if (unlikely(entry < 0))
+			goto dma_map_err;
 	}
 
 	for (i = 0; i < nfrags; i++) {
@@ -1926,7 +1946,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
 					      DMA_TO_DEVICE);
-		priv->tx_skbuff_dma[entry] = desc->des2;
+		if (dma_mapping_error(priv->device, desc->des2))
+			goto dma_map_err; /* should reuse desc w/o issues */
+
+		priv->tx_skbuff_dma[entry].buf = desc->des2;
+		priv->tx_skbuff_dma[entry].map_as_page = true;
 		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
 						priv->mode);
 		wmb();
@@ -1993,7 +2017,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	priv->hw->dma->enable_dma_transmission(priv->ioaddr);
 
 	spin_unlock(&priv->tx_lock);
+	return NETDEV_TX_OK;
 
+dma_map_err:
+	dev_err(priv->device, "Tx dma map failed\n");
+	dev_kfree_skb(skb);
+	priv->dev->stats.tx_dropped++;
 	return NETDEV_TX_OK;
 }
 
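The transmit-path changes above follow the standard kernel DMA rule: every handle returned by dma_map_single() or skb_frag_dma_map() must be checked with dma_mapping_error() before the descriptor is handed to the engine, and a failed mapping drops the skb and bumps tx_dropped instead of programming a bogus address into the ring. A minimal stand-alone sketch of that pattern, using a made-up helper name (my_xmit_map_head) that is not part of the stmmac driver:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative only: map the linear part of an skb and report failure so
 * the caller can drop the packet, mirroring the dma_map_err path added to
 * stmmac_xmit() above.
 */
static int my_xmit_map_head(struct device *dev, struct sk_buff *skb,
			    dma_addr_t *buf)
{
	*buf = dma_map_single(dev, skb->data, skb_headlen(skb),
			      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *buf))
		return -ENOMEM;	/* caller frees the skb, counts tx_dropped */

	return 0;
}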
@@ -2046,7 +2075,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 			priv->rx_skbuff_dma[entry] =
 			    dma_map_single(priv->device, skb->data, bfsize,
 					   DMA_FROM_DEVICE);
-
+			if (dma_mapping_error(priv->device,
+					      priv->rx_skbuff_dma[entry])) {
+				dev_err(priv->device, "Rx dma map failed\n");
+				dev_kfree_skb(skb);
+				break;
+			}
 			p->des2 = priv->rx_skbuff_dma[entry];
 
 			priv->hw->mode->refill_desc3(priv, p);