@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
 		if (skb == NULL)
 			break;
 		np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->rx_info[i].mapping)) {
+			dev_kfree_skb(skb);
+			np->rx_info[i].skb = NULL;
+			break;
+		}
 		/* Grrr, we cannot offset to correctly align the IP header. */
 		np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
 	}
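
The RX-side hunks here and in refill_rx_ring further down follow the same pattern: check the handle returned by pci_map_single() with pci_dma_mapping_error() before it is handed to the hardware, and throw the buffer away if the mapping failed. A minimal stand-alone sketch of that pattern, with a made-up rx_slot struct standing in for the driver's np->rx_info[] entries:

/* Sketch only: map one receive buffer and validate the DMA handle.
 * "rx_slot" and map_rx_buffer() are hypothetical, not driver code. */
struct rx_slot {
	struct sk_buff *skb;
	dma_addr_t mapping;
};

static int map_rx_buffer(struct pci_dev *pdev, struct rx_slot *slot,
			 struct sk_buff *skb, unsigned int buf_sz)
{
	slot->skb = skb;
	slot->mapping = pci_map_single(pdev, skb->data, buf_sz,
				       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, slot->mapping)) {
		/* The handle is unusable: free the skb and report failure
		 * so the caller can stop filling the ring. */
		dev_kfree_skb(skb);
		slot->skb = NULL;
		return -ENOMEM;
	}
	return 0;
}

Breaking out of the fill loop on failure, rather than retrying, mirrors what the driver already does when netdev_alloc_skb() returns NULL.
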
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
 	unsigned int entry;
+	unsigned int prev_tx;
 	u32 status;
-	int i;
+	int i, j;
 
 	/*
 	 * be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 	}
 #endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
 
+	prev_tx = np->cur_tx;
 	entry = np->cur_tx % TX_RING_SIZE;
 	for (i = 0; i < skb_num_frags(skb); i++) {
 		int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 					       skb_frag_size(this_frag),
 					       PCI_DMA_TODEVICE);
 		}
+		if (pci_dma_mapping_error(np->pci_dev,
+					  np->tx_info[entry].mapping)) {
+			dev->stats.tx_dropped++;
+			goto err_out;
+		}
 
 		np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
 		np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 
 	return NETDEV_TX_OK;
-}
 
+err_out:
+	entry = prev_tx % TX_RING_SIZE;
+	np->tx_info[entry].skb = NULL;
+	if (i > 0) {
+		pci_unmap_single(np->pci_dev,
+				 np->tx_info[entry].mapping,
+				 skb_first_frag_len(skb),
+				 PCI_DMA_TODEVICE);
+		np->tx_info[entry].mapping = 0;
+		entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+		for (j = 1; j < i; j++) {
+			pci_unmap_single(np->pci_dev,
+					 np->tx_info[entry].mapping,
+					 skb_frag_size(
+						&skb_shinfo(skb)->frags[j - 1]),
+					 PCI_DMA_TODEVICE);
+			entry++;
+		}
+	}
+	dev_kfree_skb_any(skb);
+	np->cur_tx = prev_tx;
+	return NETDEV_TX_OK;
+}
 
 /* The interrupt handler does all of the Rx thread work and cleans up
    after the Tx thread. */
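
The new err_out path is the subtle part of the patch: by the time a fragment fails to map, the head buffer and any earlier fragments are already mapped, so they must be unmapped in order before the skb is dropped and np->cur_tx is rolled back to prev_tx. A rough stand-alone sketch of that unwind idea, using a hypothetical tx_slot array instead of the driver's tx_info[]/used_slots bookkeeping:

/* Sketch only: unwind a partially mapped multi-fragment transmit.
 * "tx_slot", "slots" and "nr_mapped" are made-up bookkeeping. */
struct tx_slot {
	dma_addr_t mapping;
	size_t len;
};

static void unwind_tx_mappings(struct pci_dev *pdev, struct tx_slot *slots,
			       unsigned int nr_mapped, struct sk_buff *skb)
{
	unsigned int k;

	/* Undo the mappings that succeeded, in the order they were made. */
	for (k = 0; k < nr_mapped; k++)
		pci_unmap_single(pdev, slots[k].mapping, slots[k].len,
				 PCI_DMA_TODEVICE);

	/* Drop the packet; the caller still reports NETDEV_TX_OK. */
	dev_kfree_skb_any(skb);
}

Returning NETDEV_TX_OK after dropping the packet, rather than NETDEV_TX_BUSY, matches the usual convention that TX_BUSY is reserved for transient ring-full conditions; a DMA mapping failure is not helped by requeuing the same skb.
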
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
 				break;	/* Better luck next round. */
 			np->rx_info[entry].mapping =
 				pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			if (pci_dma_mapping_error(np->pci_dev,
+						  np->rx_info[entry].mapping)) {
+				dev_kfree_skb(skb);
+				np->rx_info[entry].skb = NULL;
+				break;
+			}
 			np->rx_ring[entry].rxaddr =
 				cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
 		}
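
As a side note, not part of this patch: the pci_dma_* wrappers used throughout this driver were later removed from the kernel in favour of the generic DMA API, where the equivalent of the RX check above would look roughly like this (assuming the same np->pci_dev device):

/* Rough modern equivalent of the RX mapping plus error check above,
 * using the generic DMA API instead of the old pci_dma_* wrappers. */
dma_addr_t mapping = dma_map_single(&np->pci_dev->dev, skb->data,
				    np->rx_buf_sz, DMA_FROM_DEVICE);
if (dma_mapping_error(&np->pci_dev->dev, mapping)) {
	dev_kfree_skb(skb);
	/* give up on this ring slot, as the patch does */
}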