@@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
 	HMD(("init rxring, "));
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		struct sk_buff *skb;
+		u32 mapping;
 
 		skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
 		if (!skb) {
@@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp)
 
 		/* Because we reserve afterwards. */
 		skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+		mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
+					 DMA_FROM_DEVICE);
+		if (dma_mapping_error(hp->dma_dev, mapping)) {
+			dev_kfree_skb_any(skb);
+			hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
+			continue;
+		}
 		hme_write_rxd(hp, &hb->happy_meal_rxd[i],
 			      (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
-			      dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
-					     DMA_FROM_DEVICE));
+			      mapping);
 		skb_reserve(skb, RX_OFFSET);
 	}
 
@@ -2020,20 +2027,29 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
 		skb = hp->rx_skbs[elem];
 		if (len > RX_COPY_THRESHOLD) {
 			struct sk_buff *new_skb;
+			u32 mapping;
 
 			/* Now refill the entry, if we can. */
 			new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
 			if (new_skb == NULL) {
 				drops++;
 				goto drop_it;
 			}
+			skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
+			mapping = dma_map_single(hp->dma_dev, new_skb->data,
+						 RX_BUF_ALLOC_SIZE,
+						 DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+				dev_kfree_skb_any(new_skb);
+				drops++;
+				goto drop_it;
+			}
+
 			dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
 			hp->rx_skbs[elem] = new_skb;
-			skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
 			hme_write_rxd(hp, this,
 				      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
-				      dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE,
-						     DMA_FROM_DEVICE));
+				      mapping);
 			skb_reserve(new_skb, RX_OFFSET);
 
 			/* Trim the original skb for the netif. */
@@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev)
 	netif_wake_queue(dev);
 }
 
+static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
+				 u32 first_len, u32 first_entry, u32 entry)
+{
+	struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
+
+	dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
+
+	first_entry = NEXT_TX(first_entry);
+	while (first_entry != entry) {
+		struct happy_meal_txd *this = &txbase[first_entry];
+		u32 addr, len;
+
+		addr = hme_read_desc32(hp, &this->tx_addr);
+		len = hme_read_desc32(hp, &this->tx_flags);
+		len &= TXFLAG_SIZE;
+		dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
+	}
+}
+
 static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 					 struct net_device *dev)
 {
@@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 
 		len = skb->len;
 		mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
+			goto out_dma_error;
 		tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
 		hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
 			      (tx_flags | (len & TXFLAG_SIZE)),
@@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 		first_len = skb_headlen(skb);
 		first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
 					       DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
+			goto out_dma_error;
 		entry = NEXT_TX(entry);
 
 		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 			len = skb_frag_size(this_frag);
 			mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
 						   0, len, DMA_TO_DEVICE);
+			if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
+				unmap_partial_tx_skb(hp, first_mapping, first_len,
+						     first_entry, entry);
+				goto out_dma_error;
+			}
 			this_txflags = tx_flags;
 			if (frag == skb_shinfo(skb)->nr_frags - 1)
 				this_txflags |= TXFLAG_EOP;
@@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
 
 	tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
 	return NETDEV_TX_OK;
+
+out_dma_error:
+	hp->tx_skbs[hp->tx_new] = NULL;
+	spin_unlock_irq(&hp->happy_lock);
+
+	dev_kfree_skb_any(skb);
+	dev->stats.tx_dropped++;
+	return NETDEV_TX_OK;
 }
 
 static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
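
The RX hunks above all follow the same map-then-check ordering: the replacement buffer is mapped before the ring is touched, and the descriptor is only rewritten once dma_mapping_error() confirms the mapping is usable; on failure the new skb is freed and the slot is left alone (or parked with a zeroed descriptor). The sketch below is a minimal, generic illustration of that ordering, not the driver's code: struct demo_rx_ring, demo_refill_slot() and RX_BUF_SIZE are invented for the example, while the dma_*() and skb helpers are real kernel APIs.

/* Hypothetical sketch of the map-then-check RX refill pattern used above. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define RX_BUF_SIZE 1600	/* made-up buffer size for the example */

struct demo_rx_ring {
	struct device *dma_dev;
	struct net_device *netdev;
	struct sk_buff *skbs[256];
	dma_addr_t dma[256];
};

static int demo_refill_slot(struct demo_rx_ring *ring, unsigned int slot)
{
	struct sk_buff *skb;
	dma_addr_t mapping;

	skb = netdev_alloc_skb(ring->netdev, RX_BUF_SIZE);
	if (!skb)
		return -ENOMEM;

	/* Map first; the currently mapped buffer stays live until this succeeds. */
	mapping = dma_map_single(ring->dma_dev, skb->data, RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(ring->dma_dev, mapping)) {
		dev_kfree_skb_any(skb);	/* the old buffer stays in the ring */
		return -ENOMEM;
	}

	/* Only now is it safe to retire the old mapping and publish the new one. */
	if (ring->skbs[slot])
		dma_unmap_single(ring->dma_dev, ring->dma[slot], RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
	ring->skbs[slot] = skb;
	ring->dma[slot] = mapping;
	return 0;
}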
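
On the TX side the interesting case is a multi-fragment skb where skb_frag_dma_map() fails after the linear head and some fragments have already been mapped; the patch handles this by unmapping everything done so far (unmap_partial_tx_skb) before dropping the packet at out_dma_error. The following is a hedged sketch of that unwind idea under invented names (demo_xmit_map and its addrs/lens arrays are not part of the driver); the dma_*() and skb_frag_*() calls are real kernel APIs.

/* Hypothetical sketch: map head + fragments, rolling back on a partial failure. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

static int demo_xmit_map(struct device *dev, struct sk_buff *skb,
			 dma_addr_t *addrs, unsigned int *lens)
{
	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
	unsigned int i;

	/* Map the linear head first. */
	lens[0] = skb_headlen(skb);
	addrs[0] = dma_map_single(dev, skb->data, lens[0], DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addrs[0]))
		return -ENOMEM;

	/* Map each fragment; on failure, unwind everything mapped so far. */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		lens[i + 1] = skb_frag_size(frag);
		addrs[i + 1] = skb_frag_dma_map(dev, frag, 0, lens[i + 1],
						DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addrs[i + 1]))
			goto unwind;
	}
	return 0;

unwind:
	while (i-- > 0)		/* fragments 0 .. i-1 were mapped successfully */
		dma_unmap_page(dev, addrs[i + 1], lens[i + 1], DMA_TO_DEVICE);
	dma_unmap_single(dev, addrs[0], lens[0], DMA_TO_DEVICE);
	return -ENOMEM;
}

The caller would then drop the skb and count it as tx_dropped rather than return NETDEV_TX_BUSY, which is what the out_dma_error path in the patch does.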