@@ -558,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
 	return &ring->buf[idx];
 }
 
-static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
 {
 	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-		dma_unmap_single(dev,
+		dma_unmap_single(eth->dev,
 				 dma_unmap_addr(tx_buf, dma_addr0),
 				 dma_unmap_len(tx_buf, dma_len0),
 				 DMA_TO_DEVICE);
 	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-		dma_unmap_page(dev,
+		dma_unmap_page(eth->dev,
 			       dma_unmap_addr(tx_buf, dma_addr0),
 			       dma_unmap_len(tx_buf, dma_len0),
 			       DMA_TO_DEVICE);
@@ -611,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	if (skb_vlan_tag_present(skb))
 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
 
-	mapped_addr = dma_map_single(&dev->dev, skb->data,
+	mapped_addr = dma_map_single(eth->dev, skb->data,
 				     skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 		return -ENOMEM;
 
 	WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -639,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 
 		n_desc++;
 		frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
-		mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+		mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
 					       frag_map_size,
 					       DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+		if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 			goto err_dma;
 
 		if (i == nr_frags - 1 &&
@@ -695,7 +695,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 		tx_buf = mtk_desc_to_tx_buf(ring, itxd);
 
 		/* unmap dma */
-		mtk_tx_unmap(&dev->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
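The TX-path hunks above all enforce the same rule: a DMA mapping must be created and torn down against the same struct device, and that device must be the one that actually has DMA ops attached — the ethernet controller's device (eth->dev), not the net_device's embedded class device (&dev->dev). A minimal sketch of the corrected pairing follows; the helper name and surrounding error handling are illustrative, not part of this patch:

#include <linux/dma-mapping.h>

/* Illustrative only: map a TX buffer and unmap it against the same
 * DMA-capable device, as the hunks above now do with eth->dev. */
static int example_tx_map(struct mtk_eth *eth, void *data, size_t len,
			  dma_addr_t *mapped_addr)
{
	*mapped_addr = dma_map_single(eth->dev, data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, *mapped_addr)))
		return -ENOMEM;

	/* ... hand *mapped_addr to the hardware via the descriptor ... */

	dma_unmap_single(eth->dev, *mapped_addr, len, DMA_TO_DEVICE);
	return 0;
}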
@@ -852,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+		dma_addr = dma_map_single(eth->dev,
 					  new_data + NET_SKB_PAD,
 					  ring->buf_size,
 					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
 			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
@@ -871,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		}
 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 
-		dma_unmap_single(&netdev->dev, trxd.rxd1,
+		dma_unmap_single(eth->dev, trxd.rxd1,
 				 ring->buf_size, DMA_FROM_DEVICE);
 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
@@ -953,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 			done[mac]++;
 			budget--;
 		}
-		mtk_tx_unmap(eth->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);
 
 		ring->last_free = desc;
 		atomic_inc(&ring->free_count);
@@ -1108,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 
 	if (ring->buf) {
 		for (i = 0; i < MTK_DMA_SIZE; i++)
-			mtk_tx_unmap(eth, &ring->buf[i]);
+			mtk_tx_unmap(eth, &ring->buf[i]);
 		kfree(ring->buf);
 		ring->buf = NULL;
 	}
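The RX hunks apply the same consistency: the replacement buffer is mapped with DMA_FROM_DEVICE on eth->dev, and the filled buffer is unmapped on eth->dev as well, instead of mixing &eth->netdev[mac]->dev and &netdev->dev as before. A hypothetical refill helper, sketched under the same assumptions (the helper itself does not exist in the driver):

/* Illustrative only: map an RX replacement buffer the way the
 * mtk_poll_rx() hunk above does after this change. */
static int example_rx_refill_map(struct mtk_eth *eth, struct mtk_rx_ring *ring,
				 void *new_data, dma_addr_t *dma_addr)
{
	*dma_addr = dma_map_single(eth->dev, new_data + NET_SKB_PAD,
				   ring->buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(eth->dev, *dma_addr)))
		return -ENOMEM;	/* caller frees new_data and drops the frame */
	return 0;
}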