@@ -245,12 +245,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	case PHY_INTERFACE_MODE_MII:
 		ge_mode = 1;
 		break;
-	case PHY_INTERFACE_MODE_RMII:
+	case PHY_INTERFACE_MODE_REVMII:
 		ge_mode = 2;
 		break;
+	case PHY_INTERFACE_MODE_RMII:
+		if (!mac->id)
+			goto err_phy;
+		ge_mode = 3;
+		break;
 	default:
-		dev_err(eth->dev, "invalid phy_mode\n");
-		return -1;
+		goto err_phy;
 	}

 	/* put the gmac into the right mode */
@@ -263,6 +267,11 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	mac->phy_dev->autoneg = AUTONEG_ENABLE;
 	mac->phy_dev->speed = 0;
 	mac->phy_dev->duplex = 0;
+
+	if (of_phy_is_fixed_link(mac->of_node))
+		mac->phy_dev->supported |=
+		SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
 	mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
 				   SUPPORTED_Asym_Pause;
 	mac->phy_dev->advertising = mac->phy_dev->supported |
@@ -272,6 +281,11 @@ static int mtk_phy_connect(struct mtk_mac *mac)
 	of_node_put(np);

 	return 0;
+
+err_phy:
+	of_node_put(np);
+	dev_err(eth->dev, "invalid phy_mode\n");
+	return -EINVAL;
 }

 static int mtk_mdio_init(struct mtk_eth *eth)
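
Aside: a minimal sketch of the single-exit error idiom the hunks above converge on — every failure path jumps to one label that drops the OF node reference exactly once and returns a real errno instead of -1. The function and names below (my_phy_connect, np) are hypothetical stand-ins, not code from this driver:

/* Hedged sketch of the single-exit error idiom, not driver code.
 * Assumes <linux/of.h>, <linux/of_net.h>, <linux/device.h>.
 */
static int my_phy_connect(struct device *dev, struct device_node *np)
{
	switch (of_get_phy_mode(np)) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_RMII:
		break;			/* supported modes */
	default:
		goto err_phy;		/* anything else is an error */
	}

	of_node_put(np);		/* reference dropped on success... */
	return 0;

err_phy:
	of_node_put(np);		/* ...and exactly once on failure */
	dev_err(dev, "invalid phy_mode\n");
	return -EINVAL;			/* proper errno rather than -1 */
}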
@@ -544,15 +558,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
 	return &ring->buf[idx];
 }

-static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
 {
 	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-		dma_unmap_single(dev,
+		dma_unmap_single(eth->dev,
 				 dma_unmap_addr(tx_buf, dma_addr0),
 				 dma_unmap_len(tx_buf, dma_len0),
 				 DMA_TO_DEVICE);
 	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-		dma_unmap_page(dev,
+		dma_unmap_page(eth->dev,
 			       dma_unmap_addr(tx_buf, dma_addr0),
 			       dma_unmap_len(tx_buf, dma_len0),
 			       DMA_TO_DEVICE);
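
For context, a hedged sketch (hypothetical my_priv and helpers, not this driver's API) of why the unmap helper now takes the driver-private struct: the DMA API resolves its operations per struct device, so a buffer must be unmapped through the same device that mapped it — here the ethernet controller's eth->dev rather than the netdev's embedded device:

/* Hedged sketch: map and unmap must go through the same struct
 * device, since dma_map_single()/dma_unmap_single() look up that
 * device's dma_ops and IOMMU state. Assumes <linux/dma-mapping.h>;
 * my_priv/my_map/my_unmap are hypothetical names.
 */
struct my_priv {
	struct device *dev;	/* the DMA-capable controller device */
};

static dma_addr_t my_map(struct my_priv *priv, void *buf, size_t len)
{
	return dma_map_single(priv->dev, buf, len, DMA_TO_DEVICE);
}

static void my_unmap(struct my_priv *priv, dma_addr_t addr, size_t len)
{
	/* same priv->dev as in my_map() above */
	dma_unmap_single(priv->dev, addr, len, DMA_TO_DEVICE);
}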
@@ -597,9 +611,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	if (skb_vlan_tag_present(skb))
 		txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);

-	mapped_addr = dma_map_single(&dev->dev, skb->data,
+	mapped_addr = dma_map_single(eth->dev, skb->data,
 				     skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+	if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 		return -ENOMEM;

 	WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -625,10 +639,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,

 		n_desc++;
 		frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
-		mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+		mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
 					       frag_map_size,
 					       DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+		if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
 			goto err_dma;

 		if (i == nr_frags - 1 &&
@@ -681,7 +695,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 		tx_buf = mtk_desc_to_tx_buf(ring, itxd);

 		/* unmap dma */
-		mtk_tx_unmap(&dev->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);

 		itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
 		itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -838,11 +852,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+		dma_addr = dma_map_single(eth->dev,
 					  new_data + NET_SKB_PAD,
 					  ring->buf_size,
 					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
 			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
@@ -857,7 +871,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 		}
 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

-		dma_unmap_single(&netdev->dev, trxd.rxd1,
+		dma_unmap_single(eth->dev, trxd.rxd1,
 				 ring->buf_size, DMA_FROM_DEVICE);
 		pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
 		skb->dev = netdev;
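
Similarly hedged (reusing the hypothetical my_priv from the sketch above): every streaming mapping can fail, so the result of dma_map_single() is checked with dma_mapping_error() against the same device before the address is used — the pattern the RX hunks above now follow with eth->dev on both sides:

/* Hedged sketch of the map-then-check pattern; my_rx_map is a
 * hypothetical name. Assumes <linux/dma-mapping.h>.
 */
static int my_rx_map(struct my_priv *priv, void *data, size_t len,
		     dma_addr_t *out)
{
	dma_addr_t addr;

	addr = dma_map_single(priv->dev, data, len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(priv->dev, addr)))
		return -ENOMEM;	/* caller drops the packet */

	*out = addr;
	return 0;
}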
@@ -939,7 +953,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 			done[mac]++;
 			budget--;
 		}
-		mtk_tx_unmap(eth->dev, tx_buf);
+		mtk_tx_unmap(eth, tx_buf);

 		ring->last_free = desc;
 		atomic_inc(&ring->free_count);
@@ -1094,7 +1108,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)

 	if (ring->buf) {
 		for (i = 0; i < MTK_DMA_SIZE; i++)
-			mtk_tx_unmap(eth->dev, &ring->buf[i]);
+			mtk_tx_unmap(eth, &ring->buf[i]);
 		kfree(ring->buf);
 		ring->buf = NULL;
 	}