@@ -141,8 +141,7 @@ static void gfar_netpoll(struct net_device *dev);
 #endif
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-                               struct napi_struct *napi);
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb);
 static void gfar_halt_nodisable(struct gfar_private *priv);
 static void gfar_clear_exact_match(struct net_device *dev);
 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
@@ -262,7 +261,7 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
                 rx_queue = priv->rx_queue[i];
                 rx_queue->rx_bd_base = vaddr;
                 rx_queue->rx_bd_dma_base = addr;
-                rx_queue->dev = ndev;
+                rx_queue->ndev = ndev;
                 addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
                 vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
         }
@@ -593,7 +592,7 @@ static int gfar_alloc_rx_queues(struct gfar_private *priv)

                 priv->rx_queue[i]->rx_skbuff = NULL;
                 priv->rx_queue[i]->qindex = i;
-                priv->rx_queue[i]->dev = priv->ndev;
+                priv->rx_queue[i]->ndev = priv->ndev;
         }
         return 0;
 }
@@ -1913,7 +1912,7 @@ static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
 static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
 {
         struct rxbd8 *rxbdp;
-        struct gfar_private *priv = netdev_priv(rx_queue->dev);
+        struct gfar_private *priv = netdev_priv(rx_queue->ndev);
         int i;

         rxbdp = rx_queue->rx_bd_base;
@@ -2709,17 +2708,17 @@ static struct sk_buff *gfar_new_skb(struct net_device *ndev,

 static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
 {
-        struct gfar_private *priv = netdev_priv(rx_queue->dev);
+        struct gfar_private *priv = netdev_priv(rx_queue->ndev);
         struct gfar_extra_stats *estats = &priv->extra_stats;

-        netdev_err(rx_queue->dev, "Can't alloc RX buffers\n");
+        netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
         atomic64_inc(&estats->rx_alloc_err);
 }

 static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
                                 int alloc_cnt)
 {
-        struct net_device *ndev = rx_queue->dev;
+        struct net_device *ndev = rx_queue->ndev;
         struct rxbd8 *bdp, *base;
         dma_addr_t bufaddr;
         int i;
@@ -2756,10 +2755,10 @@ static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
         rx_queue->next_to_use = i;
 }

-static void count_errors(u32 lstatus, struct net_device *dev)
+static void count_errors(u32 lstatus, struct net_device *ndev)
 {
-        struct gfar_private *priv = netdev_priv(dev);
-        struct net_device_stats *stats = &dev->stats;
+        struct gfar_private *priv = netdev_priv(ndev);
+        struct net_device_stats *stats = &ndev->stats;
         struct gfar_extra_stats *estats = &priv->extra_stats;

         /* If the packet was truncated, none of the other errors matter */
@@ -2854,10 +2853,9 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
 }

 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
-static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-                               struct napi_struct *napi)
+static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
 {
-        struct gfar_private *priv = netdev_priv(dev);
+        struct gfar_private *priv = netdev_priv(ndev);
         struct rxfcb *fcb = NULL;

         /* fcb is at the beginning if exists */
@@ -2866,10 +2864,8 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
         /* Remove the FCB from the skb
          * Remove the padded bytes, if there are any
          */
-        if (priv->uses_rxfcb) {
-                skb_record_rx_queue(skb, fcb->rq);
+        if (priv->uses_rxfcb)
                 skb_pull(skb, GMAC_FCB_LEN);
-        }

         /* Get receive timestamp from the skb */
         if (priv->hwts_rx_en) {
@@ -2883,24 +2879,20 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
         if (priv->padding)
                 skb_pull(skb, priv->padding);

-        if (dev->features & NETIF_F_RXCSUM)
+        if (ndev->features & NETIF_F_RXCSUM)
                 gfar_rx_checksum(skb, fcb);

         /* Tell the skb what kind of packet this is */
-        skb->protocol = eth_type_trans(skb, dev);
+        skb->protocol = eth_type_trans(skb, ndev);

         /* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
          * Even if vlan rx accel is disabled, on some chips
          * RXFCB_VLN is pseudo randomly set.
          */
-        if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
+        if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
             be16_to_cpu(fcb->flags) & RXFCB_VLN)
                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                        be16_to_cpu(fcb->vlctl));
-
-        /* Send the packet up the stack */
-        napi_gro_receive(napi, skb);
-
 }

 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2909,12 +2901,12 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
  */
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 {
-        struct net_device *dev = rx_queue->dev;
+        struct net_device *ndev = rx_queue->ndev;
         struct rxbd8 *bdp, *base;
         struct sk_buff *skb;
         int i, howmany = 0;
         int cleaned_cnt = gfar_rxbd_unused(rx_queue);
-        struct gfar_private *priv = netdev_priv(dev);
+        struct gfar_private *priv = netdev_priv(ndev);

         /* Get the first full descriptor */
         base = rx_queue->rx_bd_base;
@@ -2948,7 +2940,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)

                 if (unlikely(!(lstatus & BD_LFLAG(RXBD_LAST)) ||
                              (lstatus & BD_LFLAG(RXBD_ERR)))) {
-                        count_errors(lstatus, dev);
+                        count_errors(lstatus, ndev);

                         /* discard faulty buffer */
                         dev_kfree_skb(skb);
@@ -2965,11 +2957,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
                         skb_put(skb, pkt_len);
                         rx_queue->stats.rx_bytes += pkt_len;
                         skb_record_rx_queue(skb, rx_queue->qindex);
-                        gfar_process_frame(dev, skb,
-                                           &rx_queue->grp->napi_rx);
+                        gfar_process_frame(ndev, skb);
+
+                        /* Send the packet up the stack */
+                        napi_gro_receive(&rx_queue->grp->napi_rx, skb);

                 } else {
-                        netif_warn(priv, rx_err, dev, "Missing skb!\n");
+                        netif_warn(priv, rx_err, ndev, "Missing skb!\n");
                         rx_queue->stats.rx_dropped++;
                         atomic64_inc(&priv->extra_stats.rx_skbmissing);
                 }
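
Taken together, the hunks rename the rx queue's netdev back-pointer from dev to ndev and narrow gfar_process_frame() to skb preparation only (FCB and padding removal, checksum, protocol, VLAN tag), moving delivery into gfar_clean_rx_ring() so the napi pointer no longer has to be threaded through. As a rough sketch assembled from the '+' and context lines above (bodies abridged with elision comments; this is not the complete driver code), the receive path now looks like this:

/* Post-patch shape of the gianfar rx path -- an abridged sketch only;
 * elided sections are marked with "..." comments.
 */
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
{
        struct gfar_private *priv = netdev_priv(ndev);

        /* ... strip the FCB and padding, then gfar_rx_checksum(),
         * eth_type_trans() and __vlan_hwaccel_put_tag() as in the
         * hunks above; no napi_gro_receive() here anymore ...
         */
}

int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
        struct net_device *ndev = rx_queue->ndev;

        /* ... for each completed rx descriptor with a valid skb ... */
        skb_record_rx_queue(skb, rx_queue->qindex);
        gfar_process_frame(ndev, skb);

        /* Send the packet up the stack */
        napi_gro_receive(&rx_queue->grp->napi_rx, skb);
        /* ... error and statistics handling, return value elided ... */
}

One side effect worth noting: the rx queue index is now always recorded by the caller from rx_queue->qindex, where previously gfar_process_frame() recorded it from fcb->rq, and only when the hardware frame control block was present.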