@@ -116,8 +116,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void gfar_reset_task(struct work_struct *work);
 static void gfar_timeout(struct net_device *dev);
 static int gfar_close(struct net_device *dev);
-static struct sk_buff *gfar_new_skb(struct net_device *dev,
-				    dma_addr_t *bufaddr);
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+				int alloc_cnt);
 static int gfar_set_mac_address(struct net_device *dev);
 static int gfar_change_mtu(struct net_device *dev, int new_mtu);
 static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -142,7 +142,7 @@ static void gfar_netpoll(struct net_device *dev);
 int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
 static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-			       int amount_pull, struct napi_struct *napi);
+			       struct napi_struct *napi);
 static void gfar_halt_nodisable(struct gfar_private *priv);
 static void gfar_clear_exact_match(struct net_device *dev);
 static void gfar_set_mac_for_addr(struct net_device *dev, int num,
@@ -169,17 +169,15 @@ static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 	bdp->lstatus = cpu_to_be32(lstatus);
 }

-static int gfar_init_bds(struct net_device *ndev)
+static void gfar_init_bds(struct net_device *ndev)
 {
 	struct gfar_private *priv = netdev_priv(ndev);
 	struct gfar __iomem *regs = priv->gfargrp[0].regs;
 	struct gfar_priv_tx_q *tx_queue = NULL;
 	struct gfar_priv_rx_q *rx_queue = NULL;
 	struct txbd8 *txbdp;
-	struct rxbd8 *rxbdp;
 	u32 __iomem *rfbptr;
 	int i, j;
-	dma_addr_t bufaddr;

 	for (i = 0; i < priv->num_tx_queues; i++) {
 		tx_queue = priv->tx_queue[i];
@@ -207,33 +205,18 @@ static int gfar_init_bds(struct net_device *ndev)
 	rfbptr = &regs->rfbptr0;
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
-		rx_queue->cur_rx = rx_queue->rx_bd_base;
-		rx_queue->skb_currx = 0;
-		rxbdp = rx_queue->rx_bd_base;
-
-		for (j = 0; j < rx_queue->rx_ring_size; j++) {
-			struct sk_buff *skb = rx_queue->rx_skbuff[j];

-			if (skb) {
-				bufaddr = be32_to_cpu(rxbdp->bufPtr);
-			} else {
-				skb = gfar_new_skb(ndev, &bufaddr);
-				if (!skb) {
-					netdev_err(ndev, "Can't allocate RX buffers\n");
-					return -ENOMEM;
-				}
-				rx_queue->rx_skbuff[j] = skb;
-			}
+		rx_queue->next_to_clean = 0;
+		rx_queue->next_to_use = 0;

-			gfar_init_rxbdp(rx_queue, rxbdp, bufaddr);
-			rxbdp++;
-		}
+		/* make sure next_to_clean != next_to_use after this
+		 * by leaving at least 1 unused descriptor
+		 */
+		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));

 		rx_queue->rfbptr = rfbptr;
 		rfbptr += 2;
 	}
-
-	return 0;
 }

 static int gfar_alloc_skb_resources(struct net_device *ndev)
@@ -311,8 +294,7 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
 			rx_queue->rx_skbuff[j] = NULL;
 	}

-	if (gfar_init_bds(ndev))
-		goto cleanup;
+	gfar_init_bds(ndev);

 	return 0;

@@ -1639,10 +1621,7 @@ static int gfar_restore(struct device *dev)
 		return 0;
 	}

-	if (gfar_init_bds(ndev)) {
-		free_skb_resources(priv);
-		return -ENOMEM;
-	}
+	gfar_init_bds(ndev);

 	gfar_mac_reset(priv);

@@ -2704,30 +2683,19 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 	netdev_tx_completed_queue(txq, howmany, bytes_sent);
 }

-static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
+static struct sk_buff *gfar_new_skb(struct net_device *ndev,
+				    dma_addr_t *bufaddr)
 {
-	struct gfar_private *priv = netdev_priv(dev);
+	struct gfar_private *priv = netdev_priv(ndev);
 	struct sk_buff *skb;
+	dma_addr_t addr;

-	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
+	skb = netdev_alloc_skb(ndev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
 	if (!skb)
 		return NULL;

 	gfar_align_skb(skb);

-	return skb;
-}
-
-static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
-{
-	struct gfar_private *priv = netdev_priv(dev);
-	struct sk_buff *skb;
-	dma_addr_t addr;
-
-	skb = gfar_alloc_skb(dev);
-	if (!skb)
-		return NULL;
-
 	addr = dma_map_single(priv->dev, skb->data,
 			      priv->rx_buffer_size, DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(priv->dev, addr))) {
@@ -2739,6 +2707,55 @@ static struct sk_buff *gfar_new_skb(struct net_device *dev, dma_addr_t *bufaddr)
 	return skb;
 }

+static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
+{
+	struct gfar_private *priv = netdev_priv(rx_queue->dev);
+	struct gfar_extra_stats *estats = &priv->extra_stats;
+
+	netdev_err(rx_queue->dev, "Can't alloc RX buffers\n");
+	atomic64_inc(&estats->rx_alloc_err);
+}
+
+static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
+				int alloc_cnt)
+{
+	struct net_device *ndev = rx_queue->dev;
+	struct rxbd8 *bdp, *base;
+	dma_addr_t bufaddr;
+	int i;
+
+	i = rx_queue->next_to_use;
+	base = rx_queue->rx_bd_base;
+	bdp = &rx_queue->rx_bd_base[i];
+
+	while (alloc_cnt--) {
+		struct sk_buff *skb = rx_queue->rx_skbuff[i];
+
+		if (likely(!skb)) {
+			skb = gfar_new_skb(ndev, &bufaddr);
+			if (unlikely(!skb)) {
+				gfar_rx_alloc_err(rx_queue);
+				break;
+			}
+		} else { /* restore from sleep state */
+			bufaddr = be32_to_cpu(bdp->bufPtr);
+		}
+
+		rx_queue->rx_skbuff[i] = skb;
+
+		/* Setup the new RxBD */
+		gfar_init_rxbdp(rx_queue, bdp, bufaddr);
+
+		/* Update to the next pointer */
+		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
+
+		if (unlikely(++i == rx_queue->rx_ring_size))
+			i = 0;
+	}
+
+	rx_queue->next_to_use = i;
+}
+
 static inline void count_errors(unsigned short status, struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
@@ -2838,7 +2855,7 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)

 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
 static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
-			       int amount_pull, struct napi_struct *napi)
+			       struct napi_struct *napi)
 {
 	struct gfar_private *priv = netdev_priv(dev);
 	struct rxfcb *fcb = NULL;
@@ -2849,9 +2866,9 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
 	/* Remove the FCB from the skb
 	 * Remove the padded bytes, if there are any
 	 */
-	if (amount_pull) {
+	if (priv->uses_rxfcb) {
 		skb_record_rx_queue(skb, fcb->rq);
-		skb_pull(skb, amount_pull);
+		skb_pull(skb, GMAC_FCB_LEN);
 	}

 	/* Get receive timestamp from the skb */
@@ -2895,27 +2912,30 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 	struct net_device *dev = rx_queue->dev;
 	struct rxbd8 *bdp, *base;
 	struct sk_buff *skb;
-	int pkt_len;
-	int amount_pull;
-	int howmany = 0;
+	int i, howmany = 0;
+	int cleaned_cnt = gfar_rxbd_unused(rx_queue);
 	struct gfar_private *priv = netdev_priv(dev);

 	/* Get the first full descriptor */
-	bdp = rx_queue->cur_rx;
 	base = rx_queue->rx_bd_base;
+	i = rx_queue->next_to_clean;

-	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;
+	while (rx_work_limit--) {

-	while (!(be16_to_cpu(bdp->status) & RXBD_EMPTY) && rx_work_limit--) {
-		struct sk_buff *newskb;
-		dma_addr_t bufaddr;
+		if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
+			gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
+			cleaned_cnt = 0;
+		}

-		rmb();
+		bdp = &rx_queue->rx_bd_base[i];
+		if (be16_to_cpu(bdp->status) & RXBD_EMPTY)
+			break;

-		/* Add another skb for the future */
-		newskb = gfar_new_skb(dev, &bufaddr);
+		/* order rx buffer descriptor reads */
+		rmb();

-		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
+		/* fetch next to clean buffer from the ring */
+		skb = rx_queue->rx_skbuff[i];

 		dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
 				 priv->rx_buffer_size, DMA_FROM_DEVICE);
@@ -2924,30 +2944,26 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			 be16_to_cpu(bdp->length) > priv->rx_buffer_size))
 			bdp->status = cpu_to_be16(RXBD_LARGE);

-		/* We drop the frame if we failed to allocate a new buffer */
-		if (unlikely(!newskb ||
-			     !(be16_to_cpu(bdp->status) & RXBD_LAST) ||
+		if (unlikely(!(be16_to_cpu(bdp->status) & RXBD_LAST) ||
 			     be16_to_cpu(bdp->status) & RXBD_ERR)) {
 			count_errors(be16_to_cpu(bdp->status), dev);

-			if (unlikely(!newskb)) {
-				newskb = skb;
-				bufaddr = be32_to_cpu(bdp->bufPtr);
-			} else if (skb)
-				dev_kfree_skb(skb);
+			/* discard faulty buffer */
+			dev_kfree_skb(skb);
+
 		} else {
 			/* Increment the number of packets */
 			rx_queue->stats.rx_packets++;
 			howmany++;

 			if (likely(skb)) {
-				pkt_len = be16_to_cpu(bdp->length) -
+				int pkt_len = be16_to_cpu(bdp->length) -
 					  ETH_FCS_LEN;
 				/* Remove the FCS from the packet length */
 				skb_put(skb, pkt_len);
 				rx_queue->stats.rx_bytes += pkt_len;
 				skb_record_rx_queue(skb, rx_queue->qindex);
-				gfar_process_frame(dev, skb, amount_pull,
+				gfar_process_frame(dev, skb,
 						   &rx_queue->grp->napi_rx);

 			} else {
@@ -2958,26 +2974,23 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)

 		}

-		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
-
-		/* Setup the new bdp */
-		gfar_init_rxbdp(rx_queue, bdp, bufaddr);
+		rx_queue->rx_skbuff[i] = NULL;
+		cleaned_cnt++;
+		if (unlikely(++i == rx_queue->rx_ring_size))
+			i = 0;
+	}

-		/* Update Last Free RxBD pointer for LFC */
-		if (unlikely(rx_queue->rfbptr && priv->tx_actual_en))
-			gfar_write(rx_queue->rfbptr, (u32)bdp);
+	rx_queue->next_to_clean = i;

-		/* Update to the next pointer */
-		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
+	if (cleaned_cnt)
+		gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);

-		/* update to point at the next skb */
-		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
-				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
+	/* Update Last Free RxBD pointer for LFC */
+	if (unlikely(priv->tx_actual_en)) {
+		bdp = gfar_rxbd_lastfree(rx_queue);
+		gfar_write(rx_queue->rfbptr, (u32)bdp);
 	}

-	/* Update the current rxbd pointer to be the next one */
-	rx_queue->cur_rx = bdp;
-
 	return howmany;
 }

@@ -3552,14 +3565,8 @@ static noinline void gfar_update_link_state(struct gfar_private *priv)
 		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
 			for (i = 0; i < priv->num_rx_queues; i++) {
 				rx_queue = priv->rx_queue[i];
-				bdp = rx_queue->cur_rx;
-				/* skip to previous bd */
-				bdp = skip_bd(bdp, rx_queue->rx_ring_size - 1,
-					      rx_queue->rx_bd_base,
-					      rx_queue->rx_ring_size);
-
-				if (rx_queue->rfbptr)
-					gfar_write(rx_queue->rfbptr, (u32)bdp);
+				bdp = gfar_rxbd_lastfree(rx_queue);
+				gfar_write(rx_queue->rfbptr, (u32)bdp);
 			}

 			priv->tx_actual_en = 1;