@@ -234,6 +234,9 @@
 
 #define DESC_NUM 256
 
+#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#define NETSEC_RX_BUF_SZ 1536
+
 #define DESC_SZ sizeof(struct netsec_de)
 
 #define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
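The two new defines fix the geometry of every RX buffer: NETSEC_SKB_PAD bytes of headroom (NET_SKB_PAD plus NET_IP_ALIGN), a 1536-byte payload area the hardware DMAs into (comfortably above a 1500-byte MTU frame plus Ethernet and VLAN headers), and room for the struct skb_shared_info that build_skb() later appends. The standalone sketch below reproduces the size arithmetic; the constant values are illustrative assumptions (in a real kernel they are arch- and config-dependent) and SHINFO_SZ stands in for sizeof(struct skb_shared_info). It checks the key invariant: the napi_alloc_frag() request in netsec_alloc_rx_data() must equal the truesize later handed to build_skb() in netsec_process_rx().

/* rx_frag_math.c - standalone model of the netsec RX fragment layout.
 * NET_SKB_PAD, NET_IP_ALIGN, SMP_CACHE_BYTES and SHINFO_SZ below are
 * assumed, illustrative values, not the real kernel constants.
 */
#include <assert.h>
#include <stdio.h>

#define SMP_CACHE_BYTES   64
#define SKB_DATA_ALIGN(x) (((x) + SMP_CACHE_BYTES - 1) & ~(SMP_CACHE_BYTES - 1))
#define NET_SKB_PAD       64
#define NET_IP_ALIGN      2
#define SHINFO_SZ         320	/* stand-in for sizeof(struct skb_shared_info) */

#define NETSEC_SKB_PAD    (NET_SKB_PAD + NET_IP_ALIGN)
#define NETSEC_RX_BUF_SZ  1536

int main(void)
{
	/* what netsec_alloc_rx_data() requests from napi_alloc_frag() */
	size_t total_len = SKB_DATA_ALIGN(SHINFO_SZ) +
			   SKB_DATA_ALIGN(NETSEC_RX_BUF_SZ + NETSEC_SKB_PAD);
	/* what netsec_process_rx() later passes to build_skb() */
	size_t truesize = SKB_DATA_ALIGN(NETSEC_RX_BUF_SZ + NETSEC_SKB_PAD) +
			  SKB_DATA_ALIGN(SHINFO_SZ);

	assert(total_len == truesize);	/* the two computations must agree */
	printf("frag: %d headroom + %d payload + shinfo = %zu bytes\n",
	       NETSEC_SKB_PAD, NETSEC_RX_BUF_SZ, total_len);
	return 0;
}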
@@ -571,34 +574,10 @@ static const struct ethtool_ops netsec_ethtool_ops = {
 
 /************* NETDEV_OPS FOLLOW *************/
 
-static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
-					struct netsec_desc *desc)
-{
-	struct sk_buff *skb;
-
-	if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
-		skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
-	} else {
-		desc->len = L1_CACHE_ALIGN(desc->len);
-		skb = netdev_alloc_skb(priv->ndev, desc->len);
-	}
-	if (!skb)
-		return NULL;
-
-	desc->addr = skb->data;
-	desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
-					DMA_FROM_DEVICE);
-	if (dma_mapping_error(priv->dev, desc->dma_addr)) {
-		dev_kfree_skb_any(skb);
-		return NULL;
-	}
-	return skb;
-}
 
 static void netsec_set_rx_de(struct netsec_priv *priv,
 			     struct netsec_desc_ring *dring, u16 idx,
-			     const struct netsec_desc *desc,
-			     struct sk_buff *skb)
+			     const struct netsec_desc *desc)
 {
 	struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
 	u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
@@ -617,59 +596,6 @@ static void netsec_set_rx_de(struct netsec_priv *priv,
 	dring->desc[idx].dma_addr = desc->dma_addr;
 	dring->desc[idx].addr = desc->addr;
 	dring->desc[idx].len = desc->len;
-	dring->desc[idx].skb = skb;
-}
-
-static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
-					struct netsec_desc_ring *dring,
-					u16 idx,
-					struct netsec_rx_pkt_info *rxpi,
-					struct netsec_desc *desc, u16 *len)
-{
-	struct netsec_de de = {};
-
-	memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
-
-	*len = de.buf_len_info >> 16;
-
-	rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
-	rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
-	rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
-			 NETSEC_RX_PKT_ERR_MASK;
-	*desc = dring->desc[idx];
-	return desc->skb;
-}
-
-static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
-					      struct netsec_rx_pkt_info *rxpi,
-					      struct netsec_desc *desc,
-					      u16 *len)
-{
-	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
-	struct sk_buff *tmp_skb, *skb = NULL;
-	struct netsec_desc td;
-	int tail;
-
-	*rxpi = (struct netsec_rx_pkt_info){};
-
-	td.len = priv->ndev->mtu + 22;
-
-	tmp_skb = netsec_alloc_skb(priv, &td);
-
-	tail = dring->tail;
-
-	if (!tmp_skb) {
-		netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
-				 dring->desc[tail].skb);
-	} else {
-		skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
-		netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
-	}
-
-	/* move tail ahead */
-	dring->tail = (dring->tail + 1) % DESC_NUM;
-
-	return skb;
 }
 
 static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
@@ -736,19 +662,65 @@ static int netsec_process_tx(struct netsec_priv *priv, int budget)
 	return done;
 }
 
+static void *netsec_alloc_rx_data(struct netsec_priv *priv,
+				  dma_addr_t *dma_handle, u16 *desc_len)
+{
+	size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	size_t payload_len = NETSEC_RX_BUF_SZ;
+	dma_addr_t mapping;
+	void *buf;
+
+	total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
+
+	buf = napi_alloc_frag(total_len);
+	if (!buf)
+		return NULL;
+
+	mapping = dma_map_single(priv->dev, buf + NETSEC_SKB_PAD, payload_len,
+				 DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(priv->dev, mapping)))
+		goto err_out;
+
+	*dma_handle = mapping;
+	*desc_len = payload_len;
+
+	return buf;
+
+err_out:
+	skb_free_frag(buf);
+	return NULL;
+}
+
+static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
+{
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+	u16 idx = from;
+
+	while (num) {
+		netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
+		idx++;
+		if (idx >= DESC_NUM)
+			idx = 0;
+		num--;
+	}
+}
+
 static int netsec_process_rx(struct netsec_priv *priv, int budget)
 {
 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
 	struct net_device *ndev = priv->ndev;
 	struct netsec_rx_pkt_info rx_info;
-	int done = 0;
-	struct netsec_desc desc;
 	struct sk_buff *skb;
-	u16 len;
+	int done = 0;
 
 	while (done < budget) {
 		u16 idx = dring->tail;
 		struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
+		struct netsec_desc *desc = &dring->desc[idx];
+		u16 pkt_len, desc_len;
+		dma_addr_t dma_handle;
+		void *buf_addr;
+		u32 truesize;
 
 		if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
 			/* reading the register clears the irq */
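netsec_alloc_rx_data() and netsec_rx_fill() split buffer refill into two steps: allocate-and-map, then publish. Only netsec_set_rx_de() sets the descriptor's OWN bit, so netsec_rx_fill() is the single point where slots are handed back to the hardware, whether armed with a fresh fragment or re-armed with the old one on an error path. A toy userspace model of that handshake follows; own[] and rx_fill() are stand-ins for the OWN field and the real driver functions, and the ring is shrunk to 8 entries:

#include <stdbool.h>
#include <stdio.h>

#define DESC_NUM 8			/* demo ring; the driver uses 256 */

static bool own[DESC_NUM];		/* true: the NIC owns the slot */

/* models netsec_rx_fill(): re-arm `num` slots starting at `from` */
static void rx_fill(int from, int num)
{
	int idx = from;

	while (num--) {
		own[idx] = true;	/* models netsec_set_rx_de() setting OWN */
		if (++idx >= DESC_NUM)
			idx = 0;
	}
}

int main(void)
{
	int tail = 0;

	rx_fill(0, DESC_NUM);		/* initial arming, as in setup_rx_dring */
	own[0] = false;			/* pretend the NIC completed two frames */
	own[1] = false;

	while (!own[tail]) {		/* the budget loop's OWN-bit check */
		printf("consume descriptor %d\n", tail);
		rx_fill(tail, 1);	/* re-publish the slot to the NIC */
		tail = (tail + 1) % DESC_NUM;
	}
	return 0;
}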
@@ -762,18 +734,59 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
 		 */
 		dma_rmb();
 		done++;
-		skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
-		if (unlikely(!skb) || rx_info.err_flag) {
+
+		pkt_len = de->buf_len_info >> 16;
+		rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
+			NETSEC_RX_PKT_ERR_MASK;
+		rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
+		if (rx_info.err_flag) {
 			netif_err(priv, drv, priv->ndev,
-				  "%s: rx fail err(%d)\n",
-				  __func__, rx_info.err_code);
+				  "%s: rx fail err(%d)\n", __func__,
+				  rx_info.err_code);
 			ndev->stats.rx_dropped++;
+			dring->tail = (dring->tail + 1) % DESC_NUM;
+			/* reuse buffer page frag */
+			netsec_rx_fill(priv, idx, 1);
 			continue;
 		}
+		rx_info.rx_cksum_result =
+			(de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
 
-		dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
-				 DMA_FROM_DEVICE);
-		skb_put(skb, len);
+		/* allocate a fresh buffer and map it to the hardware.
+		 * This will eventually replace the old buffer in the hardware
+		 */
+		buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
+		if (unlikely(!buf_addr))
+			break;
+
+		dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
+					DMA_FROM_DEVICE);
+		prefetch(desc->addr);
+
+		truesize = SKB_DATA_ALIGN(desc->len + NETSEC_SKB_PAD) +
+			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		skb = build_skb(desc->addr, truesize);
+		if (unlikely(!skb)) {
+			/* free the newly allocated buffer, we are not going to
+			 * use it
+			 */
+			dma_unmap_single(priv->dev, dma_handle, desc_len,
+					 DMA_FROM_DEVICE);
+			skb_free_frag(buf_addr);
+			netif_err(priv, drv, priv->ndev,
+				  "rx failed to build skb\n");
+			break;
+		}
+		dma_unmap_single_attrs(priv->dev, desc->dma_addr, desc->len,
+				       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+
+		/* Update the descriptor with the new buffer we allocated */
+		desc->len = desc_len;
+		desc->dma_addr = dma_handle;
+		desc->addr = buf_addr;
+
+		skb_reserve(skb, NETSEC_SKB_PAD);
+		skb_put(skb, pkt_len);
 		skb->protocol = eth_type_trans(skb, priv->ndev);
 
 		if (priv->rx_cksum_offload_flag &&
@@ -782,8 +795,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
 
 		if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
 			ndev->stats.rx_packets++;
-			ndev->stats.rx_bytes += len;
+			ndev->stats.rx_bytes += pkt_len;
 		}
+
+		netsec_rx_fill(priv, idx, 1);
+		dring->tail = (dring->tail + 1) % DESC_NUM;
 	}
 
 	return done;
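For a cache-incoherent SoC, the ordering above is the heart of the rework: sync only the pkt_len bytes the NIC actually wrote before the CPU reads them, wrap the fragment with build_skb() instead of copying, then unmap with DMA_ATTR_SKIP_CPU_SYNC because the CPU-visible part was already synced; and since the replacement fragment is allocated before the old one is consumed, a failed allocation just breaks out and leaves the ring intact. Below is a compressed trace of that per-packet lifecycle, with printf stand-ins for the kernel DMA and skb calls (nothing here invokes the real APIs):

#include <stdio.h>

/* printf stand-ins modeling the call order in netsec_process_rx() */
static void sync_for_cpu(int len)
{
	printf("dma_sync_single_for_cpu: %d bytes\n", len);
}

static void unmap_skip_sync(int len)
{
	printf("dma_unmap_single_attrs(DMA_ATTR_SKIP_CPU_SYNC): %d bytes\n", len);
}

int main(void)
{
	int pkt_len = 128;	/* bytes the NIC wrote (from buf_len_info) */
	int buf_len = 1536;	/* the full mapped payload area */

	printf("1. netsec_alloc_rx_data: map a replacement fragment\n");
	sync_for_cpu(pkt_len);	/* 2. only the received bytes, not all 1536 */
	printf("3. build_skb around the old fragment: zero copies\n");
	unmap_skip_sync(buf_len);	/* 4. tear down without a second sync */
	printf("5. re-arm the descriptor with the new fragment, advance tail\n");
	return 0;
}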
@@ -946,7 +962,10 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
 		dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
 				 id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
 						       DMA_TO_DEVICE);
-		dev_kfree_skb(desc->skb);
+		if (id == NETSEC_RING_RX)
+			skb_free_frag(desc->addr);
+		else if (id == NETSEC_RING_TX)
+			dev_kfree_skb(desc->skb);
 	}
 
 	memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
@@ -1002,22 +1021,30 @@ static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
 static int netsec_setup_rx_dring(struct netsec_priv *priv)
 {
 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
-	struct netsec_desc desc;
-	struct sk_buff *skb;
-	int n;
+	int i;
 
-	desc.len = priv->ndev->mtu + 22;
+	for (i = 0; i < DESC_NUM; i++) {
+		struct netsec_desc *desc = &dring->desc[i];
+		dma_addr_t dma_handle;
+		void *buf;
+		u16 len;
 
-	for (n = 0; n < DESC_NUM; n++) {
-		skb = netsec_alloc_skb(priv, &desc);
-		if (!skb) {
+		buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
+		if (!buf) {
 			netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
-			return -ENOMEM;
+			goto err_out;
 		}
-		netsec_set_rx_de(priv, dring, n, &desc, skb);
+		desc->dma_addr = dma_handle;
+		desc->addr = buf;
+		desc->len = len;
 	}
 
+	netsec_rx_fill(priv, 0, DESC_NUM);
+
 	return 0;
+
+err_out:
+	return -ENOMEM;
 }
 
 static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
@@ -1377,6 +1404,8 @@ static int netsec_netdev_init(struct net_device *ndev)
 	int ret;
 	u16 data;
 
+	BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);
+
 	ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
 	if (ret)
 		return ret;
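The new BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM) guard pins down an assumption the ring-index arithmetic relies on: with a power-of-two ring size, the (tail + 1) % DESC_NUM advance can compile to a cheap bitwise AND, and index masking stays correct if the modulo is ever replaced outright. A quick standalone check of that equivalence (plain C, no kernel dependencies):

#include <assert.h>
#include <stdio.h>

#define DESC_NUM 256	/* must stay a power of two for the mask trick */

int main(void)
{
	unsigned int idx;

	/* for power-of-two sizes, modulo and masking agree on every index */
	for (idx = 0; idx < 4 * DESC_NUM; idx++)
		assert(idx % DESC_NUM == (idx & (DESC_NUM - 1)));

	printf("%% DESC_NUM == & (DESC_NUM - 1) for all indices\n");
	return 0;
}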