@@ -86,16 +86,16 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
 	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
 		addr = dpaa2_sg_get_addr(&sgt[i]);
 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
 
-		skb_free_frag(sg_vaddr);
+		free_pages((unsigned long)sg_vaddr, 0);
 		if (dpaa2_sg_is_final(&sgt[i]))
 			break;
 	}
 
 free_buf:
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);
 }
 
 /* Build a linear skb based on a single-buffer frame descriptor */
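
Every Rx buffer teardown path in this patch switches from the page-frag pairing (dma_unmap_single() plus skb_free_frag()) to the page pairing, matching the new allocation scheme in add_bufs() further down. As an illustrative summary derived from the hunks in this diff (not itself part of the patch), the buffer lifecycle becomes:

	/* seed (add_bufs):
	 *	page  = dev_alloc_pages(0);
	 *	addr  = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
	 *			     DMA_BIDIRECTIONAL);
	 * drain (free_rx_fd, free_bufs, dpaa2_eth_rx, ...):
	 *	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	 *	dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
	 *		       DMA_BIDIRECTIONAL);
	 *	free_pages((unsigned long)vaddr, 0);
	 */

Mixing skb_free_frag() with page-backed buffers would mismanage the page reference counts, so every free site has to be converted in the same patch.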
@@ -109,7 +109,7 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
 
 	ch->buf_count--;
 
-	skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
+	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
 	if (unlikely(!skb))
 		return NULL;
 
@@ -144,19 +144,19 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
 		/* Get the address and length from the S/G entry */
 		sg_addr = dpaa2_sg_get_addr(sge);
 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
-		dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
 
 		sg_length = dpaa2_sg_get_len(sge);
 
 		if (i == 0) {
 			/* We build the skb around the first data buffer */
-			skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
+			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
 			if (unlikely(!skb)) {
 				/* Free the first SG entry now, since we already
 				 * unmapped it and obtained the virtual address
 				 */
-				skb_free_frag(sg_vaddr);
+				free_pages((unsigned long)sg_vaddr, 0);
 
 				/* We still need to subtract the buffers used
 				 * by this FD from our software counter
@@ -211,9 +211,9 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
 
 	for (i = 0; i < count; i++) {
 		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
-		dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
-		skb_free_frag(vaddr);
+		dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
+		free_pages((unsigned long)vaddr, 0);
 	}
 }
 
@@ -264,9 +264,7 @@ static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
 
 	fq = &priv->fq[queue_id];
 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-		err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
-						  priv->tx_qdid, 0,
-						  fq->tx_qdbin, fd);
+		err = priv->enqueue(priv, fq, fd, 0);
 		if (err != -EBUSY)
 			break;
 	}
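
Both xdp_enqueue() here and dpaa2_eth_tx() below now dispatch through priv->enqueue instead of hardcoding the queuing-destination (QDID) enqueue. The function pointer itself is presumably added to struct dpaa2_eth_priv in dpaa2-eth.h; a sketch of the assumed header change:

	/* Selected once at probe time by set_enqueue_mode() (see the
	 * set_buffer_layout hunk below); called on the Tx hot path.
	 */
	int (*enqueue)(struct dpaa2_eth_priv *priv,
		       struct dpaa2_eth_fq *fq,
		       struct dpaa2_fd *fd, u8 prio);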
@@ -378,16 +376,16 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 			return;
 		}
 
-		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
 		skb = build_linear_skb(ch, fd, vaddr);
 	} else if (fd_format == dpaa2_fd_sg) {
 		WARN_ON(priv->xdp_prog);
 
-		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
 		skb = build_frag_skb(priv, ch, buf_data);
-		skb_free_frag(vaddr);
+		free_pages((unsigned long)vaddr, 0);
 		percpu_extras->rx_sg_frames++;
 		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
 	} else {
@@ -657,7 +655,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
  * dpaa2_eth_tx().
  */
 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
-		       const struct dpaa2_fd *fd)
+		       const struct dpaa2_fd *fd, bool in_napi)
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	dma_addr_t fd_addr;
@@ -712,7 +710,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 		skb_free_frag(skbh);
 
 	/* Move on with skb release */
-	dev_kfree_skb(skb);
+	napi_consume_skb(skb, in_napi);
 }
 
 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
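
napi_consume_skb() batches skb frees when it is safe to do so: with a non-zero second argument it may place the skb on a per-CPU bulk-free list (legal only inside NAPI poll), while with zero it falls back to dev_consume_skb_any(). That is what the new in_napi flag threads through: the Tx confirmation path (NAPI context) passes true, and the dpaa2_eth_tx() error path, which may run in any context, passes false:

	napi_consume_skb(skb, false);	/* behaves like dev_consume_skb_any() */
	napi_consume_skb(skb, true);	/* NAPI poll only: may bulk-free */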
@@ -785,17 +783,15 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 	queue_mapping = skb_get_queue_mapping(skb);
 	fq = &priv->fq[queue_mapping];
 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-		err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
-						  priv->tx_qdid, 0,
-						  fq->tx_qdbin, &fd);
+		err = priv->enqueue(priv, fq, &fd, 0);
 		if (err != -EBUSY)
 			break;
 	}
 	percpu_extras->tx_portal_busy += i;
 	if (unlikely(err < 0)) {
 		percpu_stats->tx_errors++;
 		/* Clean up everything, including freeing the skb */
-		free_tx_fd(priv, &fd);
+		free_tx_fd(priv, &fd, false);
 	} else {
 		fd_len = dpaa2_fd_get_len(&fd);
 		percpu_stats->tx_packets++;
@@ -837,7 +833,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
 
 	/* Check frame errors in the FD field */
 	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
-	free_tx_fd(priv, fd);
+	free_tx_fd(priv, fd, true);
 
 	if (likely(!fd_errors))
 		return;
@@ -903,30 +899,32 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
-	void *buf;
+	struct page *page;
 	dma_addr_t addr;
 	int i, err;
 
 	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
 		/* Allocate buffer visible to WRIOP + skb shared info +
 		 * alignment padding
 		 */
-		buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
-		if (unlikely(!buf))
+		/* allocate one page for each Rx buffer. WRIOP sees
+		 * the entire page except for a tailroom reserved for
+		 * skb shared info
+		 */
+		page = dev_alloc_pages(0);
+		if (!page)
 			goto err_alloc;
 
-		buf = PTR_ALIGN(buf, priv->rx_buf_align);
-
-		addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
-				      DMA_BIDIRECTIONAL);
+		addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
+				    DMA_BIDIRECTIONAL);
 		if (unlikely(dma_mapping_error(dev, addr)))
 			goto err_map;
 
 		buf_array[i] = addr;
 
 		/* tracing point */
 		trace_dpaa2_eth_buf_seed(priv->net_dev,
-					 buf, dpaa2_eth_buf_raw_size(priv),
+					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
 					 addr, DPAA2_ETH_RX_BUF_SIZE,
 					 bpid);
 	}
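
WRIOP is only told about DPAA2_ETH_RX_BUF_SIZE bytes of each page, and build_skb() is now called with DPAA2_ETH_RX_BUF_RAW_SIZE, so the difference serves as the tailroom that build_skb() reserves for struct skb_shared_info. The constants are presumably defined along these lines in the companion dpaa2-eth.h change (a sketch, not shown in this diff):

	#define DPAA2_ETH_RX_BUF_RAW_SIZE	PAGE_SIZE
	#define DPAA2_ETH_RX_BUF_TAILROOM \
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
	#define DPAA2_ETH_RX_BUF_SIZE \
		(DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)

This also explains the dropped PTR_ALIGN() step: a full page is naturally aligned to anything the WRIOP revisions require, so rx_buf_align no longer has to be enforced in software at allocation time (it is still programmed into the hardware buffer layout, see set_buffer_layout below).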
@@ -948,7 +946,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
 	return i;
 
 err_map:
-	skb_free_frag(buf);
+	__free_pages(page, 0);
 err_alloc:
 	/* If we managed to allocate at least some buffers,
 	 * release them to hardware
@@ -2134,6 +2132,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	struct dpni_buffer_layout buf_layout = {0};
+	u16 rx_buf_align;
 	int err;
 
 	/* We need to check for WRIOP version 1.0.0, but depending on the MC
@@ -2142,9 +2141,9 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
 	 */
 	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
 	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
-		priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
+		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
 	else
-		priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
+		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
 
 	/* tx buffer */
 	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
@@ -2184,7 +2183,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
 	/* rx buffer */
 	buf_layout.pass_frame_status = true;
 	buf_layout.pass_parser_result = true;
-	buf_layout.data_align = priv->rx_buf_align;
+	buf_layout.data_align = rx_buf_align;
 	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
 	buf_layout.private_data_size = 0;
 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
@@ -2202,6 +2201,36 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
 	return 0;
 }
 
+#define DPNI_ENQUEUE_FQID_VER_MAJOR	7
+#define DPNI_ENQUEUE_FQID_VER_MINOR	9
+
+static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
+				       struct dpaa2_eth_fq *fq,
+				       struct dpaa2_fd *fd, u8 prio)
+{
+	return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+					   priv->tx_qdid, prio,
+					   fq->tx_qdbin, fd);
+}
+
+static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
+				       struct dpaa2_eth_fq *fq,
+				       struct dpaa2_fd *fd,
+				       u8 prio __always_unused)
+{
+	return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
+					   fq->tx_fqid, fd);
+}
+
+static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
+{
+	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+		priv->enqueue = dpaa2_eth_enqueue_qd;
+	else
+		priv->enqueue = dpaa2_eth_enqueue_fq;
+}
+
 /* Configure the DPNI object this interface is associated with */
 static int setup_dpni(struct fsl_mc_device *ls_dev)
 {
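
set_enqueue_mode() resolves the indirect call once, at probe time: DPNI API versions older than 7.9 only support enqueueing by queuing destination, while newer firmware can enqueue straight onto the Tx frame queue and skip the QDID translation. The comparison helper is assumed to already exist in dpaa2-eth.h with semantics along these lines (a sketch, not part of this diff):

	static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
						 u16 ver_major, u16 ver_minor)
	{
		/* negative: running DPNI version is older than major.minor */
		if (priv->dpni_ver_major == ver_major)
			return priv->dpni_ver_minor - ver_minor;
		return priv->dpni_ver_major - ver_major;
	}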
@@ -2255,6 +2284,8 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
 	if (err)
 		goto close;
 
+	set_enqueue_mode(priv);
+
 	priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
 				       dpaa2_eth_fs_count(priv), GFP_KERNEL);
 	if (!priv->cls_rules)
@@ -2339,6 +2370,7 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
 	}
 
 	fq->tx_qdbin = qid.qdbin;
+	fq->tx_fqid = qid.fqid;
 
 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
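
Caching qid.fqid alongside qid.qdbin gives dpaa2_eth_enqueue_fq() the frame queue ID it needs without an MC firmware call on the hot path. The matching field is presumably added to struct dpaa2_eth_fq in dpaa2-eth.h, next to the existing tx_qdbin member (sketch of the assumed header change):

	/* struct dpaa2_eth_fq, Tx members (sketch):
	 *	u32 tx_qdbin;
	 *	u32 tx_fqid;	(cached from dpni_get_queue(DPNI_QUEUE_TX))
	 */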