
Commit 8ce5cd5

Merge branch 'dpaa2-eth-Driver-updates'
Ioana Ciocoi Radulescu says:

====================
dpaa2-eth: Driver updates

First patch moves the driver to a page-per-frame memory model.
The others are minor tweaks and optimizations.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents e37268e + 20fb057 commit 8ce5cd5

2 files changed: +83 -55 lines changed

drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c

Lines changed: 71 additions & 39 deletions
@@ -86,16 +86,16 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv,
 	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
 		addr = dpaa2_sg_get_addr(&sgt[i]);
 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
-		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
 
-		skb_free_frag(sg_vaddr);
+		free_pages((unsigned long)sg_vaddr, 0);
 		if (dpaa2_sg_is_final(&sgt[i]))
 			break;
 	}
 
 free_buf:
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);
 }
 
 /* Build a linear skb based on a single-buffer frame descriptor */
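
A note on the new buffer lifecycle (a sketch of my own, not part of the
patch): the dma_unmap_page()/free_pages() calls above undo the
dev_alloc_pages()/dma_map_page() pairing introduced in add_bufs() further
down. Roughly:

    /* Per-buffer lifecycle under the page-per-frame model; 'dev' and
     * 'priv' stand in for the driver's real context.
     */
    struct page *page = dev_alloc_pages(0);        /* seeding, add_bufs() */
    dma_addr_t addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
                                   DMA_BIDIRECTIONAL);
    /* ... buffer released to hardware, frame written by WRIOP ... */
    void *vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
    dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
    free_pages((unsigned long)vaddr, 0);           /* or fed to build_skb() */
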
@@ -109,7 +109,7 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
 
 	ch->buf_count--;
 
-	skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
+	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
 	if (unlikely(!skb))
 		return NULL;
 
@@ -144,19 +144,19 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
 		/* Get the address and length from the S/G entry */
 		sg_addr = dpaa2_sg_get_addr(sge);
 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
-		dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
 
 		sg_length = dpaa2_sg_get_len(sge);
 
 		if (i == 0) {
 			/* We build the skb around the first data buffer */
-			skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
+			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
 			if (unlikely(!skb)) {
 				/* Free the first SG entry now, since we already
 				 * unmapped it and obtained the virtual address
 				 */
-				skb_free_frag(sg_vaddr);
+				free_pages((unsigned long)sg_vaddr, 0);
 
 				/* We still need to subtract the buffers used
 				 * by this FD from our software counter
@@ -211,9 +211,9 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
 
 	for (i = 0; i < count; i++) {
 		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
-		dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
-		skb_free_frag(vaddr);
+		dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
+		free_pages((unsigned long)vaddr, 0);
 	}
 }
 
@@ -264,9 +264,7 @@ static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
 
 	fq = &priv->fq[queue_id];
 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-		err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
-						  priv->tx_qdid, 0,
-						  fq->tx_qdbin, fd);
+		err = priv->enqueue(priv, fq, fd, 0);
 		if (err != -EBUSY)
 			break;
 	}
@@ -378,16 +376,16 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
 			return;
 		}
 
-		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
 		skb = build_linear_skb(ch, fd, vaddr);
 	} else if (fd_format == dpaa2_fd_sg) {
 		WARN_ON(priv->xdp_prog);
 
-		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
-				 DMA_BIDIRECTIONAL);
+		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
+			       DMA_BIDIRECTIONAL);
 		skb = build_frag_skb(priv, ch, buf_data);
-		skb_free_frag(vaddr);
+		free_pages((unsigned long)vaddr, 0);
 		percpu_extras->rx_sg_frames++;
 		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
 	} else {
@@ -657,7 +655,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
  * dpaa2_eth_tx().
  */
 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
-		       const struct dpaa2_fd *fd)
+		       const struct dpaa2_fd *fd, bool in_napi)
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	dma_addr_t fd_addr;
@@ -712,7 +710,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
 	skb_free_frag(skbh);
 
 	/* Move on with skb release */
-	dev_kfree_skb(skb);
+	napi_consume_skb(skb, in_napi);
 }
 
 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
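
Hedged aside: napi_consume_skb()'s second argument is the NAPI budget;
nonzero means the caller runs in NAPI context and the skb may be recycled
via the per-CPU NAPI cache, while zero falls back to dev_consume_skb_any(),
which is safe in any context. That matches the two call sites in this patch:

    free_tx_fd(priv, &fd, false);  /* dpaa2_eth_tx(): error path, possibly
                                    * outside NAPI context */
    free_tx_fd(priv, fd, true);    /* dpaa2_eth_tx_conf(): NAPI poll */
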
@@ -785,17 +783,15 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
 	queue_mapping = skb_get_queue_mapping(skb);
 	fq = &priv->fq[queue_mapping];
 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
-		err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
-						  priv->tx_qdid, 0,
-						  fq->tx_qdbin, &fd);
+		err = priv->enqueue(priv, fq, &fd, 0);
 		if (err != -EBUSY)
 			break;
 	}
 	percpu_extras->tx_portal_busy += i;
 	if (unlikely(err < 0)) {
 		percpu_stats->tx_errors++;
 		/* Clean up everything, including freeing the skb */
-		free_tx_fd(priv, &fd);
+		free_tx_fd(priv, &fd, false);
 	} else {
 		fd_len = dpaa2_fd_get_len(&fd);
 		percpu_stats->tx_packets++;
@@ -837,7 +833,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
 
 	/* Check frame errors in the FD field */
 	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
-	free_tx_fd(priv, fd);
+	free_tx_fd(priv, fd, true);
 
 	if (likely(!fd_errors))
 		return;
@@ -903,30 +899,32 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
-	void *buf;
+	struct page *page;
 	dma_addr_t addr;
 	int i, err;
 
 	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
 		/* Allocate buffer visible to WRIOP + skb shared info +
 		 * alignment padding
 		 */
-		buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
-		if (unlikely(!buf))
+		/* allocate one page for each Rx buffer. WRIOP sees
+		 * the entire page except for a tailroom reserved for
+		 * skb shared info
+		 */
+		page = dev_alloc_pages(0);
+		if (!page)
 			goto err_alloc;
 
-		buf = PTR_ALIGN(buf, priv->rx_buf_align);
-
-		addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
-				      DMA_BIDIRECTIONAL);
+		addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
+				    DMA_BIDIRECTIONAL);
 		if (unlikely(dma_mapping_error(dev, addr)))
 			goto err_map;
 
 		buf_array[i] = addr;
 
 		/* tracing point */
 		trace_dpaa2_eth_buf_seed(priv->net_dev,
-					 buf, dpaa2_eth_buf_raw_size(priv),
+					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
 					 addr, DPAA2_ETH_RX_BUF_SIZE,
 					 bpid);
 	}
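
Why PTR_ALIGN() could be dropped here (my inference, not stated in the
diff): an order-0 page is PAGE_SIZE-aligned, which already satisfies the
strictest Rx alignment the driver ever requests from WRIOP. A hypothetical
compile-time check capturing that assumption:

    /* Assumes DPAA2_ETH_RX_BUF_ALIGN_REV1 is the strictest value ever
     * passed as buf_layout.data_align (see set_buffer_layout()).
     */
    BUILD_BUG_ON(PAGE_SIZE % DPAA2_ETH_RX_BUF_ALIGN_REV1 != 0);
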
@@ -948,7 +946,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv,
 	return i;
 
 err_map:
-	skb_free_frag(buf);
+	__free_pages(page, 0);
 err_alloc:
 	/* If we managed to allocate at least some buffers,
 	 * release them to hardware
@@ -2134,6 +2132,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
 {
 	struct device *dev = priv->net_dev->dev.parent;
 	struct dpni_buffer_layout buf_layout = {0};
+	u16 rx_buf_align;
 	int err;
 
 	/* We need to check for WRIOP version 1.0.0, but depending on the MC
@@ -2142,9 +2141,9 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
 	 */
 	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
 	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
-		priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
+		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
 	else
-		priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
+		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
 
 	/* tx buffer */
 	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
@@ -2184,7 +2183,7 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
 	/* rx buffer */
 	buf_layout.pass_frame_status = true;
 	buf_layout.pass_parser_result = true;
-	buf_layout.data_align = priv->rx_buf_align;
+	buf_layout.data_align = rx_buf_align;
 	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
 	buf_layout.private_data_size = 0;
 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
@@ -2202,6 +2201,36 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
 	return 0;
 }
 
+#define DPNI_ENQUEUE_FQID_VER_MAJOR	7
+#define DPNI_ENQUEUE_FQID_VER_MINOR	9
+
+static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
+				       struct dpaa2_eth_fq *fq,
+				       struct dpaa2_fd *fd, u8 prio)
+{
+	return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
+					   priv->tx_qdid, prio,
+					   fq->tx_qdbin, fd);
+}
+
+static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
+				       struct dpaa2_eth_fq *fq,
+				       struct dpaa2_fd *fd,
+				       u8 prio __always_unused)
+{
+	return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
+					   fq->tx_fqid, fd);
+}
+
+static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
+{
+	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+		priv->enqueue = dpaa2_eth_enqueue_qd;
+	else
+		priv->enqueue = dpaa2_eth_enqueue_fq;
+}
+
 /* Configure the DPNI object this interface is associated with */
 static int setup_dpni(struct fsl_mc_device *ls_dev)
 {
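
The idea, as I read it: MC firmware exposing DPNI API 7.9 or newer lets the
driver enqueue Tx frames directly by frame queue ID, skipping the
QDID/QDBIN translation; older firmware keeps the queuing-destination path.
The choice is made once at probe time, so the hot path is a single indirect
call:

    /* Both dpaa2_eth_tx() and xdp_enqueue() now go through the pointer
     * installed by set_enqueue_mode():
     */
    err = priv->enqueue(priv, fq, &fd, 0);
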
@@ -2255,6 +2284,8 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
 	if (err)
 		goto close;
 
+	set_enqueue_mode(priv);
+
 	priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
 				       dpaa2_eth_fs_count(priv), GFP_KERNEL);
 	if (!priv->cls_rules)
@@ -2339,6 +2370,7 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
 	}
 
 	fq->tx_qdbin = qid.qdbin;
+	fq->tx_fqid = qid.fqid;
 
 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,

drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h

Lines changed: 12 additions & 16 deletions
@@ -53,7 +53,8 @@
  */
 #define DPAA2_ETH_MAX_FRAMES_PER_QUEUE	(DPAA2_ETH_TAILDROP_THRESH / 64)
 #define DPAA2_ETH_NUM_BUFS		(DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
-#define DPAA2_ETH_REFILL_THRESH		DPAA2_ETH_MAX_FRAMES_PER_QUEUE
+#define DPAA2_ETH_REFILL_THRESH \
+	(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
 
 /* Maximum number of buffers that can be acquired/released through a single
  * QBMan command
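
A worked example of the new threshold, assuming the values defined
elsewhere in this header (DPAA2_ETH_TAILDROP_THRESH = 64 * 1024 and
DPAA2_ETH_BUFS_PER_CMD = 7; both are assumptions here, since those lines
are not part of this diff):

    /* MAX_FRAMES_PER_QUEUE = (64 * 1024) / 64 = 1024
     * NUM_BUFS             = 1024 + 256      = 1280
     * old REFILL_THRESH    = 1024: refill after ~256 buffers consumed
     * new REFILL_THRESH    = 1280 - 7 = 1273: refill as soon as one full
     *                        QBMan command's worth (7 buffers) can be seeded
     */
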
@@ -63,9 +64,11 @@
 /* Hardware requires alignment for ingress/egress buffer addresses */
 #define DPAA2_ETH_TX_BUF_ALIGN		64
 
-#define DPAA2_ETH_RX_BUF_SIZE		2048
-#define DPAA2_ETH_SKB_SIZE \
-	(DPAA2_ETH_RX_BUF_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define DPAA2_ETH_RX_BUF_RAW_SIZE	PAGE_SIZE
+#define DPAA2_ETH_RX_BUF_TAILROOM \
+	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define DPAA2_ETH_RX_BUF_SIZE \
+	(DPAA2_ETH_RX_BUF_RAW_SIZE - DPAA2_ETH_RX_BUF_TAILROOM)
 
 /* Hardware annotation area in RX/TX buffers */
 #define DPAA2_ETH_RX_HWA_SIZE		64
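
Resulting Rx page layout, sketched with illustrative numbers (PAGE_SIZE =
4096 and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) = 320 are
assumptions for a typical 64-bit build, not values stated in the patch):

    /* |<--- DPAA2_ETH_RX_BUF_SIZE = 4096 - 320 = 3776 --->|<--- 320 --->|
     * |          visible to WRIOP (frame data)            |  tailroom   |
     * page start                            skb_shared_info lives here ^
     */
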
@@ -274,6 +277,7 @@ struct dpaa2_eth_priv;
 struct dpaa2_eth_fq {
 	u32 fqid;
 	u32 tx_qdbin;
+	u32 tx_fqid;
 	u16 flowid;
 	int target_cpu;
 	u32 dq_frames;
@@ -326,6 +330,9 @@ struct dpaa2_eth_priv {
 
 	u8 num_fqs;
 	struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
+	int (*enqueue)(struct dpaa2_eth_priv *priv,
+		       struct dpaa2_eth_fq *fq,
+		       struct dpaa2_fd *fd, u8 prio);
 
 	u8 num_channels;
 	struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS];
@@ -343,7 +350,6 @@ struct dpaa2_eth_priv {
 	bool rx_tstamp; /* Rx timestamping enabled */
 
 	u16 tx_qdid;
-	u16 rx_buf_align;
 	struct fsl_mc_io *mc_io;
 	/* Cores which have an affine DPIO/DPCON.
 	 * This is the cpu set on which Rx and Tx conf frames are processed
@@ -418,15 +424,6 @@ enum dpaa2_eth_rx_dist {
 	DPAA2_ETH_RX_DIST_CLS
 };
 
-/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but the skb built around
- * the buffer also needs space for its shared info struct, and we need
- * to allocate enough to accommodate hardware alignment restrictions
- */
-static inline unsigned int dpaa2_eth_buf_raw_size(struct dpaa2_eth_priv *priv)
-{
-	return DPAA2_ETH_SKB_SIZE + priv->rx_buf_align;
-}
-
 static inline
 unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
 				       struct sk_buff *skb)
@@ -451,8 +448,7 @@ unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
  */
 static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
 {
-	return priv->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN -
-	       DPAA2_ETH_RX_HWA_SIZE;
+	return priv->tx_data_offset - DPAA2_ETH_RX_HWA_SIZE;
 }
 
 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
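
My reading of the headroom change (an inference, the diff does not spell it
out): with page-backed, naturally aligned Rx buffers, the Rx headroom no
longer needs the extra DPAA2_ETH_TX_BUF_ALIGN margin that kept forwarded
frames Tx-aligned, so tx_data_offset minus the hardware annotation area
suffices:

    /* old: tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN - DPAA2_ETH_RX_HWA_SIZE
     * new: tx_data_offset - DPAA2_ETH_RX_HWA_SIZE
     * e.g. with tx_data_offset = 192 (illustrative only) and HWA = 64,
     * the headroom drops from 192 + 64 - 64 = 192 to 192 - 64 = 128.
     */
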
