Skip to content

Commit 14db81d

Browse files
Manish Chopra authored and davem330 (David S. Miller) committed
qede: Add fastpath support for tunneling
This patch enables netdev tunneling features and adds TX/RX fastpath support for tunneling in driver. Signed-off-by: Manish Chopra <manish.chopra@qlogic.com> Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com> Signed-off-by: Ariel Elior <Ariel.Elior@qlogic.com> Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent f798586 commit 14db81d

File tree

2 files changed

+92
-10
lines changed

2 files changed

+92
-10
lines changed

drivers/net/ethernet/qlogic/qede/qede.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -290,6 +290,7 @@ struct qede_fastpath {
290290

291291
#define QEDE_CSUM_ERROR BIT(0)
292292
#define QEDE_CSUM_UNNECESSARY BIT(1)
293+
#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)
293294

294295
#define QEDE_SP_RX_MODE 1
295296
#define QEDE_SP_VXLAN_PORT_CONFIG 2

drivers/net/ethernet/qlogic/qede/qede_main.c

Lines changed: 91 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -315,6 +315,9 @@ static u32 qede_xmit_type(struct qede_dev *edev,
315315
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
316316
*ipv6_ext = 1;
317317

318+
if (skb->encapsulation)
319+
rc |= XMIT_ENC;
320+
318321
if (skb_is_gso(skb))
319322
rc |= XMIT_LSO;
320323

@@ -376,6 +379,16 @@ static int map_frag_to_bd(struct qede_dev *edev,
376379
return 0;
377380
}
378381

382+
/* Return the header length of @skb: the span from the start of the
 * linear data area up to and including the TCP header.  For an
 * encapsulated packet the inner (post-tunnel) transport header is
 * used, since that is the header the HW offloads operate on.
 */
static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
	unsigned char *l4_hdr;
	unsigned int l4_len;

	if (is_encap_pkt) {
		l4_hdr = skb_inner_transport_header(skb);
		l4_len = inner_tcp_hdrlen(skb);
	} else {
		l4_hdr = skb_transport_header(skb);
		l4_len = tcp_hdrlen(skb);
	}

	return l4_hdr + l4_len - skb->data;
}
391+
379392
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
380393
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
381394
static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
@@ -386,8 +399,7 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
386399
if (xmit_type & XMIT_LSO) {
387400
int hlen;
388401

389-
hlen = skb_transport_header(skb) +
390-
tcp_hdrlen(skb) - skb->data;
402+
hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
391403

392404
/* linear payload would require its own BD */
393405
if (skb_headlen(skb) > hlen)
@@ -495,7 +507,18 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
495507
first_bd->data.bd_flags.bitfields |=
496508
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
497509

498-
first_bd->data.bitfields |= cpu_to_le16(temp);
510+
if (xmit_type & XMIT_ENC) {
511+
first_bd->data.bd_flags.bitfields |=
512+
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
513+
} else {
514+
/* In cases when OS doesn't indicate for inner offloads
515+
* when packet is tunnelled, we need to override the HW
516+
* tunnel configuration so that packets are treated as
517+
* regular non tunnelled packets and no inner offloads
518+
* are done by the hardware.
519+
*/
520+
first_bd->data.bitfields |= cpu_to_le16(temp);
521+
}
499522

500523
/* If the packet is IPv6 with extension header, indicate that
501524
* to FW and pass few params, since the device cracker doesn't
@@ -511,10 +534,15 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
511534
third_bd->data.lso_mss =
512535
cpu_to_le16(skb_shinfo(skb)->gso_size);
513536

514-
first_bd->data.bd_flags.bitfields |=
515-
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
516-
hlen = skb_transport_header(skb) +
517-
tcp_hdrlen(skb) - skb->data;
537+
if (unlikely(xmit_type & XMIT_ENC)) {
538+
first_bd->data.bd_flags.bitfields |=
539+
1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
540+
hlen = qede_get_skb_hlen(skb, true);
541+
} else {
542+
first_bd->data.bd_flags.bitfields |=
543+
1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
544+
hlen = qede_get_skb_hlen(skb, false);
545+
}
518546

519547
/* @@@TBD - if will not be removed need to check */
520548
third_bd->data.bitfields |=
@@ -848,6 +876,9 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
848876

849877
if (csum_flag & QEDE_CSUM_UNNECESSARY)
850878
skb->ip_summed = CHECKSUM_UNNECESSARY;
879+
880+
if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
881+
skb->csum_level = 1;
851882
}
852883

853884
static inline void qede_skb_receive(struct qede_dev *edev,
@@ -1137,13 +1168,47 @@ static void qede_tpa_end(struct qede_dev *edev,
11371168
tpa_info->skb = NULL;
11381169
}
11391170

1140-
static u8 qede_check_csum(u16 flag)
1171+
/* True when the parser flags mark the received frame as tunnelled. */
static bool qede_tunn_exist(u16 flag)
{
	u16 tunn_mask = PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
			PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT;

	return (flag & tunn_mask) != 0;
}
1176+
1177+
/* Validate the HW checksum results for a tunnelled frame.
 *
 * Builds a mask of error bits that are relevant for this frame and
 * tests it against @flag.  Returns QEDE_CSUM_ERROR on any relevant
 * error; otherwise QEDE_CSUM_UNNECESSARY, additionally OR'd with
 * QEDE_TUNN_CSUM_UNNECESSARY when the HW also validated the L4
 * checksum (presumably the inner one — reflected to the stack as
 * csum_level 1 by the caller).
 */
static u8 qede_check_tunn_csum(u16 flag)
{
	u16 err_mask;
	u8 tcsum = 0;

	/* IP header errors are always relevant. */
	err_mask = PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
		   PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT;
	err_mask |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	/* Tunnel L4 checksum errors count only when HW calculated it. */
	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
		err_mask |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
			    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;

	/* L4 checksum validated by HW -> its error bit becomes relevant,
	 * and on success we can report the extra csum level.
	 */
	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		err_mask |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			    PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
	}

	if (flag & err_mask)
		return QEDE_CSUM_ERROR;

	return QEDE_CSUM_UNNECESSARY | tcsum;
}
1204+
1205+
static u8 qede_check_notunn_csum(u16 flag)
11411206
{
11421207
u16 csum_flag = 0;
11431208
u8 csum = 0;
11441209

1145-
if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1146-
PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
1210+
if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1211+
PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
11471212
csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
11481213
PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
11491214
csum = QEDE_CSUM_UNNECESSARY;
@@ -1158,6 +1223,14 @@ static u8 qede_check_csum(u16 flag)
11581223
return csum;
11591224
}
11601225

1226+
/* Dispatch RX checksum validation to the tunnel-aware or the plain
 * handler, depending on whether the parser saw a tunnel header.
 */
static u8 qede_check_csum(u16 flag)
{
	return qede_tunn_exist(flag) ? qede_check_tunn_csum(flag)
				     : qede_check_notunn_csum(flag);
}
1233+
11611234
static int qede_rx_int(struct qede_fastpath *fp, int budget)
11621235
{
11631236
struct qede_dev *edev = fp->edev;
@@ -1987,6 +2060,14 @@ static void qede_init_ndev(struct qede_dev *edev)
19872060
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
19882061
NETIF_F_TSO | NETIF_F_TSO6;
19892062

2063+
/* Encap features*/
2064+
hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
2065+
NETIF_F_TSO_ECN;
2066+
ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2067+
NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
2068+
NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2069+
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
2070+
19902071
ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
19912072
NETIF_F_HIGHDMA;
19922073
ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |

0 commit comments

Comments
 (0)