@@ -315,6 +315,9 @@ static u32 qede_xmit_type(struct qede_dev *edev,
315
315
(ipv6_hdr (skb )-> nexthdr == NEXTHDR_IPV6 ))
316
316
* ipv6_ext = 1 ;
317
317
318
+ if (skb -> encapsulation )
319
+ rc |= XMIT_ENC ;
320
+
318
321
if (skb_is_gso (skb ))
319
322
rc |= XMIT_LSO ;
320
323
@@ -376,6 +379,16 @@ static int map_frag_to_bd(struct qede_dev *edev,
376
379
return 0 ;
377
380
}
378
381
382
+ static u16 qede_get_skb_hlen (struct sk_buff * skb , bool is_encap_pkt )
383
+ {
384
+ if (is_encap_pkt )
385
+ return (skb_inner_transport_header (skb ) +
386
+ inner_tcp_hdrlen (skb ) - skb -> data );
387
+ else
388
+ return (skb_transport_header (skb ) +
389
+ tcp_hdrlen (skb ) - skb -> data );
390
+ }
391
+
379
392
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
380
393
#if ((MAX_SKB_FRAGS + 2 ) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET )
381
394
static bool qede_pkt_req_lin (struct qede_dev * edev , struct sk_buff * skb ,
@@ -386,8 +399,7 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
386
399
if (xmit_type & XMIT_LSO ) {
387
400
int hlen ;
388
401
389
- hlen = skb_transport_header (skb ) +
390
- tcp_hdrlen (skb ) - skb -> data ;
402
+ hlen = qede_get_skb_hlen (skb , xmit_type & XMIT_ENC );
391
403
392
404
/* linear payload would require its own BD */
393
405
if (skb_headlen (skb ) > hlen )
@@ -495,7 +507,18 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
495
507
first_bd -> data .bd_flags .bitfields |=
496
508
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT ;
497
509
498
- first_bd -> data .bitfields |= cpu_to_le16 (temp );
510
+ if (xmit_type & XMIT_ENC ) {
511
+ first_bd -> data .bd_flags .bitfields |=
512
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT ;
513
+ } else {
514
+ /* In cases when OS doesn't indicate for inner offloads
515
+ * when packet is tunnelled, we need to override the HW
516
+ * tunnel configuration so that packets are treated as
517
+ * regular non tunnelled packets and no inner offloads
518
+ * are done by the hardware.
519
+ */
520
+ first_bd -> data .bitfields |= cpu_to_le16 (temp );
521
+ }
499
522
500
523
/* If the packet is IPv6 with extension header, indicate that
501
524
* to FW and pass few params, since the device cracker doesn't
@@ -511,10 +534,15 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
511
534
third_bd -> data .lso_mss =
512
535
cpu_to_le16 (skb_shinfo (skb )-> gso_size );
513
536
514
- first_bd -> data .bd_flags .bitfields |=
515
- 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT ;
516
- hlen = skb_transport_header (skb ) +
517
- tcp_hdrlen (skb ) - skb -> data ;
537
+ if (unlikely (xmit_type & XMIT_ENC )) {
538
+ first_bd -> data .bd_flags .bitfields |=
539
+ 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT ;
540
+ hlen = qede_get_skb_hlen (skb , true);
541
+ } else {
542
+ first_bd -> data .bd_flags .bitfields |=
543
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT ;
544
+ hlen = qede_get_skb_hlen (skb , false);
545
+ }
518
546
519
547
/* @@@TBD - if will not be removed need to check */
520
548
third_bd -> data .bitfields |=
@@ -848,6 +876,9 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
848
876
849
877
if (csum_flag & QEDE_CSUM_UNNECESSARY )
850
878
skb -> ip_summed = CHECKSUM_UNNECESSARY ;
879
+
880
+ if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY )
881
+ skb -> csum_level = 1 ;
851
882
}
852
883
853
884
static inline void qede_skb_receive (struct qede_dev * edev ,
@@ -1137,13 +1168,47 @@ static void qede_tpa_end(struct qede_dev *edev,
1137
1168
tpa_info -> skb = NULL ;
1138
1169
}
1139
1170
1140
- static u8 qede_check_csum (u16 flag )
1171
+ static bool qede_tunn_exist (u16 flag )
1172
+ {
1173
+ return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
1174
+ PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT ));
1175
+ }
1176
+
1177
+ static u8 qede_check_tunn_csum (u16 flag )
1178
+ {
1179
+ u16 csum_flag = 0 ;
1180
+ u8 tcsum = 0 ;
1181
+
1182
+ if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
1183
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT ))
1184
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
1185
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT ;
1186
+
1187
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1188
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT )) {
1189
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
1190
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT ;
1191
+ tcsum = QEDE_TUNN_CSUM_UNNECESSARY ;
1192
+ }
1193
+
1194
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
1195
+ PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
1196
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1197
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT ;
1198
+
1199
+ if (csum_flag & flag )
1200
+ return QEDE_CSUM_ERROR ;
1201
+
1202
+ return QEDE_CSUM_UNNECESSARY | tcsum ;
1203
+ }
1204
+
1205
+ static u8 qede_check_notunn_csum (u16 flag )
1141
1206
{
1142
1207
u16 csum_flag = 0 ;
1143
1208
u8 csum = 0 ;
1144
1209
1145
- if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1146
- PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT ) & flag ) {
1210
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1211
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT )) {
1147
1212
csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
1148
1213
PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT ;
1149
1214
csum = QEDE_CSUM_UNNECESSARY ;
@@ -1158,6 +1223,14 @@ static u8 qede_check_csum(u16 flag)
1158
1223
return csum ;
1159
1224
}
1160
1225
1226
+ static u8 qede_check_csum (u16 flag )
1227
+ {
1228
+ if (!qede_tunn_exist (flag ))
1229
+ return qede_check_notunn_csum (flag );
1230
+ else
1231
+ return qede_check_tunn_csum (flag );
1232
+ }
1233
+
1161
1234
static int qede_rx_int (struct qede_fastpath * fp , int budget )
1162
1235
{
1163
1236
struct qede_dev * edev = fp -> edev ;
@@ -1987,6 +2060,14 @@ static void qede_init_ndev(struct qede_dev *edev)
1987
2060
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1988
2061
NETIF_F_TSO | NETIF_F_TSO6 ;
1989
2062
2063
+ /* Encap features */
2064
+ hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
2065
+ NETIF_F_TSO_ECN ;
2066
+ ndev -> hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2067
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
2068
+ NETIF_F_TSO6 | NETIF_F_GSO_GRE |
2069
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM ;
2070
+
1990
2071
ndev -> vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
1991
2072
NETIF_F_HIGHDMA ;
1992
2073
ndev -> features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
0 commit comments