Skip to content

Commit 993feee

Browse files
committed
Merge branch 'qed-tunneling-offload'
Manish Chopra says: ==================== qed/qede: Add tunneling support This patch series adds support for VXLAN, GRE and GENEVE tunnels to be used over this driver. With this support, adapter can perform TSO offload, inner/outer checksums offloads on TX and RX for encapsulated packets. V1->V2 [ Comments from Jesse Gross incorporated ] * Drop general infrastructure change patch. "net: Make vxlan/geneve default udp ports public" * Remove by default Linux default UDP ports configurations in driver. Instead, use general registration APIs for UDP port configurations * Removing .ndo_features_check - we will add it later with proper change. Please consider applying this series to net-next. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents ee1c279 + 14db81d commit 993feee

File tree

14 files changed: 811 additions, 16 deletions

drivers/net/ethernet/qlogic/Kconfig

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -103,4 +103,25 @@ config QEDE
103103
depends on QED
104104
---help---
105105
This enables the support for ...
106+
107+
config QEDE_VXLAN
	bool "Virtual eXtensible Local Area Network support"
	default n
	depends on QEDE && VXLAN && !(QEDE=y && VXLAN=m)
	---help---
	  This enables hardware offload support for VXLAN protocol over
	  qede module. Say Y here if you want to enable hardware offload
	  support for Virtual eXtensible Local Area Network (VXLAN)
	  in the driver.

config QEDE_GENEVE
	bool "Generic Network Virtualization Encapsulation (GENEVE) support"
	default n
	depends on QEDE && GENEVE && !(QEDE=y && GENEVE=m)
	---help---
	  This allows one to create GENEVE virtual interfaces that provide
	  Layer 2 Networks over Layer 3 Networks. GENEVE is often used
	  to tunnel virtual network infrastructure in virtualized environments.
	  Say Y here if you want to enable hardware offload support for
	  Generic Network Virtualization Encapsulation (GENEVE) in the driver.
126+
106127
endif # NET_VENDOR_QLOGIC

drivers/net/ethernet/qlogic/qed/qed.h

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,51 @@ struct qed_rt_data {
7474
bool *b_valid;
7575
};
7676

77+
/* Bit positions used inside the driver's 'tunn_mode' bitmasks to track
 * which tunnel types are enabled. Order defines the bit numbers — do not
 * reorder.
 */
enum qed_tunn_mode {
	QED_MODE_L2GENEVE_TUNN,
	QED_MODE_IPGENEVE_TUNN,
	QED_MODE_L2GRE_TUNN,
	QED_MODE_IPGRE_TUNN,
	QED_MODE_VXLAN_TUNN,
};
84+
85+
/* Driver-side tunnel classification schemes; these mirror the firmware's
 * 'enum tunnel_clss' values (see qed_hsi.h), so the order must be kept.
 */
enum qed_tunn_clss {
	QED_TUNN_CLSS_MAC_VLAN,
	QED_TUNN_CLSS_MAC_VNI,
	QED_TUNN_CLSS_INNER_MAC_VLAN,
	QED_TUNN_CLSS_INNER_MAC_VNI,
	MAX_QED_TUNN_CLSS,
};
92+
93+
struct qed_tunn_start_params {
94+
unsigned long tunn_mode;
95+
u16 vxlan_udp_port;
96+
u16 geneve_udp_port;
97+
u8 update_vxlan_udp_port;
98+
u8 update_geneve_udp_port;
99+
u8 tunn_clss_vxlan;
100+
u8 tunn_clss_l2geneve;
101+
u8 tunn_clss_ipgeneve;
102+
u8 tunn_clss_l2gre;
103+
u8 tunn_clss_ipgre;
104+
};
105+
106+
struct qed_tunn_update_params {
107+
unsigned long tunn_mode_update_mask;
108+
unsigned long tunn_mode;
109+
u16 vxlan_udp_port;
110+
u16 geneve_udp_port;
111+
u8 update_rx_pf_clss;
112+
u8 update_tx_pf_clss;
113+
u8 update_vxlan_udp_port;
114+
u8 update_geneve_udp_port;
115+
u8 tunn_clss_vxlan;
116+
u8 tunn_clss_l2geneve;
117+
u8 tunn_clss_ipgeneve;
118+
u8 tunn_clss_l2gre;
119+
u8 tunn_clss_ipgre;
120+
};
121+
77122
/* The PCI personality is not quite synonymous to protocol ID:
78123
* 1. All personalities need CORE connections
79124
* 2. The Ethernet personality may support also the RoCE protocol
@@ -430,6 +475,7 @@ struct qed_dev {
430475
u8 num_hwfns;
431476
struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
432477

478+
unsigned long tunn_mode;
433479
u32 drv_type;
434480

435481
struct qed_eth_stats *reset_stats;

drivers/net/ethernet/qlogic/qed/qed_dev.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -558,6 +558,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
558558

559559
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
560560
struct qed_ptt *p_ptt,
561+
struct qed_tunn_start_params *p_tunn,
561562
int hw_mode,
562563
bool b_hw_start,
563564
enum qed_int_mode int_mode,
@@ -625,7 +626,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
625626
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
626627

627628
/* send function start command */
628-
rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
629+
rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode);
629630
if (rc)
630631
DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
631632
}
@@ -672,6 +673,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
672673
}
673674

674675
int qed_hw_init(struct qed_dev *cdev,
676+
struct qed_tunn_start_params *p_tunn,
675677
bool b_hw_start,
676678
enum qed_int_mode int_mode,
677679
bool allow_npar_tx_switch,
@@ -724,7 +726,7 @@ int qed_hw_init(struct qed_dev *cdev,
724726
/* Fall into */
725727
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
726728
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
727-
p_hwfn->hw_info.hw_mode,
729+
p_tunn, p_hwfn->hw_info.hw_mode,
728730
b_hw_start, int_mode,
729731
allow_npar_tx_switch);
730732
break;

drivers/net/ethernet/qlogic/qed/qed_dev_api.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,7 @@ void qed_resc_setup(struct qed_dev *cdev);
6262
* @brief qed_hw_init -
6363
*
6464
* @param cdev
65+
* @param p_tunn
6566
* @param b_hw_start
6667
* @param int_mode - interrupt mode [msix, inta, etc.] to use.
6768
* @param allow_npar_tx_switch - npar tx switching to be used
@@ -72,6 +73,7 @@ void qed_resc_setup(struct qed_dev *cdev);
7273
* @return int
7374
*/
7475
int qed_hw_init(struct qed_dev *cdev,
76+
struct qed_tunn_start_params *p_tunn,
7577
bool b_hw_start,
7678
enum qed_int_mode int_mode,
7779
bool allow_npar_tx_switch,

drivers/net/ethernet/qlogic/qed/qed_hsi.h

Lines changed: 50 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ enum common_ramrod_cmd_id {
4646
COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
4747
COMMON_RAMROD_RESERVED,
4848
COMMON_RAMROD_RESERVED2,
49-
COMMON_RAMROD_RESERVED3,
49+
COMMON_RAMROD_PF_UPDATE,
5050
COMMON_RAMROD_EMPTY,
5151
MAX_COMMON_RAMROD_CMD_ID
5252
};
@@ -626,6 +626,42 @@ struct pf_start_ramrod_data {
626626
u8 reserved0[4];
627627
};
628628

629+
/* tunnel configuration */
630+
struct pf_update_tunnel_config {
631+
u8 update_rx_pf_clss;
632+
u8 update_tx_pf_clss;
633+
u8 set_vxlan_udp_port_flg;
634+
u8 set_geneve_udp_port_flg;
635+
u8 tx_enable_vxlan;
636+
u8 tx_enable_l2geneve;
637+
u8 tx_enable_ipgeneve;
638+
u8 tx_enable_l2gre;
639+
u8 tx_enable_ipgre;
640+
u8 tunnel_clss_vxlan;
641+
u8 tunnel_clss_l2geneve;
642+
u8 tunnel_clss_ipgeneve;
643+
u8 tunnel_clss_l2gre;
644+
u8 tunnel_clss_ipgre;
645+
__le16 vxlan_udp_port;
646+
__le16 geneve_udp_port;
647+
__le16 reserved[3];
648+
};
649+
650+
struct pf_update_ramrod_data {
651+
u32 reserved[2];
652+
u32 reserved_1[6];
653+
struct pf_update_tunnel_config tunnel_config;
654+
};
655+
656+
/* Tunnel classification scheme — firmware interface values; order and
 * the explicit zero base must be preserved.
 */
enum tunnel_clss {
	TUNNEL_CLSS_MAC_VLAN = 0,	/* classify on outer MAC + VLAN */
	TUNNEL_CLSS_MAC_VNI,		/* classify on outer MAC + VNI */
	TUNNEL_CLSS_INNER_MAC_VLAN,	/* classify on inner MAC + VLAN */
	TUNNEL_CLSS_INNER_MAC_VNI,	/* classify on inner MAC + VNI */
	MAX_TUNNEL_CLSS
};
664+
629665
enum ports_mode {
630666
ENGX2_PORTX1 /* 2 engines x 1 port */,
631667
ENGX2_PORTX2 /* 2 engines x 2 ports */,
@@ -1603,6 +1639,19 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
16031639
u16 start_pq,
16041640
u16 num_pqs);
16051641

1642+
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
1643+
struct qed_ptt *p_ptt, u16 dest_port);
1644+
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
1645+
struct qed_ptt *p_ptt, bool vxlan_enable);
1646+
void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
1647+
struct qed_ptt *p_ptt, bool eth_gre_enable,
1648+
bool ip_gre_enable);
1649+
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
1650+
struct qed_ptt *p_ptt, u16 dest_port);
1651+
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
1652+
struct qed_ptt *p_ptt, bool eth_geneve_enable,
1653+
bool ip_geneve_enable);
1654+
16061655
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
16071656
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
16081657
#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)

drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c

Lines changed: 127 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -788,3 +788,130 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
788788

789789
return true;
790790
}
791+
792+
static void
793+
qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
794+
{
795+
if (enable)
796+
set_bit(bit, var);
797+
else
798+
clear_bit(bit, var);
799+
}
800+
801+
#define PRS_ETH_TUNN_FIC_FORMAT -188897008
802+
803+
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
804+
struct qed_ptt *p_ptt,
805+
u16 dest_port)
806+
{
807+
qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
808+
qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
809+
qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
810+
}
811+
812+
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
813+
struct qed_ptt *p_ptt,
814+
bool vxlan_enable)
815+
{
816+
unsigned long reg_val = 0;
817+
u8 shift;
818+
819+
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
820+
shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
821+
qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
822+
823+
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
824+
825+
if (reg_val)
826+
qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
827+
PRS_ETH_TUNN_FIC_FORMAT);
828+
829+
reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
830+
shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
831+
qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
832+
833+
qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
834+
835+
qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
836+
vxlan_enable ? 1 : 0);
837+
}
838+
839+
/* Enable or disable ETH-over-GRE and IP-over-GRE recognition/offload in
 * the parser, NIG and DORQ (EDPM) blocks.
 */
void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			bool eth_gre_enable, bool ip_gre_enable)
{
	unsigned long reg_val;

	/* Parser: toggle both GRE flavours */
	reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
	qed_set_tunnel_type_enable_bit(&reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
		eth_gre_enable);
	qed_set_tunnel_type_enable_bit(&reg_val,
		PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
		ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);

	/* With any encapsulation enabled, switch the parser output format */
	if (reg_val)
		qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
		       PRS_ETH_TUNN_FIC_FORMAT);

	/* NIG: toggle the matching GRE enable bits */
	reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
	qed_set_tunnel_type_enable_bit(&reg_val,
		NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
		eth_gre_enable);
	qed_set_tunnel_type_enable_bit(&reg_val,
		NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
		ip_gre_enable);
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);

	/* DORQ: EDPM over GRE tunnels, per flavour */
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
	       eth_gre_enable ? 1 : 0);
	qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
	       ip_gre_enable ? 1 : 0);
}
869+
870+
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
871+
struct qed_ptt *p_ptt,
872+
u16 dest_port)
873+
{
874+
qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
875+
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
876+
qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
877+
}
878+
879+
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
880+
struct qed_ptt *p_ptt,
881+
bool eth_geneve_enable,
882+
bool ip_geneve_enable)
883+
{
884+
unsigned long reg_val = 0;
885+
u8 shift;
886+
887+
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
888+
shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
889+
qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
890+
891+
shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
892+
qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
893+
894+
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
895+
if (reg_val)
896+
qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
897+
PRS_ETH_TUNN_FIC_FORMAT);
898+
899+
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
900+
eth_geneve_enable ? 1 : 0);
901+
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
902+
903+
/* comp ver */
904+
reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
905+
qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
906+
qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
907+
qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
908+
909+
/* EDPM with geneve tunnel not supported in BB_B0 */
910+
if (QED_IS_BB_B0(p_hwfn->cdev))
911+
return;
912+
913+
qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
914+
eth_geneve_enable ? 1 : 0);
915+
qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
916+
ip_geneve_enable ? 1 : 0);
917+
}

drivers/net/ethernet/qlogic/qed/qed_l2.c

Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1884,6 +1884,36 @@ static int qed_stop_txq(struct qed_dev *cdev,
18841884
return 0;
18851885
}
18861886

1887+
static int qed_tunn_configure(struct qed_dev *cdev,
1888+
struct qed_tunn_params *tunn_params)
1889+
{
1890+
struct qed_tunn_update_params tunn_info;
1891+
int i, rc;
1892+
1893+
memset(&tunn_info, 0, sizeof(tunn_info));
1894+
if (tunn_params->update_vxlan_port == 1) {
1895+
tunn_info.update_vxlan_udp_port = 1;
1896+
tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
1897+
}
1898+
1899+
if (tunn_params->update_geneve_port == 1) {
1900+
tunn_info.update_geneve_udp_port = 1;
1901+
tunn_info.geneve_udp_port = tunn_params->geneve_port;
1902+
}
1903+
1904+
for_each_hwfn(cdev, i) {
1905+
struct qed_hwfn *hwfn = &cdev->hwfns[i];
1906+
1907+
rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
1908+
QED_SPQ_MODE_EBLOCK, NULL);
1909+
1910+
if (rc)
1911+
return rc;
1912+
}
1913+
1914+
return 0;
1915+
}
1916+
18871917
static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
18881918
enum qed_filter_rx_mode_type type)
18891919
{
@@ -2026,6 +2056,7 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
20262056
.fastpath_stop = &qed_fastpath_stop,
20272057
.eth_cqe_completion = &qed_fp_cqe_completion,
20282058
.get_vport_stats = &qed_get_vport_stats,
2059+
.tunn_config = &qed_tunn_configure,
20292060
};
20302061

20312062
const struct qed_eth_ops *qed_get_eth_ops(void)

0 commit comments

Comments
 (0)