Skip to content

Commit 0e913f2

Browse files
committed
Merge branch 'mlxsw-Introduce-support-for-CQEv1-2'
Ido Schimmel says: ==================== mlxsw: Introduce support for CQEv1/2 Jiri says: Current SwitchX2 and Spectrum FWs support CQEv0 and that is what we implement in mlxsw. Spectrum FW also supports CQE v1 and v2. However, Spectrum-2 won't support CQEv0. Prepare for it and setup the CQE versions to use according to what is queried from FW. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
2 parents faa1cd8 + 4110768 commit 0e913f2

File tree

4 files changed

+208
-51
lines changed

4 files changed

+208
-51
lines changed

drivers/net/ethernet/mellanox/mlxsw/cmd.h

Lines changed: 27 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -424,10 +424,15 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
424424
MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
425425

426426
/* cmd_mbox_query_aq_cap_log_max_cq_sz
427-
* Log (base 2) of max CQEs allowed on CQ.
427+
* Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv0 and CQEv1.
428428
*/
429429
MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
430430

431+
/* cmd_mbox_query_aq_cap_log_max_cqv2_sz
432+
* Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv2.
433+
*/
434+
MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cqv2_sz, 0x08, 16, 8);
435+
431436
/* cmd_mbox_query_aq_cap_max_num_cqs
432437
* Maximum number of CQs.
433438
*/
@@ -662,6 +667,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1);
662667
*/
663668
MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
664669

670+
/* cmd_mbox_config_set_cqe_version
671+
* Capability bit. Setting a bit to 1 configures the profile
672+
* according to the mailbox contents.
673+
*/
674+
MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1);
675+
665676
/* cmd_mbox_config_profile_max_vepa_channels
666677
* Maximum number of VEPA channels per port (0 through 16)
667678
* 0 - multi-channel VEPA is disabled
@@ -841,6 +852,14 @@ MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
841852
MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
842853
0x60, 0, 8, 0x08, 0x00, false);
843854

855+
/* cmd_mbox_config_profile_cqe_version
856+
* CQE version:
857+
* 0: CQE version is 0
858+
* 1: CQE version is either 1 or 2
859+
* CQE ver 1 or 2 is configured by Completion Queue Context field cqe_ver.
860+
*/
861+
MLXSW_ITEM32(cmd_mbox, config_profile, cqe_version, 0xB0, 0, 8);
862+
844863
/* ACCESS_REG - Access EMAD Supported Register
845864
* ----------------------------------
846865
* OpMod == 0 (N/A), INMmod == 0 (N/A)
@@ -1032,11 +1051,15 @@ static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
10321051
0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
10331052
}
10341053

1035-
/* cmd_mbox_sw2hw_cq_cv
1054+
enum mlxsw_cmd_mbox_sw2hw_cq_cqe_ver {
1055+
MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1,
1056+
MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2,
1057+
};
1058+
1059+
/* cmd_mbox_sw2hw_cq_cqe_ver
10361060
* CQE Version.
1037-
* 0 - CQE Version 0, 1 - CQE Version 1
10381061
*/
1039-
MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
1062+
MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cqe_ver, 0x00, 28, 4);
10401063

10411064
/* cmd_mbox_sw2hw_cq_c_eqn
10421065
* Event Queue this CQ reports completion events to.

drivers/net/ethernet/mellanox/mlxsw/pci.c

Lines changed: 111 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,7 @@ struct mlxsw_pci_queue {
117117
struct {
118118
u32 comp_sdq_count;
119119
u32 comp_rdq_count;
120+
enum mlxsw_pci_cqe_v v;
120121
} cq;
121122
struct {
122123
u32 ev_cmd_count;
@@ -155,6 +156,8 @@ struct mlxsw_pci {
155156
} cmd;
156157
struct mlxsw_bus_info bus_info;
157158
const struct pci_device_id *id;
159+
enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
160+
u8 num_sdq_cqs; /* Number of CQs used for SDQs */
158161
};
159162

160163
static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -202,24 +205,6 @@ static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
202205
return owner_bit != !!(q->consumer_counter & q->count);
203206
}
204207

205-
static char *
206-
mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
207-
u32 (*get_elem_owner_func)(const char *))
208-
{
209-
struct mlxsw_pci_queue_elem_info *elem_info;
210-
char *elem;
211-
bool owner_bit;
212-
213-
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
214-
elem = elem_info->elem;
215-
owner_bit = get_elem_owner_func(elem);
216-
if (mlxsw_pci_elem_hw_owned(q, owner_bit))
217-
return NULL;
218-
q->consumer_counter++;
219-
rmb(); /* make sure we read owned bit before the rest of elem */
220-
return elem;
221-
}
222-
223208
static struct mlxsw_pci_queue_type_group *
224209
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
225210
enum mlxsw_pci_queue_type q_type)
@@ -494,6 +479,17 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
494479
}
495480
}
496481

482+
static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
483+
struct mlxsw_pci_queue *q)
484+
{
485+
q->u.cq.v = mlxsw_pci->max_cqe_ver;
486+
487+
/* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
488+
if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
489+
q->num < mlxsw_pci->num_sdq_cqs)
490+
q->u.cq.v = MLXSW_PCI_CQE_V1;
491+
}
492+
497493
static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
498494
struct mlxsw_pci_queue *q)
499495
{
@@ -505,10 +501,16 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
505501
for (i = 0; i < q->count; i++) {
506502
char *elem = mlxsw_pci_queue_elem_get(q, i);
507503

508-
mlxsw_pci_cqe_owner_set(elem, 1);
504+
mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
509505
}
510506

511-
mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
507+
if (q->u.cq.v == MLXSW_PCI_CQE_V1)
508+
mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
509+
MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
510+
else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
511+
mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
512+
MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
513+
512514
mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
513515
mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
514516
mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
@@ -559,7 +561,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
559561
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
560562
struct mlxsw_pci_queue *q,
561563
u16 consumer_counter_limit,
562-
char *cqe)
564+
enum mlxsw_pci_cqe_v cqe_v, char *cqe)
563565
{
564566
struct pci_dev *pdev = mlxsw_pci->pdev;
565567
struct mlxsw_pci_queue_elem_info *elem_info;
@@ -579,10 +581,11 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
579581
if (q->consumer_counter++ != consumer_counter_limit)
580582
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
581583

582-
if (mlxsw_pci_cqe_lag_get(cqe)) {
584+
if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
583585
rx_info.is_lag = true;
584-
rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe);
585-
rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe);
586+
rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
587+
rx_info.lag_port_index =
588+
mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
586589
} else {
587590
rx_info.is_lag = false;
588591
rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
@@ -591,7 +594,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
591594
rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
592595

593596
byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
594-
if (mlxsw_pci_cqe_crc_get(cqe))
597+
if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
595598
byte_count -= ETH_FCS_LEN;
596599
skb_put(skb, byte_count);
597600
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
@@ -608,7 +611,18 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
608611

609612
static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
610613
{
611-
return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
614+
struct mlxsw_pci_queue_elem_info *elem_info;
615+
char *elem;
616+
bool owner_bit;
617+
618+
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
619+
elem = elem_info->elem;
620+
owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
621+
if (mlxsw_pci_elem_hw_owned(q, owner_bit))
622+
return NULL;
623+
q->consumer_counter++;
624+
rmb(); /* make sure we read owned bit before the rest of elem */
625+
return elem;
612626
}
613627

614628
static void mlxsw_pci_cq_tasklet(unsigned long data)
@@ -621,8 +635,8 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
621635

622636
while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
623637
u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
624-
u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
625-
u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
638+
u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
639+
u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
626640

627641
if (sendq) {
628642
struct mlxsw_pci_queue *sdq;
@@ -636,7 +650,7 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
636650

637651
rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
638652
mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
639-
wqe_counter, cqe);
653+
wqe_counter, q->u.cq.v, cqe);
640654
q->u.cq.comp_rdq_count++;
641655
}
642656
if (++items == credits)
@@ -648,6 +662,18 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
648662
}
649663
}
650664

665+
static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
666+
{
667+
return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
668+
MLXSW_PCI_CQE01_COUNT;
669+
}
670+
671+
static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
672+
{
673+
return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
674+
MLXSW_PCI_CQE01_SIZE;
675+
}
676+
651677
static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
652678
struct mlxsw_pci_queue *q)
653679
{
@@ -696,7 +722,18 @@ static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
696722

697723
static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
698724
{
699-
return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
725+
struct mlxsw_pci_queue_elem_info *elem_info;
726+
char *elem;
727+
bool owner_bit;
728+
729+
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
730+
elem = elem_info->elem;
731+
owner_bit = mlxsw_pci_eqe_owner_get(elem);
732+
if (mlxsw_pci_elem_hw_owned(q, owner_bit))
733+
return NULL;
734+
q->consumer_counter++;
735+
rmb(); /* make sure we read owned bit before the rest of elem */
736+
return elem;
700737
}
701738

702739
static void mlxsw_pci_eq_tasklet(unsigned long data)
@@ -749,11 +786,15 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
749786
struct mlxsw_pci_queue_ops {
750787
const char *name;
751788
enum mlxsw_pci_queue_type type;
789+
void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
790+
struct mlxsw_pci_queue *q);
752791
int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
753792
struct mlxsw_pci_queue *q);
754793
void (*fini)(struct mlxsw_pci *mlxsw_pci,
755794
struct mlxsw_pci_queue *q);
756795
void (*tasklet)(unsigned long data);
796+
u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
797+
u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
757798
u16 elem_count;
758799
u8 elem_size;
759800
};
@@ -776,11 +817,12 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
776817

777818
static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
778819
.type = MLXSW_PCI_QUEUE_TYPE_CQ,
820+
.pre_init = mlxsw_pci_cq_pre_init,
779821
.init = mlxsw_pci_cq_init,
780822
.fini = mlxsw_pci_cq_fini,
781823
.tasklet = mlxsw_pci_cq_tasklet,
782-
.elem_count = MLXSW_PCI_CQE_COUNT,
783-
.elem_size = MLXSW_PCI_CQE_SIZE
824+
.elem_count_f = mlxsw_pci_cq_elem_count,
825+
.elem_size_f = mlxsw_pci_cq_elem_size
784826
};
785827

786828
static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
@@ -800,10 +842,15 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
800842
int i;
801843
int err;
802844

803-
spin_lock_init(&q->lock);
804845
q->num = q_num;
805-
q->count = q_ops->elem_count;
806-
q->elem_size = q_ops->elem_size;
846+
if (q_ops->pre_init)
847+
q_ops->pre_init(mlxsw_pci, q);
848+
849+
spin_lock_init(&q->lock);
850+
q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
851+
q_ops->elem_count;
852+
q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
853+
q_ops->elem_size;
807854
q->type = q_ops->type;
808855
q->pci = mlxsw_pci;
809856

@@ -832,7 +879,7 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
832879

833880
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
834881
elem_info->elem =
835-
__mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
882+
__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
836883
}
837884

838885
mlxsw_cmd_mbox_zero(mbox);
@@ -912,6 +959,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
912959
u8 rdq_log2sz;
913960
u8 num_cqs;
914961
u8 cq_log2sz;
962+
u8 cqv2_log2sz;
915963
u8 num_eqs;
916964
u8 eq_log2sz;
917965
int err;
@@ -927,6 +975,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
927975
rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
928976
num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
929977
cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
978+
cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
930979
num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
931980
eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
932981

@@ -938,12 +987,16 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
938987

939988
if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
940989
(1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
941-
(1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
990+
(1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
991+
(mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
992+
(1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
942993
(1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
943994
dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
944995
return -EINVAL;
945996
}
946997

998+
mlxsw_pci->num_sdq_cqs = num_sdqs;
999+
9471000
err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
9481001
num_eqs);
9491002
if (err) {
@@ -1184,6 +1237,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
11841237
mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
11851238
&profile->swid_config[i]);
11861239

1240+
if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
1241+
mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
1242+
mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
1243+
}
1244+
11871245
return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
11881246
}
11891247

@@ -1378,6 +1436,21 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
13781436
if (err)
13791437
goto err_query_resources;
13801438

1439+
if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
1440+
MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
1441+
mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
1442+
else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
1443+
MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
1444+
mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
1445+
else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
1446+
MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
1447+
!MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
1448+
mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
1449+
} else {
1450+
dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
1451+
goto err_cqe_v_check;
1452+
}
1453+
13811454
err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
13821455
if (err)
13831456
goto err_config_profile;
@@ -1400,6 +1473,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
14001473
mlxsw_pci_aqs_fini(mlxsw_pci);
14011474
err_aqs_init:
14021475
err_config_profile:
1476+
err_cqe_v_check:
14031477
err_query_resources:
14041478
err_boardinfo:
14051479
mlxsw_pci_fw_area_fini(mlxsw_pci);

0 commit comments

Comments (0)