Skip to content

Commit b76550b

Browse files
jpirko authored and davem330 committed
mlxsw: pci: Introduce helpers to work with multiple CQE versions
Introduce definitions of fields in CQE version 1 and 2. Also, introduce common helpers that would call appropriate version-specific helpers according to the version enum passed.

Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 9b934a3 commit b76550b

File tree

2 files changed

+104
-42
lines changed

2 files changed

+104
-42
lines changed

drivers/net/ethernet/mellanox/mlxsw/pci.c

Lines changed: 40 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,7 @@ struct mlxsw_pci_queue {
117117
struct {
118118
u32 comp_sdq_count;
119119
u32 comp_rdq_count;
120+
enum mlxsw_pci_cqe_v v;
120121
} cq;
121122
struct {
122123
u32 ev_cmd_count;
@@ -202,24 +203,6 @@ static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
202203
return owner_bit != !!(q->consumer_counter & q->count);
203204
}
204205

205-
static char *
206-
mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
207-
u32 (*get_elem_owner_func)(const char *))
208-
{
209-
struct mlxsw_pci_queue_elem_info *elem_info;
210-
char *elem;
211-
bool owner_bit;
212-
213-
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
214-
elem = elem_info->elem;
215-
owner_bit = get_elem_owner_func(elem);
216-
if (mlxsw_pci_elem_hw_owned(q, owner_bit))
217-
return NULL;
218-
q->consumer_counter++;
219-
rmb(); /* make sure we read owned bit before the rest of elem */
220-
return elem;
221-
}
222-
223206
static struct mlxsw_pci_queue_type_group *
224207
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
225208
enum mlxsw_pci_queue_type q_type)
@@ -505,7 +488,7 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
505488
for (i = 0; i < q->count; i++) {
506489
char *elem = mlxsw_pci_queue_elem_get(q, i);
507490

508-
mlxsw_pci_cqe_owner_set(elem, 1);
491+
mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
509492
}
510493

511494
mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
@@ -559,7 +542,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
559542
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
560543
struct mlxsw_pci_queue *q,
561544
u16 consumer_counter_limit,
562-
char *cqe)
545+
enum mlxsw_pci_cqe_v cqe_v, char *cqe)
563546
{
564547
struct pci_dev *pdev = mlxsw_pci->pdev;
565548
struct mlxsw_pci_queue_elem_info *elem_info;
@@ -579,10 +562,11 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
579562
if (q->consumer_counter++ != consumer_counter_limit)
580563
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
581564

582-
if (mlxsw_pci_cqe_lag_get(cqe)) {
565+
if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
583566
rx_info.is_lag = true;
584-
rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe);
585-
rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe);
567+
rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
568+
rx_info.lag_port_index =
569+
mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
586570
} else {
587571
rx_info.is_lag = false;
588572
rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
@@ -591,7 +575,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
591575
rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
592576

593577
byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
594-
if (mlxsw_pci_cqe_crc_get(cqe))
578+
if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
595579
byte_count -= ETH_FCS_LEN;
596580
skb_put(skb, byte_count);
597581
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
@@ -608,7 +592,18 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
608592

609593
static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
610594
{
611-
return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
595+
struct mlxsw_pci_queue_elem_info *elem_info;
596+
char *elem;
597+
bool owner_bit;
598+
599+
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
600+
elem = elem_info->elem;
601+
owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
602+
if (mlxsw_pci_elem_hw_owned(q, owner_bit))
603+
return NULL;
604+
q->consumer_counter++;
605+
rmb(); /* make sure we read owned bit before the rest of elem */
606+
return elem;
612607
}
613608

614609
static void mlxsw_pci_cq_tasklet(unsigned long data)
@@ -621,8 +616,8 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
621616

622617
while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
623618
u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
624-
u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
625-
u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
619+
u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
620+
u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
626621

627622
if (sendq) {
628623
struct mlxsw_pci_queue *sdq;
@@ -636,7 +631,7 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
636631

637632
rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
638633
mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
639-
wqe_counter, cqe);
634+
wqe_counter, q->u.cq.v, cqe);
640635
q->u.cq.comp_rdq_count++;
641636
}
642637
if (++items == credits)
@@ -696,7 +691,18 @@ static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
696691

697692
static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
698693
{
699-
return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
694+
struct mlxsw_pci_queue_elem_info *elem_info;
695+
char *elem;
696+
bool owner_bit;
697+
698+
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
699+
elem = elem_info->elem;
700+
owner_bit = mlxsw_pci_eqe_owner_get(elem);
701+
if (mlxsw_pci_elem_hw_owned(q, owner_bit))
702+
return NULL;
703+
q->consumer_counter++;
704+
rmb(); /* make sure we read owned bit before the rest of elem */
705+
return elem;
700706
}
701707

702708
static void mlxsw_pci_eq_tasklet(unsigned long data)
@@ -779,8 +785,8 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
779785
.init = mlxsw_pci_cq_init,
780786
.fini = mlxsw_pci_cq_fini,
781787
.tasklet = mlxsw_pci_cq_tasklet,
782-
.elem_count = MLXSW_PCI_CQE_COUNT,
783-
.elem_size = MLXSW_PCI_CQE_SIZE
788+
.elem_count = MLXSW_PCI_CQE01_COUNT,
789+
.elem_size = MLXSW_PCI_CQE01_SIZE
784790
};
785791

786792
static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
@@ -800,6 +806,8 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
800806
int i;
801807
int err;
802808

809+
q->u.cq.v = MLXSW_PCI_CQE_V0;
810+
803811
spin_lock_init(&q->lock);
804812
q->num = q_num;
805813
q->count = q_ops->elem_count;
@@ -938,7 +946,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
938946

939947
if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
940948
(1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
941-
(1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
949+
(1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
942950
(1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
943951
dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
944952
return -EINVAL;

drivers/net/ethernet/mellanox/mlxsw/pci_hw.h

Lines changed: 64 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -82,10 +82,12 @@
8282
#define MLXSW_PCI_AQ_PAGES 8
8383
#define MLXSW_PCI_AQ_SIZE (MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
8484
#define MLXSW_PCI_WQE_SIZE 32 /* 32 bytes per element */
85-
#define MLXSW_PCI_CQE_SIZE 16 /* 16 bytes per element */
85+
#define MLXSW_PCI_CQE01_SIZE 16 /* 16 bytes per element */
86+
#define MLXSW_PCI_CQE2_SIZE 32 /* 32 bytes per element */
8687
#define MLXSW_PCI_EQE_SIZE 16 /* 16 bytes per element */
8788
#define MLXSW_PCI_WQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
88-
#define MLXSW_PCI_CQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
89+
#define MLXSW_PCI_CQE01_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
90+
#define MLXSW_PCI_CQE2_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE2_SIZE)
8991
#define MLXSW_PCI_EQE_COUNT (MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
9092
#define MLXSW_PCI_EQE_UPDATE_COUNT 0x80
9193

@@ -126,10 +128,48 @@ MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
126128
*/
127129
MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
128130

131+
enum mlxsw_pci_cqe_v {
132+
MLXSW_PCI_CQE_V0,
133+
MLXSW_PCI_CQE_V1,
134+
MLXSW_PCI_CQE_V2,
135+
};
136+
137+
#define mlxsw_pci_cqe_item_helpers(name, v0, v1, v2) \
138+
static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
139+
{ \
140+
switch (v) { \
141+
default: \
142+
case MLXSW_PCI_CQE_V0: \
143+
return mlxsw_pci_cqe##v0##_##name##_get(cqe); \
144+
case MLXSW_PCI_CQE_V1: \
145+
return mlxsw_pci_cqe##v1##_##name##_get(cqe); \
146+
case MLXSW_PCI_CQE_V2: \
147+
return mlxsw_pci_cqe##v2##_##name##_get(cqe); \
148+
} \
149+
} \
150+
static inline void mlxsw_pci_cqe_##name##_set(enum mlxsw_pci_cqe_v v, \
151+
char *cqe, u32 val) \
152+
{ \
153+
switch (v) { \
154+
default: \
155+
case MLXSW_PCI_CQE_V0: \
156+
mlxsw_pci_cqe##v0##_##name##_set(cqe, val); \
157+
break; \
158+
case MLXSW_PCI_CQE_V1: \
159+
mlxsw_pci_cqe##v1##_##name##_set(cqe, val); \
160+
break; \
161+
case MLXSW_PCI_CQE_V2: \
162+
mlxsw_pci_cqe##v2##_##name##_set(cqe, val); \
163+
break; \
164+
} \
165+
}
166+
129167
/* pci_cqe_lag
130168
* Packet arrives from a port which is a LAG
131169
*/
132-
MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
170+
MLXSW_ITEM32(pci, cqe0, lag, 0x00, 23, 1);
171+
MLXSW_ITEM32(pci, cqe12, lag, 0x00, 24, 1);
172+
mlxsw_pci_cqe_item_helpers(lag, 0, 12, 12);
133173

134174
/* pci_cqe_system_port/lag_id
135175
* When lag=0: System port on which the packet was received
@@ -138,8 +178,12 @@ MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
138178
* bits [3:0] sub_port on which the packet was received
139179
*/
140180
MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
141-
MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12);
142-
MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4);
181+
MLXSW_ITEM32(pci, cqe0, lag_id, 0x00, 4, 12);
182+
MLXSW_ITEM32(pci, cqe12, lag_id, 0x00, 0, 16);
183+
mlxsw_pci_cqe_item_helpers(lag_id, 0, 12, 12);
184+
MLXSW_ITEM32(pci, cqe0, lag_subport, 0x00, 0, 4);
185+
MLXSW_ITEM32(pci, cqe12, lag_subport, 0x00, 16, 8);
186+
mlxsw_pci_cqe_item_helpers(lag_subport, 0, 12, 12);
143187

144188
/* pci_cqe_wqe_counter
145189
* WQE count of the WQEs completed on the associated dqn
@@ -162,28 +206,38 @@ MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9);
162206
* Length include CRC. Indicates the length field includes
163207
* the packet's CRC.
164208
*/
165-
MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
209+
MLXSW_ITEM32(pci, cqe0, crc, 0x0C, 8, 1);
210+
MLXSW_ITEM32(pci, cqe12, crc, 0x0C, 9, 1);
211+
mlxsw_pci_cqe_item_helpers(crc, 0, 12, 12);
166212

167213
/* pci_cqe_e
168214
* CQE with Error.
169215
*/
170-
MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
216+
MLXSW_ITEM32(pci, cqe0, e, 0x0C, 7, 1);
217+
MLXSW_ITEM32(pci, cqe12, e, 0x00, 27, 1);
218+
mlxsw_pci_cqe_item_helpers(e, 0, 12, 12);
171219

172220
/* pci_cqe_sr
173221
* 1 - Send Queue
174222
* 0 - Receive Queue
175223
*/
176-
MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
224+
MLXSW_ITEM32(pci, cqe0, sr, 0x0C, 6, 1);
225+
MLXSW_ITEM32(pci, cqe12, sr, 0x00, 26, 1);
226+
mlxsw_pci_cqe_item_helpers(sr, 0, 12, 12);
177227

178228
/* pci_cqe_dqn
179229
* Descriptor Queue (DQ) Number.
180230
*/
181-
MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
231+
MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
232+
MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
233+
mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
182234

183235
/* pci_cqe_owner
184236
* Ownership bit.
185237
*/
186-
MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
238+
MLXSW_ITEM32(pci, cqe01, owner, 0x0C, 0, 1);
239+
MLXSW_ITEM32(pci, cqe2, owner, 0x1C, 0, 1);
240+
mlxsw_pci_cqe_item_helpers(owner, 01, 01, 2);
187241

188242
/* pci_eqe_event_type
189243
* Event type.

0 commit comments

Comments (0)