@@ -117,6 +117,7 @@ struct mlxsw_pci_queue {
 		struct {
 			u32 comp_sdq_count;
 			u32 comp_rdq_count;
+			enum mlxsw_pci_cqe_v v;
 		} cq;
 		struct {
 			u32 ev_cmd_count;
@@ -155,6 +156,8 @@ struct mlxsw_pci {
 	} cmd;
 	struct mlxsw_bus_info bus_info;
 	const struct pci_device_id *id;
+	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
+	u8 num_sdq_cqs; /* Number of CQs used for SDQs */
 };
 
 static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -202,24 +205,6 @@ static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
 	return owner_bit != !!(q->consumer_counter & q->count);
 }
 
-static char *
-mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
-			    u32 (*get_elem_owner_func)(const char *))
-{
-	struct mlxsw_pci_queue_elem_info *elem_info;
-	char *elem;
-	bool owner_bit;
-
-	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
-	elem = elem_info->elem;
-	owner_bit = get_elem_owner_func(elem);
-	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
-		return NULL;
-	q->consumer_counter++;
-	rmb(); /* make sure we read owned bit before the rest of elem */
-	return elem;
-}
-
 static struct mlxsw_pci_queue_type_group *
 mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
 			       enum mlxsw_pci_queue_type q_type)
@@ -494,6 +479,17 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
 	}
 }
 
+static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
+				  struct mlxsw_pci_queue *q)
+{
+	q->u.cq.v = mlxsw_pci->max_cqe_ver;
+
+	/* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
+	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
+	    q->num < mlxsw_pci->num_sdq_cqs)
+		q->u.cq.v = MLXSW_PCI_CQE_V1;
+}
+
 static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 			     struct mlxsw_pci_queue *q)
 {
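mlxsw_pci_cq_pre_init() leans on the driver's CQ numbering, in which the first num_sdq_cqs completion queues serve the send queues (SDQs); those stay on CQEv1 even when CQEv2 is available, mirroring the patch comment that v2 buys nothing for send completions. A standalone sketch of the rule, with illustrative names rather than the mlxsw definitions:

enum cqe_ver { CQE_V0, CQE_V1, CQE_V2 };

/* CQs numbered below the SDQ count complete transmits only. */
static enum cqe_ver pick_cq_ver(enum cqe_ver max_ver, int cq_num,
				int num_sdq_cqs)
{
	if (max_ver == CQE_V2 && cq_num < num_sdq_cqs)
		return CQE_V1;
	return max_ver;
}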
@@ -505,10 +501,16 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 	for (i = 0; i < q->count; i++) {
 		char *elem = mlxsw_pci_queue_elem_get(q, i);
 
-		mlxsw_pci_cqe_owner_set(elem, 1);
+		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
 	}
 
-	mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
+	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
+		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+					MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
+	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
+		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+					MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
+
 	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
 	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
 	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
@@ -559,7 +561,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
 static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 				     struct mlxsw_pci_queue *q,
 				     u16 consumer_counter_limit,
-				     char *cqe)
+				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
 {
 	struct pci_dev *pdev = mlxsw_pci->pdev;
 	struct mlxsw_pci_queue_elem_info *elem_info;
@@ -579,10 +581,11 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 	if (q->consumer_counter++ != consumer_counter_limit)
 		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
 
-	if (mlxsw_pci_cqe_lag_get(cqe)) {
+	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
 		rx_info.is_lag = true;
-		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe);
-		rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe);
+		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
+		rx_info.lag_port_index =
+			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
 	} else {
 		rx_info.is_lag = false;
 		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
@@ -591,7 +594,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
 
 	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
-	if (mlxsw_pci_cqe_crc_get(cqe))
+	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
 		byte_count -= ETH_FCS_LEN;
 	skb_put(skb, byte_count);
 	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
@@ -608,7 +611,18 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 
 static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
 {
-	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *elem;
+	bool owner_bit;
+
+	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+	elem = elem_info->elem;
+	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
+	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+		return NULL;
+	q->consumer_counter++;
+	rmb(); /* make sure we read owned bit before the rest of elem */
+	return elem;
 }
 
 static void mlxsw_pci_cq_tasklet(unsigned long data)
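mlxsw_pci_cq_sw_cqe_get() is the CQ-specific replacement for the removed generic mlxsw_pci_queue_sw_elem_get(); the generic helper had to go because the CQE owner getter now takes the CQE version, which no longer fits the u32 (*)(const char *) callback type. The owner-bit handshake itself can be shown standalone; the struct below is an illustrative stand-in, not the mlxsw type:

#include <stdbool.h>

struct ring {
	unsigned int consumer_counter;
	unsigned int count;	/* ring size, a power of two */
};

/* Same parity test as mlxsw_pci_elem_hw_owned(): the expected owner
 * bit flips on every full pass over the ring, so software consumes an
 * element only when the bit hardware wrote matches the current pass.
 */
static bool hw_owned(const struct ring *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}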
@@ -621,8 +635,8 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
 
 	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
 		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
-		u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
-		u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
+		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
+		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
 
 		if (sendq) {
 			struct mlxsw_pci_queue *sdq;
@@ -636,7 +650,7 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
 
 			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
 			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
-						 wqe_counter, cqe);
+						 wqe_counter, q->u.cq.v, cqe);
 			q->u.cq.comp_rdq_count++;
 		}
 		if (++items == credits)
@@ -648,6 +662,18 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
 	}
 }
 
+static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
+{
+	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
+					       MLXSW_PCI_CQE01_COUNT;
+}
+
+static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
+{
+	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
+					       MLXSW_PCI_CQE01_SIZE;
+}
+
 static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 			     struct mlxsw_pci_queue *q)
 {
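The two helpers above tie ring geometry to the CQE version, since a fixed per-queue buffer holds half as many entries once each entry doubles in size. Sketched with assumed byte values (not quoted from the mlxsw headers):

#define AQ_BUF_SIZE	32768	/* assumed per-queue buffer budget */
#define CQE01_SIZE	16	/* assumed CQE v0/v1 entry size */
#define CQE2_SIZE	32	/* assumed CQE v2 entry size */

enum {
	CQE01_COUNT = AQ_BUF_SIZE / CQE01_SIZE,	/* 2048 entries */
	CQE2_COUNT = AQ_BUF_SIZE / CQE2_SIZE,	/* 1024 entries */
};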
@@ -696,7 +722,18 @@ static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
 
 static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
 {
-	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *elem;
+	bool owner_bit;
+
+	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+	elem = elem_info->elem;
+	owner_bit = mlxsw_pci_eqe_owner_get(elem);
+	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+		return NULL;
+	q->consumer_counter++;
+	rmb(); /* make sure we read owned bit before the rest of elem */
+	return elem;
 }
 
 static void mlxsw_pci_eq_tasklet(unsigned long data)
@@ -749,11 +786,15 @@
 struct mlxsw_pci_queue_ops {
 	const char *name;
 	enum mlxsw_pci_queue_type type;
+	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
+			 struct mlxsw_pci_queue *q);
 	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
 		    struct mlxsw_pci_queue *q);
 	void (*fini)(struct mlxsw_pci *mlxsw_pci,
 		     struct mlxsw_pci_queue *q);
 	void (*tasklet)(unsigned long data);
+	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
+	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
 	u16 elem_count;
 	u8 elem_size;
 };
@@ -776,11 +817,12 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
 
 static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
 	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
+	.pre_init	= mlxsw_pci_cq_pre_init,
 	.init		= mlxsw_pci_cq_init,
 	.fini		= mlxsw_pci_cq_fini,
 	.tasklet	= mlxsw_pci_cq_tasklet,
-	.elem_count	= MLXSW_PCI_CQE_COUNT,
-	.elem_size	= MLXSW_PCI_CQE_SIZE
+	.elem_count_f	= mlxsw_pci_cq_elem_count,
+	.elem_size_f	= mlxsw_pci_cq_elem_size
 };
 
 static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
@@ -800,10 +842,15 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 	int i;
 	int err;
 
-	spin_lock_init(&q->lock);
 	q->num = q_num;
-	q->count = q_ops->elem_count;
-	q->elem_size = q_ops->elem_size;
+	if (q_ops->pre_init)
+		q_ops->pre_init(mlxsw_pci, q);
+
+	spin_lock_init(&q->lock);
+	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
+		   q_ops->elem_count;
+	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
+		       q_ops->elem_size;
 	q->type = q_ops->type;
 	q->pci = mlxsw_pci;
 
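mlxsw_pci_queue_init() now calls pre_init() before resolving geometry, because mlxsw_pci_cq_elem_count()/_size() read the CQE version that pre_init() stores; queue types with static geometry simply leave the callbacks NULL. A minimal standalone rendering of the optional-callback-with-fallback idiom, with simplified stand-in types:

#include <stdint.h>

struct queue { uint16_t count; };

struct queue_ops {
	uint16_t (*elem_count_f)(const struct queue *q);	/* optional */
	uint16_t elem_count;					/* fallback */
};

static void queue_set_count(struct queue *q, const struct queue_ops *ops)
{
	/* A callback wins when provided; the constant covers the rest. */
	q->count = ops->elem_count_f ? ops->elem_count_f(q) : ops->elem_count;
}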
@@ -832,7 +879,7 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 
 		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
 		elem_info->elem =
-			__mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
+			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
 	}
 
 	mlxsw_cmd_mbox_zero(mbox);
@@ -912,6 +959,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
 	u8 rdq_log2sz;
 	u8 num_cqs;
 	u8 cq_log2sz;
+	u8 cqv2_log2sz;
 	u8 num_eqs;
 	u8 eq_log2sz;
 	int err;
@@ -927,6 +975,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
 	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
 	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
 	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
+	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
 	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
 	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
 
@@ -938,12 +987,16 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
 
 	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
 	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
-	    (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
+	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
+	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
+	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
 	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
 		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
 		return -EINVAL;
 	}
 
+	mlxsw_pci->num_sdq_cqs = num_sdqs;
+
 	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
 					 num_eqs);
 	if (err) {
@@ -1184,6 +1237,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
 		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
 						     &profile->swid_config[i]);
 
+	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
+		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
+		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
+	}
+
 	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
 }
 
@@ -1378,6 +1436,21 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
 	if (err)
 		goto err_query_resources;
 
+	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
+	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
+		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
+	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
+		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
+		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
+	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
+		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
+		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
+		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
+	} else {
+		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
+		goto err_cqe_v_check;
+	}
+
 	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
 	if (err)
 		goto err_config_profile;
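The selection above walks down from the newest CQE version, and the last valid branch treats a firmware that does not report the CQE_V0 resource at all as old firmware that only speaks v0. Sketched standalone, where each bool stands in for a MLXSW_CORE_RES_VALID()/MLXSW_CORE_RES_GET() pair:

#include <stdbool.h>

enum cqe_ver { CQE_V0, CQE_V1, CQE_V2 };

/* Returns the maximal usable version, or -1 when firmware reports an
 * inconsistent combination (v0 resource present but cleared).
 */
static int max_cqe_ver(bool v2, bool v1, bool v0, bool v0_valid)
{
	if (v2)
		return CQE_V2;
	if (v1)
		return CQE_V1;
	if (v0 || !v0_valid)
		return CQE_V0;
	return -1;
}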
@@ -1400,6 +1473,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
 	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
+err_cqe_v_check:
err_query_resources:
err_boardinfo:
 	mlxsw_pci_fw_area_fini(mlxsw_pci);