@@ -444,6 +444,18 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 	EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
 	EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
 	EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+	EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+	EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+	EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+	EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+	EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
+	EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
+	EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+	EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+	EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+	EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+	EF10_DMA_STAT(rx_dp_emerg_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
+	EF10_DMA_STAT(rx_dp_emerg_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
 };
 
 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) |		\
@@ -498,53 +510,82 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) |	\
 				  (1ULL << EF10_STAT_rx_length_error))
 
-#if BITS_PER_LONG == 64
-#define STAT_MASK_BITMAP(bits) (bits)
-#else
-#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
-#endif
-
-static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
-{
-	static const unsigned long hunt_40g_stat_mask[] = {
-		STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
-				 HUNT_40G_EXTRA_STAT_MASK)
-	};
-	static const unsigned long hunt_10g_only_stat_mask[] = {
-		STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
-				 HUNT_10G_ONLY_STAT_MASK)
-	};
+/* These statistics are only provided if the firmware supports the
+ * capability PM_AND_RXDP_COUNTERS.
+ */
+#define HUNT_PM_AND_RXDP_STAT_MASK (					\
+	(1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) |			\
+	(1ULL << EF10_STAT_rx_pm_discard_bb_overflow) |			\
+	(1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) |			\
+	(1ULL << EF10_STAT_rx_pm_discard_vfifo_full) |			\
+	(1ULL << EF10_STAT_rx_pm_trunc_qbb) |				\
+	(1ULL << EF10_STAT_rx_pm_discard_qbb) |				\
+	(1ULL << EF10_STAT_rx_pm_discard_mapping) |			\
+	(1ULL << EF10_STAT_rx_dp_q_disabled_packets) |			\
+	(1ULL << EF10_STAT_rx_dp_di_dropped_packets) |			\
+	(1ULL << EF10_STAT_rx_dp_streaming_packets) |			\
+	(1ULL << EF10_STAT_rx_dp_emerg_fetch) |				\
+	(1ULL << EF10_STAT_rx_dp_emerg_wait))
+
+static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
+{
+	u64 raw_mask = HUNT_COMMON_STAT_MASK;
 	u32 port_caps = efx_mcdi_phy_get_caps(efx);
+	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 
 	if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
-		return hunt_40g_stat_mask;
+		raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
 	else
-		return hunt_10g_only_stat_mask;
+		raw_mask |= HUNT_10G_ONLY_STAT_MASK;
+
+	if (nic_data->datapath_caps &
+	    (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
+		raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
+
+	return raw_mask;
+}
+
+static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
+{
+	u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+
+#if BITS_PER_LONG == 64
+	mask[0] = raw_mask;
+#else
+	mask[0] = raw_mask & 0xffffffff;
+	mask[1] = raw_mask >> 32;
+#endif
 }
 
 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
 {
+	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+
+	efx_ef10_get_stat_mask(efx, mask);
 	return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
-				      efx_ef10_stat_mask(efx), names);
+				      mask, names);
 }
 
 static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 {
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
-	const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
+	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
 	__le64 generation_start, generation_end;
 	u64 *stats = nic_data->stats;
 	__le64 *dma_stats;
 
+	efx_ef10_get_stat_mask(efx, mask);
+
 	dma_stats = efx->stats_buffer.addr;
 	nic_data = efx->nic_data;
 
 	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
 	if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
 		return 0;
 	rmb();
-	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
+	efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
 			     stats, efx->stats_buffer.addr, false);
+	rmb();
 	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
 	if (generation_end != generation_start)
 		return -EAGAIN;
@@ -563,12 +604,14 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
 static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
 				    struct rtnl_link_stats64 *core_stats)
 {
-	const unsigned long *mask = efx_ef10_stat_mask(efx);
+	DECLARE_BITMAP(mask, EF10_STAT_COUNT);
 	struct efx_ef10_nic_data *nic_data = efx->nic_data;
 	u64 *stats = nic_data->stats;
 	size_t stats_count = 0, index;
 	int retry;
 
+	efx_ef10_get_stat_mask(efx, mask);
+
 	/* If we're unlucky enough to read statistics during the DMA, wait
 	 * up to 10ms for it to finish (typically takes <500us)
 	 */
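
Aside: the patch replaces the old STAT_MASK_BITMAP comma trick with efx_ef10_get_stat_mask(), which splits a 64-bit stat mask across the unsigned long words of a bitmap. Below is a minimal userspace sketch of that split, illustrative only and not driver code; the kernel tests BITS_PER_LONG in #if (sizeof is not usable there), so this sketch checks at runtime instead, and all names in it are hypothetical.

/* Sketch: a 64-bit mask fits in one unsigned long word on an LP64
 * build but needs two words on a 32-bit build, matching the layout
 * DECLARE_BITMAP() produces in the patch.
 */
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * (int)sizeof(long))

static void get_stat_mask(unsigned long long raw_mask, unsigned long *mask)
{
	if (BITS_PER_LONG == 64) {
		mask[0] = (unsigned long)raw_mask;
	} else {
		mask[0] = (unsigned long)(raw_mask & 0xffffffff);
		mask[1] = (unsigned long)(raw_mask >> 32);
	}
}

int main(void)
{
	/* Bits 3 and 40 set: on a 32-bit build they land in different words. */
	unsigned long long raw_mask = (1ULL << 3) | (1ULL << 40);
	unsigned long mask[2] = { 0, 0 };

	get_stat_mask(raw_mask, mask);
	printf("word 0: %#lx\n", mask[0]);
	if (BITS_PER_LONG == 32)
		printf("word 1: %#lx\n", mask[1]);
	return 0;
}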
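The rmb() added after efx_nic_update_stats() completes the generation check: the stats reads must finish before generation_start is sampled, or a concurrent DMA update could go unnoticed. A hedged userspace analogue of the pattern follows, with C11 atomics standing in for the kernel's rmb(); the struct layout and names are illustrative, not the driver's.

/* Sketch: the producer bumps generation_start, rewrites the stats,
 * then bumps generation_end.  A reader samples generation_end, copies
 * the stats, fences, then samples generation_start; differing values
 * mean the copy raced with an update and should be retried.
 */
#include <stdatomic.h>
#include <string.h>

#define N_STATS 4

struct stats_buf {
	_Atomic unsigned long long generation_end;
	unsigned long long stats[N_STATS];
	_Atomic unsigned long long generation_start;
};

/* Returns 0 on a consistent snapshot, -1 if the caller should retry. */
int try_read_stats(const struct stats_buf *buf, unsigned long long *out)
{
	unsigned long long gen_end, gen_start;

	gen_end = atomic_load_explicit(&buf->generation_end,
				       memory_order_acquire);
	memcpy(out, buf->stats, sizeof(buf->stats));
	/* Order the stats reads before the generation_start read,
	 * as the second rmb() does in the patch. */
	atomic_thread_fence(memory_order_acquire);
	gen_start = atomic_load_explicit(&buf->generation_start,
					 memory_order_relaxed);
	return gen_end == gen_start ? 0 : -1;
}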