@@ -526,22 +526,18 @@ static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
 	struct ixgbevf_q_vector *q_vector =
 		container_of(napi, struct ixgbevf_q_vector, napi);
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
-	struct ixgbevf_ring *rx_ring = NULL;
 	int work_done = 0;
-	long r_idx;
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	rx_ring = &(adapter->rx_ring[r_idx]);
-
-	ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+	ixgbevf_clean_rx_irq(q_vector, q_vector->rx.ring, &work_done, budget);
 
 	/* If all Rx work done, exit the polling mode */
 	if (work_done < budget) {
 		napi_complete(napi);
 		if (adapter->itr_setting & 1)
 			ixgbevf_set_itr_msix(q_vector);
 		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
-			ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
+			ixgbevf_irq_enable_queues(adapter,
+						  1 << q_vector->v_idx);
 	}
 
 	return work_done;
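
Note on the re-enable path above: the mask passed to ixgbevf_irq_enable_queues() is now computed as 1 << q_vector->v_idx rather than read from the ring's cached v_idx field, so a ring no longer needs to know its vector's bit. For orientation, the helper presumably just writes that mask to the VF interrupt mask set register; a minimal sketch under that assumption (the real body lives elsewhere in ixgbevf_main.c and is untouched by this patch):

	static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
						     u64 qmask)
	{
		struct ixgbe_hw *hw = &adapter->hw;

		/* setting a bit in VTEIMS unmasks the corresponding MSI-X vector */
		IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (u32)(qmask & 0xFFFFFFFF));
	}
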
@@ -560,34 +556,25 @@ static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
 	struct ixgbevf_q_vector *q_vector =
 		container_of(napi, struct ixgbevf_q_vector, napi);
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
-	struct ixgbevf_ring *rx_ring = NULL;
-	int work_done = 0, i;
-	long r_idx;
-	u64 enable_mask = 0;
+	struct ixgbevf_ring *rx_ring;
+	int work_done = 0;
 
 	/* attempt to distribute budget to each queue fairly, but don't allow
 	 * the budget to go below 1 because we'll exit polling */
-	budget /= (q_vector->rxr_count ?: 1);
+	budget /= (q_vector->rx.count ?: 1);
 	budget = max(budget, 1);
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	for (i = 0; i < q_vector->rxr_count; i++) {
-		rx_ring = &(adapter->rx_ring[r_idx]);
-		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
-		enable_mask |= rx_ring->v_idx;
-		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-				      r_idx + 1);
-	}
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	rx_ring = &(adapter->rx_ring[r_idx]);
+	ixgbevf_for_each_ring(rx_ring, q_vector->rx)
+		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
 
 	/* If all Rx work done, exit the polling mode */
 	if (work_done < budget) {
 		napi_complete(napi);
 		if (adapter->itr_setting & 1)
 			ixgbevf_set_itr_msix(q_vector);
 		if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
-			ixgbevf_irq_enable_queues(adapter, enable_mask);
+			ixgbevf_irq_enable_queues(adapter,
+						  1 << q_vector->v_idx);
 	}
 
 	return work_done;
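
Note: both NAPI paths above now iterate rings through ixgbevf_for_each_ring() and the q_vector->rx / q_vector->tx ring containers, neither of which is defined in this diff. Modeled on the equivalent ixgbe code, the supporting definitions in ixgbevf.h presumably look roughly like this; a sketch under that assumption, not the verbatim header:

	/* Sketch (assumption, mirroring ixgbe): per-vector container that
	 * replaces the rxr_idx/txr_idx bitmaps and rxr_count/txr_count. */
	struct ixgbevf_ring_container {
		struct ixgbevf_ring *ring;	/* head of singly linked ring list */
		u8 count;			/* number of rings on the list */
		u8 itr;				/* current ITR setting for container */
	};

	/* Walk every ring hanging off a container via the ->next links, e.g.
	 *	ixgbevf_for_each_ring(rx_ring, q_vector->rx)
	 *		ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
	 */
	#define ixgbevf_for_each_ring(pos, head) \
		for (pos = (head).ring; pos != NULL; pos = pos->next)
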
@@ -605,7 +592,7 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbevf_q_vector *q_vector;
 	struct ixgbe_hw *hw = &adapter->hw;
-	int i, j, q_vectors, v_idx, r_idx;
+	int q_vectors, v_idx;
 	u32 mask;
 
 	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -615,33 +602,19 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
 	 * corresponding register.
 	 */
 	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+		struct ixgbevf_ring *ring;
 		q_vector = adapter->q_vector[v_idx];
-		/* XXX for_each_set_bit(...) */
-		r_idx = find_first_bit(q_vector->rxr_idx,
-				       adapter->num_rx_queues);
-
-		for (i = 0; i < q_vector->rxr_count; i++) {
-			j = adapter->rx_ring[r_idx].reg_idx;
-			ixgbevf_set_ivar(adapter, 0, j, v_idx);
-			r_idx = find_next_bit(q_vector->rxr_idx,
-					      adapter->num_rx_queues,
-					      r_idx + 1);
-		}
-		r_idx = find_first_bit(q_vector->txr_idx,
-				       adapter->num_tx_queues);
-
-		for (i = 0; i < q_vector->txr_count; i++) {
-			j = adapter->tx_ring[r_idx].reg_idx;
-			ixgbevf_set_ivar(adapter, 1, j, v_idx);
-			r_idx = find_next_bit(q_vector->txr_idx,
-					      adapter->num_tx_queues,
-					      r_idx + 1);
-		}
+
+		ixgbevf_for_each_ring(ring, q_vector->rx)
+			ixgbevf_set_ivar(adapter, 0, ring->reg_idx, v_idx);
+
+		ixgbevf_for_each_ring(ring, q_vector->tx)
+			ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
 
 		/* if this is a tx only vector halve the interrupt rate */
-		if (q_vector->txr_count && !q_vector->rxr_count)
+		if (q_vector->tx.ring && !q_vector->rx.ring)
 			q_vector->eitr = (adapter->eitr_param >> 1);
-		else if (q_vector->rxr_count)
+		else if (q_vector->rx.ring)
 			/* rx only */
 			q_vector->eitr = adapter->eitr_param;
 
@@ -752,40 +725,32 @@ static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	u32 new_itr;
 	u8 current_itr, ret_itr;
-	int i, r_idx, v_idx = q_vector->v_idx;
+	int v_idx = q_vector->v_idx;
 	struct ixgbevf_ring *rx_ring, *tx_ring;
 
-	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-	for (i = 0; i < q_vector->txr_count; i++) {
-		tx_ring = &(adapter->tx_ring[r_idx]);
+	ixgbevf_for_each_ring(tx_ring, q_vector->tx) {
 		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
-					     q_vector->tx_itr,
+					     q_vector->tx.itr,
 					     tx_ring->total_packets,
 					     tx_ring->total_bytes);
 		/* if the result for this queue would decrease interrupt
 		 * rate for this vector then use that result */
-		q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
-				    q_vector->tx_itr - 1 : ret_itr);
-		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-				      r_idx + 1);
+		q_vector->tx.itr = ((q_vector->tx.itr > ret_itr) ?
+				    q_vector->tx.itr - 1 : ret_itr);
 	}
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	for (i = 0; i < q_vector->rxr_count; i++) {
-		rx_ring = &(adapter->rx_ring[r_idx]);
+	ixgbevf_for_each_ring(rx_ring, q_vector->rx) {
 		ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
-					     q_vector->rx_itr,
+					     q_vector->rx.itr,
 					     rx_ring->total_packets,
 					     rx_ring->total_bytes);
 		/* if the result for this queue would decrease interrupt
 		 * rate for this vector then use that result */
-		q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
-				    q_vector->rx_itr - 1 : ret_itr);
-		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-				      r_idx + 1);
+		q_vector->rx.itr = ((q_vector->rx.itr > ret_itr) ?
+				    q_vector->rx.itr - 1 : ret_itr);
 	}
 
-	current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
 
 	switch (current_itr) {
 	/* counts and packets in update_itr are dependent on these numbers */
@@ -861,19 +826,14 @@ static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
 	struct ixgbevf_q_vector *q_vector = data;
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	struct ixgbevf_ring *tx_ring;
-	int i, r_idx;
 
-	if (!q_vector->txr_count)
+	if (!q_vector->tx.ring)
 		return IRQ_HANDLED;
 
-	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-	for (i = 0; i < q_vector->txr_count; i++) {
-		tx_ring = &(adapter->tx_ring[r_idx]);
+	ixgbevf_for_each_ring(tx_ring, q_vector->tx) {
 		tx_ring->total_bytes = 0;
 		tx_ring->total_packets = 0;
 		ixgbevf_clean_tx_irq(adapter, tx_ring);
-		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
-				      r_idx + 1);
 	}
 
 	if (adapter->itr_setting & 1)
@@ -893,25 +853,17 @@ static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
 	struct ixgbevf_adapter *adapter = q_vector->adapter;
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct ixgbevf_ring *rx_ring;
-	int r_idx;
-	int i;
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	for (i = 0; i < q_vector->rxr_count; i++) {
-		rx_ring = &(adapter->rx_ring[r_idx]);
+	ixgbevf_for_each_ring(rx_ring, q_vector->rx) {
 		rx_ring->total_bytes = 0;
 		rx_ring->total_packets = 0;
-		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
-				      r_idx + 1);
 	}
 
-	if (!q_vector->rxr_count)
+	if (!q_vector->rx.ring)
 		return IRQ_HANDLED;
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	rx_ring = &(adapter->rx_ring[r_idx]);
 	/* disable interrupts on this vector only */
-	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, 1 << q_vector->v_idx);
 	napi_schedule(&q_vector->napi);
 
@@ -931,8 +883,9 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
 {
 	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-	set_bit(r_idx, q_vector->rxr_idx);
-	q_vector->rxr_count++;
+	a->rx_ring[r_idx].next = q_vector->rx.ring;
+	q_vector->rx.ring = &a->rx_ring[r_idx];
+	q_vector->rx.count++;
 	a->rx_ring[r_idx].v_idx = 1 << v_idx;
 }
 
@@ -941,8 +894,9 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
 {
 	struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
 
-	set_bit(t_idx, q_vector->txr_idx);
-	q_vector->txr_count++;
+	a->tx_ring[t_idx].next = q_vector->tx.ring;
+	q_vector->tx.ring = &a->tx_ring[t_idx];
+	q_vector->tx.count++;
 	a->tx_ring[t_idx].v_idx = 1 << v_idx;
 }
 
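
Note: the two map_vector_to_*() helpers above replace set_bit()/count++ with a push onto the container's singly linked list; each ring's next pointer takes the old head and the ring becomes the new head. A hypothetical walk-through, assuming the container sketched earlier:

	/* Hypothetical example: mapping rx queues 0 and 1 to vector 0.
	 *
	 *	map_vector_to_rxq(a, 0, 0);	rx.ring -> rx_ring[0] -> NULL
	 *	map_vector_to_rxq(a, 0, 1);	rx.ring -> rx_ring[1] -> rx_ring[0] -> NULL
	 *					rx.count == 2
	 *
	 * ixgbevf_for_each_ring() therefore visits rings in reverse mapping
	 * order; the cleanup paths only require that every ring is visited,
	 * not any particular order.
	 */
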
@@ -1026,10 +980,10 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
 	/* Decrement for Other and TCP Timer vectors */
 	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
-#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)          \
-					  ? &ixgbevf_msix_clean_many : \
-			  (_v)->rxr_count ? &ixgbevf_msix_clean_rx   : \
-			  (_v)->txr_count ? &ixgbevf_msix_clean_tx   : \
+#define SET_HANDLER(_v) (((_v)->rx.ring && (_v)->tx.ring)              \
+					  ? &ixgbevf_msix_clean_many : \
+			  (_v)->rx.ring ? &ixgbevf_msix_clean_rx   :   \
+			  (_v)->tx.ring ? &ixgbevf_msix_clean_tx   :   \
 			  NULL)
 	for (vector = 0; vector < q_vectors; vector++) {
 		handler = SET_HANDLER(adapter->q_vector[vector]);
@@ -1085,10 +1039,10 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
 
 	for (i = 0; i < q_vectors; i++) {
 		struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
-		bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
-		bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
-		q_vector->rxr_count = 0;
-		q_vector->txr_count = 0;
+		q_vector->rx.ring = NULL;
+		q_vector->tx.ring = NULL;
+		q_vector->rx.count = 0;
+		q_vector->tx.count = 0;
 		q_vector->eitr = adapter->eitr_param;
 	}
 }
@@ -1365,10 +1319,10 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		struct napi_struct *napi;
 		q_vector = adapter->q_vector[q_idx];
-		if (!q_vector->rxr_count)
+		if (!q_vector->rx.ring)
 			continue;
 		napi = &q_vector->napi;
-		if (q_vector->rxr_count > 1)
+		if (q_vector->rx.count > 1)
 			napi->poll = &ixgbevf_clean_rxonly_many;
 
 		napi_enable(napi);
@@ -1383,7 +1337,7 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
 
 	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
 		q_vector = adapter->q_vector[q_idx];
-		if (!q_vector->rxr_count)
+		if (!q_vector->rx.ring)
 			continue;
 		napi_disable(&q_vector->napi);
 	}
@@ -2144,7 +2098,7 @@ static void ixgbevf_watchdog(unsigned long data)
 	/* get one bit for every active tx/rx interrupt vector */
 	for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
 		struct ixgbevf_q_vector *qv = adapter->q_vector[i];
-		if (qv->rxr_count || qv->txr_count)
+		if (qv->rx.ring || qv->tx.ring)
 			eics |= (1 << i);
 	}
 