@@ -685,6 +685,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
		/* EBLOCK responsible to free the allocated p_ent */
		if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
			kfree(p_ent);
+		else
+			p_ent->post_ent = p_en2;

		p_ent = p_en2;
	}
@@ -767,6 +769,25 @@ static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
				   SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

+/* Avoid overriding of SPQ entries when getting out-of-order completions, by
+ * marking the completions in a bitmap and increasing the chain consumer only
+ * for the first successive completed entries.
+ */
+static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
+{
+	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+	struct qed_spq *p_spq = p_hwfn->p_spq;
+
+	__set_bit(pos, p_spq->p_comp_bitmap);
+	while (test_bit(p_spq->comp_bitmap_idx,
+			p_spq->p_comp_bitmap)) {
+		__clear_bit(p_spq->comp_bitmap_idx,
+			    p_spq->p_comp_bitmap);
+		p_spq->comp_bitmap_idx++;
+		qed_chain_return_produced(&p_spq->chain);
+	}
+}
+
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
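The new helper consolidates the bitmap bookkeeping that previously lived inline in qed_spq_completion(), so the error path in qed_spq_post() below can reuse it. To see why the chain consumer may only advance across a contiguous run of completed slots, here is a minimal userspace simulation of the same idea. It is not driver code: RING_SIZE, comp_seen, comp_idx and return_produced() are invented stand-ins for SPQ_RING_SIZE, p_comp_bitmap, comp_bitmap_idx and qed_chain_return_produced().

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 16

static bool comp_seen[RING_SIZE];	/* stands in for p_comp_bitmap */
static unsigned int comp_idx;		/* stands in for comp_bitmap_idx */

static void return_produced(void)
{
	/* In the driver this frees one ring element for reuse. */
	printf("consumer advanced past slot %u\n", comp_idx % RING_SIZE);
}

/* Mirror of qed_spq_comp_bmap_update(): mark the completed slot, then
 * advance the consumer only across the contiguous run of completed slots.
 */
static void comp_bmap_update(unsigned int echo)
{
	comp_seen[echo % RING_SIZE] = true;

	while (comp_seen[comp_idx % RING_SIZE]) {
		comp_seen[comp_idx % RING_SIZE] = false;
		return_produced();
		comp_idx++;
	}
}

int main(void)
{
	/* Completions arrive out of order: slots 2 and 1 must wait until
	 * slot 0 completes before the consumer may move, so slot 0 is
	 * never overwritten while still pending.
	 */
	comp_bmap_update(2);	/* consumer stays at 0 */
	comp_bmap_update(1);	/* consumer still at 0 */
	comp_bmap_update(0);	/* consumer advances past 0, 1 and 2 */
	return 0;
}

Running it shows that the completions for slots 2 and 1 are only banked in the bitmap; nothing is returned to the ring until slot 0 completes, at which point all three are released in order.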
@@ -824,11 +845,12 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
		   p_ent->queue == &p_spq->unlimited_pending);

	if (p_ent->queue == &p_spq->unlimited_pending) {
-		/* This is an allocated p_ent which does not need to
-		 * return to pool.
-		 */
+		struct qed_spq_entry *p_post_ent = p_ent->post_ent;
+
		kfree(p_ent);
-		return rc;
+
+		/* Return the entry which was actually posted */
+		p_ent = p_post_ent;
	}

	if (rc)
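For context on the change above: when a request comes from the unlimited-pending list, qed_spq_add_entry() copies the caller's temporary entry into the ring entry p_en2 and, with the fix in the first hunk, records that ring entry in post_ent before the temporary is freed. Judging from the diff, the old early "return rc" skipped the remainder of qed_spq_post() for such entries; swapping p_ent for p_post_ent lets the function continue with the entry the firmware will actually complete. Below is a hedged, self-contained sketch of that ownership handoff, using simplified invented types (struct entry, take_posted()) rather than the real qed API.

#include <stdlib.h>

struct entry {
	struct entry *post_ent;	/* ring entry this one was copied into */
	int payload;
};

/* Free the temporary wrapper and hand back the ring entry it was copied
 * into, so the rest of the posting flow operates on the object that will
 * actually be completed, not on freed memory.
 */
static struct entry *take_posted(struct entry *tmp)
{
	struct entry *posted = tmp->post_ent;

	free(tmp);
	return posted;
}

int main(void)
{
	struct entry *ring = malloc(sizeof(*ring));	/* entry on the ring */
	struct entry *tmp = malloc(sizeof(*tmp));	/* caller's temporary */

	ring->payload = 42;
	tmp->post_ent = ring;

	struct entry *posted = take_posted(tmp);	/* tmp is gone now */
	free(posted);		/* caller releases the posted entry */
	return 0;
}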
@@ -842,7 +864,7 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
-	qed_chain_return_produced(&p_spq->chain);
+	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);

spq_post_fail:
	/* return to the free pool */
@@ -874,25 +896,8 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
-			u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
-
			list_del(&p_ent->list);
-
-			/* Avoid overriding of SPQ entries when getting
-			 * out-of-order completions, by marking the completions
-			 * in a bitmap and increasing the chain consumer only
-			 * for the first successive completed entries.
-			 */
-			__set_bit(pos, p_spq->p_comp_bitmap);
-
-			while (test_bit(p_spq->comp_bitmap_idx,
-					p_spq->p_comp_bitmap)) {
-				__clear_bit(p_spq->comp_bitmap_idx,
-					    p_spq->p_comp_bitmap);
-				p_spq->comp_bitmap_idx++;
-				qed_chain_return_produced(&p_spq->chain);
-			}
-
+			qed_spq_comp_bmap_update(p_hwfn, echo);
			p_spq->comp_count++;
			found = p_ent;
			break;
@@ -931,11 +936,9 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
			   QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

-	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
-	    (found->queue == &p_spq->unlimited_pending))
+	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for returning its own entry into the
-		 * free list, unless it originally added the entry into the
-		 * unlimited pending list.
+		 * free list.
		 */
		qed_spq_return_entry(p_hwfn, found);