@@ -468,6 +468,11 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
 		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
 			continue;
 
+		/* Don't try and take queues being reconfigured */
+		if (mvm->queue_info[queue].status ==
+		    IWL_MVM_QUEUE_RECONFIGURING)
+			continue;
+
 		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
 	}
 
@@ -501,27 +506,33 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
 		queue = ac_to_queue[IEEE80211_AC_VO];
 
 	/* Make sure queue found (or not) is legal */
-	if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
-	       queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) ||
-	      (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
-	       queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) ||
-	      (queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) {
+	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
+	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
+	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
 		IWL_ERR(mvm, "No DATA queues available to share\n");
-		queue = -ENOSPC;
+		return -ENOSPC;
+	}
+
+	/* Make sure the queue isn't in the middle of being reconfigured */
+	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
+		IWL_ERR(mvm,
+			"TXQ %d is in the middle of re-config - try again\n",
+			queue);
+		return -EBUSY;
 	}
 
 	return queue;
 }
 
 /*
- * If a given queue has a higher AC than the TID stream that is being added to
- * it, the queue needs to be redirected to the lower AC. This function does that
+ * If a given queue has a higher AC than the TID stream that is being compared
+ * to, the queue needs to be redirected to the lower AC. This function does that
  * in such a case, otherwise - if no redirection required - it does nothing,
  * unless the %force param is true.
  */
-static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
-				      int ac, int ssn, unsigned int wdg_timeout,
-				      bool force)
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+			       int ac, int ssn, unsigned int wdg_timeout,
+			       bool force)
 {
 	struct iwl_scd_txq_cfg_cmd cmd = {
 		.scd_queue = queue,
@@ -555,7 +566,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
 	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
 	spin_unlock_bh(&mvm->queue_info_lock);
 
-	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n",
+	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
 			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
 
 	/* Stop MAC queues and wait for this queue to empty */
@@ -709,7 +720,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 	if (WARN_ON(queue <= 0)) {
 		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
 			tid, cfg.sta_id);
-		return -ENOSPC;
+		return queue;
 	}
 
 	/*
@@ -827,6 +838,84 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 	return ret;
 }
 
+static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	s8 sta_id;
+	int tid = -1;
+	unsigned long tid_bitmap;
+	unsigned int wdg_timeout;
+	int ssn;
+	int ret = true;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	sta_id = mvm->queue_info[queue].ra_sta_id;
+	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/* Find TID for queue, and make sure it is the only one on the queue */
+	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+	if (tid_bitmap != BIT(tid)) {
+		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
+			queue, tid_bitmap);
+		return;
+	}
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
+			    tid);
+
+	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+					lockdep_is_held(&mvm->mutex));
+
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+		return;
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+
+	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+
+	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
+					 tid_to_mac80211_ac[tid], ssn,
+					 wdg_timeout, true);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
+		return;
+	}
+
+	/* If aggs should be turned back on - do it */
+	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+		struct iwl_mvm_add_sta_cmd cmd;
+
+		mvmsta->tid_disable_agg &= ~BIT(tid);
+
+		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+		cmd.sta_id = mvmsta->sta_id;
+		cmd.add_modify = STA_MODE_MODIFY;
+		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+		if (!ret) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "TXQ #%d is now aggregated again\n",
+					    queue);
+
+			/* Mark queue intenally as aggregating again */
+			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
+		}
+	}
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+	spin_unlock_bh(&mvm->queue_info_lock);
+}
+
 static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
 {
 	if (tid == IWL_MAX_TID_COUNT)
@@ -894,13 +983,26 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 	struct ieee80211_sta *sta;
 	struct iwl_mvm_sta *mvmsta;
 	unsigned long deferred_tid_traffic;
-	int sta_id, tid;
+	int queue, sta_id, tid;
 
 	/* Check inactivity of queues */
 	iwl_mvm_inactivity_check(mvm);
 
 	mutex_lock(&mvm->mutex);
 
+	/* Reconfigure queues requiring reconfiguation */
+	for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
+		bool reconfig;
+
+		spin_lock_bh(&mvm->queue_info_lock);
+		reconfig = (mvm->queue_info[queue].status ==
+			    IWL_MVM_QUEUE_RECONFIGURING);
+		spin_unlock_bh(&mvm->queue_info_lock);
+
+		if (reconfig)
+			iwl_mvm_unshare_queue(mvm, queue);
+	}
+
 	/* Go over all stations with deferred traffic */
 	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
 			 IWL_MVM_STATION_COUNT) {
@@ -1956,7 +2058,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		return -EIO;
 	}
 
-	spin_lock_bh(&mvm->queue_info_lock);
+	spin_lock(&mvm->queue_info_lock);
 
 	/*
 	 * Note the possible cases:
@@ -1967,22 +2069,29 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	 * non-DQA mode, since the TXQ hasn't yet been allocated
 	 */
 	txq_id = mvmsta->tid_data[tid].txq_id;
-	if (!iwl_mvm_is_dqa_supported(mvm) ||
+	if (iwl_mvm_is_dqa_supported(mvm) &&
+	    unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
+		ret = -ENXIO;
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "Can't start tid %d agg on shared queue!\n",
+				    tid);
+		goto release_locks;
+	} else if (!iwl_mvm_is_dqa_supported(mvm) ||
 	    mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
 		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 						 mvm->first_agg_queue,
 						 mvm->last_agg_queue);
 		if (txq_id < 0) {
 			ret = txq_id;
-			spin_unlock_bh(&mvm->queue_info_lock);
 			IWL_ERR(mvm, "Failed to allocate agg queue\n");
 			goto release_locks;
 		}
 
 		/* TXQ hasn't yet been enabled, so mark it only as reserved */
 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
 	}
-	spin_unlock_bh(&mvm->queue_info_lock);
+
+	spin_unlock(&mvm->queue_info_lock);
 
 	IWL_DEBUG_TX_QUEUES(mvm,
 			    "AGG for tid %d will be on queue #%d\n",
@@ -2006,8 +2115,11 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	}
 
 	ret = 0;
+	goto out;
 
 release_locks:
+	spin_unlock(&mvm->queue_info_lock);
+out:
 	spin_unlock_bh(&mvmsta->lock);
 
 	return ret;
@@ -2023,6 +2135,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
 	int queue, ret;
 	bool alloc_queue = true;
+	enum iwl_mvm_queue_status queue_status;
 	u16 ssn;
 
 	struct iwl_trans_txq_scd_cfg cfg = {
@@ -2048,13 +2161,15 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
+	spin_lock_bh(&mvm->queue_info_lock);
+	queue_status = mvm->queue_info[queue].status;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	/* In DQA mode, the existing queue might need to be reconfigured */
 	if (iwl_mvm_is_dqa_supported(mvm)) {
-		spin_lock_bh(&mvm->queue_info_lock);
 		/* Maybe there is no need to even alloc a queue... */
 		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
 			alloc_queue = false;
-		spin_unlock_bh(&mvm->queue_info_lock);
 
 		/*
 		 * Only reconfig the SCD for the queue if the window size has
@@ -2089,9 +2204,12 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
 			   &cfg, wdg_timeout);
 
-	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
-	if (ret)
-		return -EIO;
+	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
+	if (queue_status != IWL_MVM_QUEUE_SHARED) {
+		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+		if (ret)
+			return -EIO;
+	}
 
 	/* No need to mark as reserved */
 	spin_lock_bh(&mvm->queue_info_lock);
@@ -2123,7 +2241,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	u16 txq_id;
 	int err;
 
-
 	/*
 	 * If mac80211 is cleaning its state, then say that we finished since
 	 * our state has been cleared anyway.
@@ -2152,6 +2269,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	 */
 	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
+
	spin_unlock_bh(&mvm->queue_info_lock);
 
 	switch (tid_data->state) {