
Commit 9f9af3d

lkaufman-he authored and lucacoelho committed
iwlwifi: mvm: re-aggregate shared queue after unsharing
When a shared queue becomes unshared, aggregations should be re-enabled if they existed before. Make sure this happens when required.

Signed-off-by: Liad Kaufman <liad.kaufman@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
1 parent d975d72 commit 9f9af3d

File tree: 5 files changed (+205, -28 lines)
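Taken together, the change adds a new IWL_MVM_QUEUE_RECONFIGURING state and teaches the DQA worker to sweep for queues left in that state, redirecting each one back to its TID's AC and re-enabling aggregation if it was on before the queue was shared. As a rough illustration of that flow, here is a self-contained user-space C sketch; all names, values, and the missing locking are stand-ins mirroring the driver (the real code runs under mvm->mutex and the queue_info spinlock), so treat it as a toy model, not driver code:

#include <stdbool.h>
#include <stdio.h>

/* Toy mirror of enum iwl_mvm_queue_status from mvm.h */
enum queue_status {
	QUEUE_FREE,
	QUEUE_RESERVED,
	QUEUE_READY,
	QUEUE_SHARED,
	QUEUE_INACTIVE,
	QUEUE_RECONFIGURING,	/* the state this commit introduces */
};

#define MAX_HW_QUEUES 31	/* stand-in for IWL_MAX_HW_QUEUES */

static enum queue_status q_state[MAX_HW_QUEUES];
static bool agg_was_on[MAX_HW_QUEUES];	/* stand-in for tid state == IWL_AGG_ON */

/* Stand-in for iwl_mvm_unshare_queue(): redirect, re-enable aggs, mark READY */
static void unshare_queue(int queue)
{
	/* (real code: iwl_mvm_scd_queue_redirect() back to the TID's own AC) */
	if (agg_was_on[queue])
		/* (real code: ADD_STA command clearing the TID's agg-disable bit) */
		printf("TXQ #%d is now aggregated again\n", queue);
	q_state[queue] = QUEUE_READY;
}

/* Stand-in for the sweep this commit adds to iwl_mvm_add_new_dqa_stream_wk() */
static void new_dqa_stream_wk(void)
{
	for (int queue = 0; queue < MAX_HW_QUEUES; queue++)
		if (q_state[queue] == QUEUE_RECONFIGURING)
			unshare_queue(queue);
}

int main(void)
{
	q_state[10] = QUEUE_RECONFIGURING;	/* queue 10 just became unshared */
	agg_was_on[10] = true;
	new_dqa_stream_wk();	/* prints: TXQ #10 is now aggregated again */
	return 0;
}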

drivers/net/wireless/intel/iwlwifi/mvm/mvm.h

Lines changed: 17 additions & 0 deletions
@@ -697,13 +697,18 @@ struct iwl_mvm_baid_data {
  * it. In this state, when a new queue is needed to be allocated but no
  * such free queue exists, an inactive queue might be freed and given to
  * the new RA/TID.
+ * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
+ *	This is the state of a queue that has had traffic pass through it, but
+ *	needs to be reconfigured for some reason, e.g. the queue needs to
+ *	become unshared and aggregations re-enabled on.
  */
 enum iwl_mvm_queue_status {
 	IWL_MVM_QUEUE_FREE,
 	IWL_MVM_QUEUE_RESERVED,
 	IWL_MVM_QUEUE_READY,
 	IWL_MVM_QUEUE_SHARED,
 	IWL_MVM_QUEUE_INACTIVE,
+	IWL_MVM_QUEUE_RECONFIGURING,
 };
 
 #define IWL_MVM_DQA_QUEUE_TIMEOUT	(5 * HZ)
@@ -1122,6 +1127,18 @@ static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
 	       (mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3);
 }
 
+static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue)
+{
+	return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) &&
+	       (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
+}
+
+static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue)
+{
+	return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) &&
+	       (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
+}
+
 static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
 {
 	bool nvm_lar = mvm->nvm_data->lar_enabled;
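The two new helpers are plain range checks; in sta.c below they replace an open-coded legality test in iwl_mvm_get_shared_queue(). A standalone sketch of the same check, using stand-in values for the IWL_MVM_DQA_* constants (the real values live elsewhere in the driver and are not part of this diff):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Stand-in values; the real IWL_MVM_DQA_* constants are defined elsewhere */
#define DQA_MIN_MGMT_QUEUE   5
#define DQA_MAX_MGMT_QUEUE   8
#define DQA_MIN_DATA_QUEUE  10
#define DQA_MAX_DATA_QUEUE  31
#define DQA_BSS_CLIENT_QUEUE 4

static bool is_dqa_data_queue(uint8_t queue)
{
	return queue >= DQA_MIN_DATA_QUEUE && queue <= DQA_MAX_DATA_QUEUE;
}

static bool is_dqa_mgmt_queue(uint8_t queue)
{
	return queue >= DQA_MIN_MGMT_QUEUE && queue <= DQA_MAX_MGMT_QUEUE;
}

int main(void)
{
	/* The legality check from iwl_mvm_get_shared_queue(), de-negated */
	uint8_t queue = 7;
	bool legal = is_dqa_data_queue(queue) || is_dqa_mgmt_queue(queue) ||
		     queue == DQA_BSS_CLIENT_QUEUE;
	assert(legal);
	return 0;
}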

drivers/net/wireless/intel/iwlwifi/mvm/sta.c

Lines changed: 142 additions & 24 deletions
@@ -468,6 +468,11 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
 		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
 			continue;
 
+		/* Don't try and take queues being reconfigured */
+		if (mvm->queue_info[queue].status ==
+		    IWL_MVM_QUEUE_RECONFIGURING)
+			continue;
+
 		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
 	}
 
@@ -501,27 +506,33 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
 		queue = ac_to_queue[IEEE80211_AC_VO];
 
 	/* Make sure queue found (or not) is legal */
-	if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
-	       queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) ||
-	      (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
-	       queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) ||
-	      (queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) {
+	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
+	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
+	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
 		IWL_ERR(mvm, "No DATA queues available to share\n");
-		queue = -ENOSPC;
+		return -ENOSPC;
+	}
+
+	/* Make sure the queue isn't in the middle of being reconfigured */
+	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
+		IWL_ERR(mvm,
+			"TXQ %d is in the middle of re-config - try again\n",
+			queue);
+		return -EBUSY;
 	}
 
 	return queue;
 }
 
 /*
- * If a given queue has a higher AC than the TID stream that is being added to
- * it, the queue needs to be redirected to the lower AC. This function does that
+ * If a given queue has a higher AC than the TID stream that is being compared
+ * to, the queue needs to be redirected to the lower AC. This function does that
  * in such a case, otherwise - if no redirection required - it does nothing,
  * unless the %force param is true.
  */
-static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
-				      int ac, int ssn, unsigned int wdg_timeout,
-				      bool force)
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+			       int ac, int ssn, unsigned int wdg_timeout,
+			       bool force)
 {
 	struct iwl_scd_txq_cfg_cmd cmd = {
 		.scd_queue = queue,
@@ -555,7 +566,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
 	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
 	spin_unlock_bh(&mvm->queue_info_lock);
 
-	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n",
+	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
 			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
 
 	/* Stop MAC queues and wait for this queue to empty */
@@ -709,7 +720,7 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 	if (WARN_ON(queue <= 0)) {
 		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
 			tid, cfg.sta_id);
-		return -ENOSPC;
+		return queue;
 	}
 
 	/*
@@ -827,6 +838,84 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 	return ret;
 }
 
+static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	s8 sta_id;
+	int tid = -1;
+	unsigned long tid_bitmap;
+	unsigned int wdg_timeout;
+	int ssn;
+	int ret = true;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	sta_id = mvm->queue_info[queue].ra_sta_id;
+	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/* Find TID for queue, and make sure it is the only one on the queue */
+	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+	if (tid_bitmap != BIT(tid)) {
+		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
+			queue, tid_bitmap);
+		return;
+	}
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
+			    tid);
+
+	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+					lockdep_is_held(&mvm->mutex));
+
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+		return;
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+
+	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+
+	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
+					 tid_to_mac80211_ac[tid], ssn,
+					 wdg_timeout, true);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
+		return;
+	}
+
+	/* If aggs should be turned back on - do it */
+	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+		struct iwl_mvm_add_sta_cmd cmd;
+
+		mvmsta->tid_disable_agg &= ~BIT(tid);
+
+		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+		cmd.sta_id = mvmsta->sta_id;
+		cmd.add_modify = STA_MODE_MODIFY;
+		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+		if (!ret) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "TXQ #%d is now aggregated again\n",
+					    queue);
+
+			/* Mark queue internally as aggregating again */
+			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
+		}
+	}
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+	spin_unlock_bh(&mvm->queue_info_lock);
+}
+
 static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
 {
 	if (tid == IWL_MAX_TID_COUNT)
@@ -894,13 +983,26 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
 	struct ieee80211_sta *sta;
 	struct iwl_mvm_sta *mvmsta;
 	unsigned long deferred_tid_traffic;
-	int sta_id, tid;
+	int queue, sta_id, tid;
 
 	/* Check inactivity of queues */
 	iwl_mvm_inactivity_check(mvm);
 
 	mutex_lock(&mvm->mutex);
 
+	/* Reconfigure queues requiring reconfiguration */
+	for (queue = 0; queue < IWL_MAX_HW_QUEUES; queue++) {
+		bool reconfig;
+
+		spin_lock_bh(&mvm->queue_info_lock);
+		reconfig = (mvm->queue_info[queue].status ==
+			    IWL_MVM_QUEUE_RECONFIGURING);
+		spin_unlock_bh(&mvm->queue_info_lock);
+
+		if (reconfig)
+			iwl_mvm_unshare_queue(mvm, queue);
+	}
+
 	/* Go over all stations with deferred traffic */
 	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
 			 IWL_MVM_STATION_COUNT) {
@@ -1956,7 +2058,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		return -EIO;
 	}
 
-	spin_lock_bh(&mvm->queue_info_lock);
+	spin_lock(&mvm->queue_info_lock);
 
 	/*
 	 * Note the possible cases:
@@ -1967,22 +2069,29 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	 * non-DQA mode, since the TXQ hasn't yet been allocated
 	 */
 	txq_id = mvmsta->tid_data[tid].txq_id;
-	if (!iwl_mvm_is_dqa_supported(mvm) ||
+	if (iwl_mvm_is_dqa_supported(mvm) &&
+	    unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED)) {
+		ret = -ENXIO;
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "Can't start tid %d agg on shared queue!\n",
+				    tid);
+		goto release_locks;
+	} else if (!iwl_mvm_is_dqa_supported(mvm) ||
 	    mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
 		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
 						 mvm->first_agg_queue,
 						 mvm->last_agg_queue);
 		if (txq_id < 0) {
 			ret = txq_id;
-			spin_unlock_bh(&mvm->queue_info_lock);
 			IWL_ERR(mvm, "Failed to allocate agg queue\n");
 			goto release_locks;
 		}
 
 		/* TXQ hasn't yet been enabled, so mark it only as reserved */
 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
 	}
-	spin_unlock_bh(&mvm->queue_info_lock);
+
+	spin_unlock(&mvm->queue_info_lock);
 
 	IWL_DEBUG_TX_QUEUES(mvm,
 			    "AGG for tid %d will be on queue #%d\n",
@@ -2006,8 +2115,11 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	}
 
 	ret = 0;
+	goto out;
 
 release_locks:
+	spin_unlock(&mvm->queue_info_lock);
+out:
 	spin_unlock_bh(&mvmsta->lock);
 
 	return ret;
@@ -2023,6 +2135,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
 	int queue, ret;
 	bool alloc_queue = true;
+	enum iwl_mvm_queue_status queue_status;
 	u16 ssn;
 
 	struct iwl_trans_txq_scd_cfg cfg = {
@@ -2048,13 +2161,15 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
+	spin_lock_bh(&mvm->queue_info_lock);
+	queue_status = mvm->queue_info[queue].status;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
 	/* In DQA mode, the existing queue might need to be reconfigured */
 	if (iwl_mvm_is_dqa_supported(mvm)) {
-		spin_lock_bh(&mvm->queue_info_lock);
 		/* Maybe there is no need to even alloc a queue... */
 		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
 			alloc_queue = false;
-		spin_unlock_bh(&mvm->queue_info_lock);
 
 		/*
 		 * Only reconfig the SCD for the queue if the window size has
@@ -2089,9 +2204,12 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
 			   &cfg, wdg_timeout);
 
-	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
-	if (ret)
-		return -EIO;
+	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
+	if (queue_status != IWL_MVM_QUEUE_SHARED) {
+		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+		if (ret)
+			return -EIO;
+	}
 
 	/* No need to mark as reserved */
 	spin_lock_bh(&mvm->queue_info_lock);
@@ -2123,7 +2241,6 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	u16 txq_id;
 	int err;
 
-
 	/*
 	 * If mac80211 is cleaning its state, then say that we finished since
 	 * our state has been cleared anyway.
@@ -2152,6 +2269,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	 */
 	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
+
 	spin_unlock_bh(&mvm->queue_info_lock);
 
 	switch (tid_data->state) {
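A note on the locking change in iwl_mvm_sta_tx_agg_start() above: mvm->queue_info_lock is now taken with plain spin_lock() rather than spin_lock_bh(), evidently because the caller already holds mvmsta->lock with bottom halves disabled (see the spin_unlock_bh(&mvmsta->lock) in the unwind path). Since the lock now stays held across the whole queue-allocation branch, the error paths are restructured around the new release_locks/out labels instead of unlocking inline.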

drivers/net/wireless/intel/iwlwifi/mvm/sta.h

Lines changed: 4 additions & 0 deletions
@@ -554,4 +554,8 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
 
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+			       int ac, int ssn, unsigned int wdg_timeout,
+			       bool force);
+
 #endif /* __sta_h__ */
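Declaring iwl_mvm_scd_queue_redirect() here goes with dropping the "static" from its definition in sta.c above: the redirect helper becomes callable from outside sta.c, so the queue-reconfiguration flow can reuse it rather than duplicate the SCD redirect logic.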
