@@ -759,8 +759,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 		spin_unlock_bh(&mvm->queue_info_lock);
 
 		/* Disable the queue */
-		iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
-					     true);
+		if (disable_agg_tids)
+			iwl_mvm_invalidate_sta_queue(mvm, queue,
+						     disable_agg_tids, false);
 		iwl_trans_txq_disable(mvm->trans, queue, false);
 		ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
 					   &cmd);
@@ -776,6 +777,10 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
 
 			return ret;
 		}
+
+		/* If TXQ is allocated to another STA, update removal in FW */
+		if (cmd.sta_id != mvmsta->sta_id)
+			iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
 	}
 
 	IWL_DEBUG_TX_QUEUES(mvm,
@@ -1072,6 +1077,61 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
 	return 0;
 }
 
+/*
+ * In DQA mode, after a HW restart the queues should be allocated as before, in
+ * order to avoid race conditions when there are shared queues. This function
+ * does the re-mapping and queue allocation.
+ *
+ * Note that re-enabling aggregations isn't done in this function.
+ */
+static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
+						 struct iwl_mvm_sta *mvm_sta)
+{
+	unsigned int wdg_timeout =
+			iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
+	int i;
+	struct iwl_trans_txq_scd_cfg cfg = {
+		.sta_id = mvm_sta->sta_id,
+		.frame_limit = IWL_FRAME_LIMIT,
+	};
+
+	/* Make sure reserved queue is still marked as such (or allocated) */
+	mvm->queue_info[mvm_sta->reserved_queue].status =
+		IWL_MVM_QUEUE_RESERVED;
+
+	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
+		int txq_id = tid_data->txq_id;
+		int ac;
+		u8 mac_queue;
+
+		if (txq_id == IEEE80211_INVAL_HW_QUEUE)
+			continue;
+
+		skb_queue_head_init(&tid_data->deferred_tx_frames);
+
+		ac = tid_to_mac80211_ac[i];
+		mac_queue = mvm_sta->vif->hw_queue[ac];
+
+		cfg.tid = i;
+		cfg.fifo = iwl_mvm_ac_to_tx_fifo[ac];
+		cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+				 txq_id == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "Re-mapping sta %d tid %d to queue %d\n",
+				    mvm_sta->sta_id, i, txq_id);
+
+		iwl_mvm_enable_txq(mvm, txq_id, mac_queue,
+				   IEEE80211_SEQ_TO_SN(tid_data->seq_number),
+				   &cfg, wdg_timeout);
+
+		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+	}
+
+	atomic_set(&mvm->pending_frames[mvm_sta->sta_id], 0);
+}
+
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		    struct ieee80211_vif *vif,
 		    struct ieee80211_sta *sta)
@@ -1094,6 +1154,13 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 
 	spin_lock_init(&mvm_sta->lock);
 
+	/* In DQA mode, if this is a HW restart, re-alloc existing queues */
+	if (iwl_mvm_is_dqa_supported(mvm) &&
+	    test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
+		goto update_fw;
+	}
+
 	mvm_sta->sta_id = sta_id;
 	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
 						      mvmvif->color);
@@ -1157,6 +1224,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		goto err;
 	}
 
+update_fw:
 	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
 	if (ret)
 		goto err;