@@ -223,6 +223,39 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
223223 return ret ;
224224}
225225
226+ static void iwl_mvm_rx_agg_session_expired (unsigned long data )
227+ {
228+ struct iwl_mvm_baid_data __rcu * * rcu_ptr = (void * )data ;
229+ struct iwl_mvm_baid_data * ba_data ;
230+ struct ieee80211_sta * sta ;
231+ struct iwl_mvm_sta * mvm_sta ;
232+ unsigned long timeout ;
233+
234+ rcu_read_lock ();
235+
236+ ba_data = rcu_dereference (* rcu_ptr );
237+
238+ if (WARN_ON (!ba_data ))
239+ goto unlock ;
240+
241+ if (!ba_data -> timeout )
242+ goto unlock ;
243+
244+ timeout = ba_data -> last_rx + TU_TO_JIFFIES (ba_data -> timeout * 2 );
245+ if (time_is_after_jiffies (timeout )) {
246+ mod_timer (& ba_data -> session_timer , timeout );
247+ goto unlock ;
248+ }
249+
250+ /* Timer expired */
251+ sta = rcu_dereference (ba_data -> mvm -> fw_id_to_mac_id [ba_data -> sta_id ]);
252+ mvm_sta = iwl_mvm_sta_from_mac80211 (sta );
253+ ieee80211_stop_rx_ba_session_offl (mvm_sta -> vif ,
254+ sta -> addr , ba_data -> tid );
255+ unlock :
256+ rcu_read_unlock ();
257+ }
258+
226259static int iwl_mvm_tdls_sta_init (struct iwl_mvm * mvm ,
227260 struct ieee80211_sta * sta )
228261{
@@ -1134,11 +1167,22 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
11341167
11351168#define IWL_MAX_RX_BA_SESSIONS 16
11361169
1170+ static void iwl_mvm_sync_rxq_del_ba (struct iwl_mvm * mvm )
1171+ {
1172+ struct iwl_mvm_internal_rxq_notif data = {
1173+ .type = IWL_MVM_RXQ_EMPTY ,
1174+ .sync = 1 ,
1175+ };
1176+
1177+ iwl_mvm_sync_rx_queues_internal (mvm , & data , sizeof (data ));
1178+ }
1179+
/*
 * iwl_mvm_sta_rx_agg - start or stop an RX aggregation (BA) session in FW
 *
 * New parameter @timeout (in TUs) drives the driver-side inactivity timer
 * for the session; 0 disables inactivity expiry (see the start path below).
 *
 * NOTE(review): this span is a unified diff with elided context between
 * hunks ("@@" lines); only the changed regions are visible here.
 */
11371180int iwl_mvm_sta_rx_agg (struct iwl_mvm * mvm , struct ieee80211_sta * sta ,
1138- int tid , u16 ssn , bool start , u8 buf_size )
1181+ int tid , u16 ssn , bool start , u8 buf_size , u16 timeout )
11391182{
11401183 struct iwl_mvm_sta * mvm_sta = iwl_mvm_sta_from_mac80211 (sta );
11411184 struct iwl_mvm_add_sta_cmd cmd = {};
/* NULL unless the start path allocates it; out_free relies on this */
1185+ struct iwl_mvm_baid_data * baid_data = NULL ;
11421186 int ret ;
11431187 u32 status ;
11441188
@@ -1149,6 +1193,16 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
11491193 return - ENOSPC ;
11501194 }
11511195
1196+ if (iwl_mvm_has_new_rx_api (mvm ) && start ) {
1197+ /*
1198+ * Allocate here so if allocation fails we can bail out early
1199+ * before starting the BA session in the firmware
1200+ */
1201+ baid_data = kzalloc (sizeof (* baid_data ), GFP_KERNEL );
1202+ if (!baid_data )
1203+ return - ENOMEM ;
1204+ }
1205+
11521206 cmd .mac_id_n_color = cpu_to_le32 (mvm_sta -> mac_id_n_color );
11531207 cmd .sta_id = mvm_sta -> sta_id ;
11541208 cmd .add_modify = STA_MODE_MODIFY ;
@@ -1167,7 +1221,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
11671221 iwl_mvm_add_sta_cmd_size (mvm ),
11681222 & cmd , & status );
/* command send failure: free the preallocated BA data, not just return */
11691223 if (ret )
1170- return ret ;
1224+ goto out_free ;
11711225
11721226 switch (status & IWL_ADD_STA_STATUS_MASK ) {
11731227 case ADD_STA_SUCCESS :
@@ -1185,14 +1239,74 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
11851239 break ;
11861240 }
11871241
1188- if (!ret ) {
1189- if (start )
1190- mvm -> rx_ba_sessions ++ ;
1191- else if (mvm -> rx_ba_sessions > 0 )
1192- /* check that restart flow didn't zero the counter */
1193- mvm -> rx_ba_sessions -- ;
/* FW reported an error status for the session: release and bail */
1242+ if (ret )
1243+ goto out_free ;
1244+
/*
 * Start path: on the new RX API, extract the firmware-assigned BAID
 * from the command status, fill the preallocated baid_data, arm the
 * inactivity timer (2 * timeout TUs) and publish it in baid_map.
 */
1245+ if (start ) {
1246+ u8 baid ;
1247+
1248+ mvm -> rx_ba_sessions ++ ;
1249+
1250+ if (!iwl_mvm_has_new_rx_api (mvm ))
1251+ return 0 ;
1252+
1253+ if (WARN_ON (!(status & IWL_ADD_STA_BAID_VALID_MASK ))) {
1254+ ret = - EINVAL ;
1255+ goto out_free ;
1256+ }
1257+ baid = (u8 )((status & IWL_ADD_STA_BAID_MASK ) >>
1258+ IWL_ADD_STA_BAID_SHIFT );
1259+ baid_data -> baid = baid ;
1260+ baid_data -> timeout = timeout ;
1261+ baid_data -> last_rx = jiffies ;
/* timer payload is the baid_map slot address, matching the callback */
1262+ init_timer (& baid_data -> session_timer );
1263+ baid_data -> session_timer .function =
1264+ iwl_mvm_rx_agg_session_expired ;
1265+ baid_data -> session_timer .data =
1266+ (unsigned long )& mvm -> baid_map [baid ];
1267+ baid_data -> mvm = mvm ;
1268+ baid_data -> tid = tid ;
1269+ baid_data -> sta_id = mvm_sta -> sta_id ;
1270+
1271+ mvm_sta -> tid_to_baid [tid ] = baid ;
/* timeout == 0: timer stays unarmed, session never expires on idle */
1272+ if (timeout )
1273+ mod_timer (& baid_data -> session_timer ,
1274+ TU_TO_EXP_TIME (timeout * 2 ));
1275+
1276+ /*
1277+ * protect the BA data with RCU to cover a case where our
1278+ * internal RX sync mechanism will timeout (not that it's
1279+ * supposed to happen) and we will free the session data while
1280+ * RX is being processed in parallel
1281+ */
1282+ WARN_ON (rcu_access_pointer (mvm -> baid_map [baid ]));
1283+ rcu_assign_pointer (mvm -> baid_map [baid ], baid_data );
/*
 * Stop path: drop the counter, look the session up by its BAID,
 * quiesce the RX queues, kill the timer, then free via RCU grace
 * period so in-flight RX processing cannot use freed data.
 */
1284+ } else if (mvm -> rx_ba_sessions > 0 ) {
1285+ u8 baid = mvm_sta -> tid_to_baid [tid ];
1286+
1287+ /* check that restart flow didn't zero the counter */
1288+ mvm -> rx_ba_sessions -- ;
1289+ if (!iwl_mvm_has_new_rx_api (mvm ))
1290+ return 0 ;
1291+
1292+ if (WARN_ON (baid == IWL_RX_REORDER_DATA_INVALID_BAID ))
1293+ return - EINVAL ;
1294+
/*
 * NOTE(review): rcu_access_pointer without rcu_read_lock here --
 * presumably safe because this runs under the mvm mutex, which
 * serializes all writers of baid_map; confirm against callers.
 */
1295+ baid_data = rcu_access_pointer (mvm -> baid_map [baid ]);
1296+ if (WARN_ON (!baid_data ))
1297+ return - EINVAL ;
1298+
1299+ /* synchronize all rx queues so we can safely delete */
1300+ iwl_mvm_sync_rxq_del_ba (mvm );
1301+ del_timer_sync (& baid_data -> session_timer );
1302+
1303+ RCU_INIT_POINTER (mvm -> baid_map [baid ], NULL );
1304+ kfree_rcu (baid_data , rcu_head );
11941305 }
1306+ return 0 ;
11951307
/* error path: baid_data is NULL here unless the start path allocated it */
1308+ out_free :
1309+ kfree (baid_data );
11961310 return ret ;
11971311}
11981312
0 commit comments