Commit fdccafad authored by Miri Korenblit, committed by Johannes Berg

wifi: iwlwifi: add support for a wiphy_work rx handler

The wiphy_work infrastructure ensures that the entire worker runs
with the wiphy mutex held. It is useful to run RX handlers as a
wiphy_work when we don't want a handler to run in parallel with
mac80211 work (to avoid races).
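
For illustration, a minimal sketch of the pattern (not part of this
patch; my_priv, my_bt_notif_wk and the other my_* names are
hypothetical, only the wiphy_work_init()/wiphy_work_queue()/
wiphy_work_cancel() calls and the handler signature are the real
cfg80211 API, as also used in the diff below):

	#include <net/cfg80211.h>

	struct my_priv {
		struct wiphy *wiphy;		/* hypothetical driver state */
		struct wiphy_work bt_notif_wk;
	};

	/* Runs with the wiphy mutex held, so it is serialized against
	 * mac80211 operations that also take that mutex.
	 */
	static void my_bt_notif_wk(struct wiphy *wiphy, struct wiphy_work *wk)
	{
		struct my_priv *priv =
			container_of(wk, struct my_priv, bt_notif_wk);

		/* handle the notification under the wiphy mutex */
		(void)priv;
	}

	static void my_init(struct my_priv *priv)
	{
		wiphy_work_init(&priv->bt_notif_wk, my_bt_notif_wk);
	}

	/* Called from the RX path: defer to a wiphy-locked context */
	static void my_rx_bt_notif(struct my_priv *priv)
	{
		wiphy_work_queue(priv->wiphy, &priv->bt_notif_wk);
	}

	/* On stop, cancel pending work (cf. iwl_mvm_mac_stop() below) */
	static void my_stop(struct my_priv *priv)
	{
		wiphy_work_cancel(priv->wiphy, &priv->bt_notif_wk);
	}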

For example, starting with the next patch, a BT notification can
disable eSR.
In ieee80211_set_active_links() we first check that eSR is allowed
(drv_can_activate_links()) and then activate it.
If the BT notification were received after drv_can_activate_links()
returned true, but before the activation, eSR would be activated
when it shouldn't be.
If the BT notification is handled while holding the wiphy mutex, it
cannot run in parallel with ieee80211_set_active_links(), which also
holds that mutex.
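
Schematically, the race being closed looks like this (an illustrative
interleaving, not a captured trace):

	CPU0 (mac80211, wiphy mutex held)	CPU1 (iwlwifi RX path)
	ieee80211_set_active_links()
	  drv_can_activate_links() -> true
						BT notification arrives:
						eSR no longer allowed
	  ... eSR is activated anyway

With the BT notification handled as a wiphy_work, the CPU1 handler is
deferred until ieee80211_set_active_links() drops the wiphy mutex, so
the check and the activation are atomic with respect to it.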

Add the necessary infrastructure here, for use in the next commit.
Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
Reviewed-by: Gregory Greenman <gregory.greenman@intel.com>
Reviewed-by: Johannes Berg <johannes.berg@intel.com>
Link: https://msgid.link/20240123200528.ce83d16cdec8.I35ef53fa23f58b9ec17924099238b61deafcecd7@changeid
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent c4d32f27
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1342,6 +1342,7 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
 	 * discover that its list is now empty.
 	 */
 	cancel_work_sync(&mvm->async_handlers_wk);
+	wiphy_work_cancel(hw->wiphy, &mvm->async_handlers_wiphy_wk);
 }
 
 struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -848,6 +848,9 @@ struct iwl_mvm {
 	spinlock_t async_handlers_lock;
 	struct work_struct async_handlers_wk;
 
+	/* For async rx handlers that require the wiphy lock */
+	struct wiphy_work async_handlers_wiphy_wk;
+
 	struct work_struct roc_done_wk;
 
 	unsigned long init_status;
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -267,11 +267,15 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
  *	it will be called from a worker with mvm->mutex held.
  * @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the
  *	mutex itself, it will be called from a worker without mvm->mutex held.
+ * @RX_HANDLER_ASYNC_LOCKED_WIPHY: If the handler needs to hold the wiphy lock
+ *	and mvm->mutex. Will be handled with the wiphy_work queue infra
+ *	instead of regular work queue.
  */
 enum iwl_rx_handler_context {
 	RX_HANDLER_SYNC,
 	RX_HANDLER_ASYNC_LOCKED,
 	RX_HANDLER_ASYNC_UNLOCKED,
+	RX_HANDLER_ASYNC_LOCKED_WIPHY,
 };
 
 /**
@@ -673,6 +677,8 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
 /* this forward declaration can avoid to export the function */
 static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+					    struct wiphy_work *work);
 
 static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
 {
@@ -1265,6 +1271,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	INIT_LIST_HEAD(&mvm->add_stream_txqs);
 	spin_lock_init(&mvm->add_stream_lock);
+	wiphy_work_init(&mvm->async_handlers_wiphy_wk,
+			iwl_mvm_async_handlers_wiphy_wk);
 
 	init_waitqueue_head(&mvm->rx_sync_waitq);
 	mvm->queue_sync_state = 0;
@@ -1551,35 +1559,62 @@ void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
 	spin_unlock_bh(&mvm->async_handlers_lock);
 }
 
-static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+/*
+ * This function receives a bitmap of rx async handler contexts
+ * (&iwl_rx_handler_context) to handle, and runs only them
+ */
+static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm,
+					      u8 contexts)
 {
-	struct iwl_mvm *mvm =
-		container_of(wk, struct iwl_mvm, async_handlers_wk);
 	struct iwl_async_handler_entry *entry, *tmp;
 	LIST_HEAD(local_list);
 
-	/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
-
 	/*
-	 * Sync with Rx path with a lock. Remove all the entries from this list,
-	 * add them to a local one (lock free), and then handle them.
+	 * Sync with Rx path with a lock. Remove all the entries of the
+	 * wanted contexts from this list, add them to a local one (lock free),
+	 * and then handle them.
 	 */
 	spin_lock_bh(&mvm->async_handlers_lock);
-	list_splice_init(&mvm->async_handlers_list, &local_list);
+	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+		if (!(BIT(entry->context) & contexts))
+			continue;
+		list_del(&entry->list);
+		list_add_tail(&entry->list, &local_list);
+	}
 	spin_unlock_bh(&mvm->async_handlers_lock);
 
 	list_for_each_entry_safe(entry, tmp, &local_list, list) {
-		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+		if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
 			mutex_lock(&mvm->mutex);
 		entry->fn(mvm, &entry->rxb);
 		iwl_free_rxb(&entry->rxb);
 		list_del(&entry->list);
-		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+		if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
 			mutex_unlock(&mvm->mutex);
 		kfree(entry);
 	}
 }
 
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+					    struct wiphy_work *wk)
+{
+	struct iwl_mvm *mvm =
+		container_of(wk, struct iwl_mvm, async_handlers_wiphy_wk);
+	u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY);
+
+	iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+	struct iwl_mvm *mvm =
+		container_of(wk, struct iwl_mvm, async_handlers_wk);
+	u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED) |
+		      BIT(RX_HANDLER_ASYNC_UNLOCKED);
+
+	iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
 static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
 					    struct iwl_rx_packet *pkt)
 {
@@ -1659,7 +1694,11 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
 		spin_lock(&mvm->async_handlers_lock);
 		list_add_tail(&entry->list, &mvm->async_handlers_list);
 		spin_unlock(&mvm->async_handlers_lock);
-		schedule_work(&mvm->async_handlers_wk);
+		if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY)
+			wiphy_work_queue(mvm->hw->wiphy,
+					 &mvm->async_handlers_wiphy_wk);
+		else
+			schedule_work(&mvm->async_handlers_wk);
 		break;
 	}
 }
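
With this in place, a handler opts into the new context through its
entry in the RX handlers table. A hypothetical sketch, assuming the
RX_HANDLER() macro shape already used in ops.c, i.e.
RX_HANDLER(cmd, fn, context, packet struct); the next commit is
expected to switch the BT coex notification to the new context
roughly like:

	RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
		   RX_HANDLER_ASYNC_LOCKED_WIPHY,
		   struct iwl_bt_coex_profile_notif),

iwl_mvm_rx_common() then queues such entries on async_handlers_wiphy_wk
instead of async_handlers_wk, and iwl_mvm_async_handlers_by_context()
picks them up via BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY).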