Commit 25edc8f2 authored by Johannes Berg, committed by Luca Coelho

iwlwifi: pcie: properly implement NAPI

Instead of pretending to have NAPI and then relying entirely on
interrupts anyway, properly implement NAPI and schedule the poll
when we get an interrupt, re-enabling the interrupt only after
the poll completed.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Link: https://lore.kernel.org/r/iwlwifi.20210117130510.a5951ac4fc06.I9c84a147288fcfb1b019572c6758f2d92949f5d7@changeid
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent d4e3a341
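
The commit message describes the standard NAPI contract: the hard-IRQ handler only masks the device interrupt and schedules the poll, and the interrupt is unmasked again only once the poll has consumed less than its budget and completed. The sketch below is a generic, hypothetical illustration of that pattern (the "my_*" names are placeholders, not iwlwifi symbols), not the driver's actual implementation:

```c
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_priv {
	struct napi_struct napi;	/* registered elsewhere via netif_napi_add() */
};

/* Hypothetical device helpers, assumed to exist in the real driver. */
void my_mask_device_irq(struct my_priv *priv);
void my_unmask_device_irq(struct my_priv *priv);
int my_process_rx(struct my_priv *priv, int budget);

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	my_mask_device_irq(priv);	/* stop further RX interrupts */
	napi_schedule(&priv->napi);	/* defer the real work to softirq */

	return IRQ_HANDLED;
}

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int done = my_process_rx(priv, budget);	/* handle up to 'budget' frames */

	/* Re-enable the interrupt only after the poll has completed. */
	if (done < budget && napi_complete_done(napi, done))
		my_unmask_device_irq(priv);

	return done;
}
```

In this commit the re-enable step in the poll path appears to take the transport's irq_lock from softirq context, which is what motivates the spin_lock() to spin_lock_bh() conversions in the hunks below.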
@@ -418,8 +418,7 @@ IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
 	return (void *)trans->trans_specific;
 }
 
-static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
-				      struct msix_entry *entry)
+static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, int queue)
 {
 	/*
 	 * Before sending the interrupt the HW disables it to prevent
@@ -429,7 +428,7 @@ static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
 	 * write 1 clear (W1C) register, meaning that it's being clear
 	 * by writing 1 to the bit.
 	 */
-	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(queue));
 }
static inline struct iwl_trans *
@@ -462,7 +461,6 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans);
 void iwl_pcie_rx_free(struct iwl_trans *trans);
 void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
 void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
-int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
 void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
 			    struct iwl_rxq *rxq);
@@ -569,9 +567,9 @@ static inline void iwl_disable_interrupts(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	spin_lock(&trans_pcie->irq_lock);
+	spin_lock_bh(&trans_pcie->irq_lock);
 	_iwl_disable_interrupts(trans);
-	spin_unlock(&trans_pcie->irq_lock);
+	spin_unlock_bh(&trans_pcie->irq_lock);
 }
 
 static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
@@ -601,9 +599,9 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-	spin_lock(&trans_pcie->irq_lock);
+	spin_lock_bh(&trans_pcie->irq_lock);
 	_iwl_enable_interrupts(trans);
-	spin_unlock(&trans_pcie->irq_lock);
+	spin_unlock_bh(&trans_pcie->irq_lock);
 }
 
 static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
 {
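
One possible reading of the spin_lock() to spin_lock_bh() conversions above and below: with NAPI properly implemented, the poll routine runs in softirq context and also acquires irq_lock when it re-enables interrupts, so any process-context path holding that lock must block bottom halves; otherwise the softirq could run on the same CPU while the lock is held and spin on it forever. A minimal, hypothetical illustration of that locking rule (not iwlwifi code; irq_lock here stands in for trans_pcie->irq_lock):

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(irq_lock);	/* stand-in for trans_pcie->irq_lock */

/* Runs in softirq context (the NAPI poll): a plain spin_lock is fine here. */
static void poll_side(void)
{
	spin_lock(&irq_lock);
	/* ... re-enable the device interrupt ... */
	spin_unlock(&irq_lock);
}

/* Runs in process context (init/stop paths): must block softirqs while
 * holding the lock, otherwise poll_side() could interrupt this CPU and
 * deadlock on the lock we already hold. */
static void process_side(void)
{
	spin_lock_bh(&irq_lock);
	/* ... touch state shared with the poll path ... */
	spin_unlock_bh(&irq_lock);
}
```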
@@ -213,9 +213,9 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
 			  trans->cfg->min_txq_size);
 
 	/* TODO: most of the logic can be removed in A0 - but not in Z0 */
-	spin_lock(&trans_pcie->irq_lock);
+	spin_lock_bh(&trans_pcie->irq_lock);
 	iwl_pcie_gen2_apm_init(trans);
-	spin_unlock(&trans_pcie->irq_lock);
+	spin_unlock_bh(&trans_pcie->irq_lock);
 
 	iwl_op_mode_nic_config(trans->op_mode);
@@ -511,9 +511,9 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
 	int ret;
 
 	/* nic_init */
-	spin_lock(&trans_pcie->irq_lock);
+	spin_lock_bh(&trans_pcie->irq_lock);
 	ret = iwl_pcie_apm_init(trans);
-	spin_unlock(&trans_pcie->irq_lock);
+	spin_unlock_bh(&trans_pcie->irq_lock);
 
 	if (ret)
 		return ret;
@@ -393,7 +393,7 @@ static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
 	int ch, ret;
 	u32 mask = 0;
 
-	spin_lock(&trans_pcie->irq_lock);
+	spin_lock_bh(&trans_pcie->irq_lock);
 
 	if (!iwl_trans_grab_nic_access(trans, &flags))
 		goto out;
@@ -414,7 +414,7 @@ static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
 	iwl_trans_release_nic_access(trans, &flags);
 
 out:
-	spin_unlock(&trans_pcie->irq_lock);
+	spin_unlock_bh(&trans_pcie->irq_lock);
 }
 
 /*
@@ -571,7 +571,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
 		alloc = true;
 	}
 
-	spin_lock(&trans_pcie->irq_lock);
+	spin_lock_bh(&trans_pcie->irq_lock);
 
 	/* Turn off all Tx DMA fifos */
 	iwl_scd_deactivate_fifos(trans);
@@ -580,7 +580,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
 	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
 			   trans_pcie->kw.dma >> 4);
 
-	spin_unlock(&trans_pcie->irq_lock);
+	spin_unlock_bh(&trans_pcie->irq_lock);
 
 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
 	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;