Commit 0964fc8f authored by Stanislaw Kardach, committed by David S. Miller

octeontx2-af: Relax resource lock into mutex

Mailbox message handling is done in a workqueue context scheduled
from the interrupt handler, so the resource locks do not need to be
spinlocks. Relax them into mutexes so that later on they can be taken
in routines that might sleep.
Signed-off-by: Stanislaw Kardach <skardach@marvell.com>
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 34425e8c
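The change relies on a standard deferral pattern: the hard interrupt handler only queues work, and the actual mailbox processing runs in process context, where sleeping is allowed. Below is a minimal, self-contained sketch of that pattern; all names (demo_rsrc_lock, demo_mbox_handler, and so on) are illustrative and do not come from the octeontx2-af driver.

/*
 * Sketch of the locking pattern this commit relies on. Names are
 * illustrative, not taken from the octeontx2-af driver.
 */
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(demo_rsrc_lock);

static void demo_mbox_handler(struct work_struct *work)
{
	void *buf;

	/* Workqueue callbacks run in process context, so sleeping
	 * under this lock is legal: both the mutex acquisition and a
	 * GFP_KERNEL allocation may block.
	 */
	mutex_lock(&demo_rsrc_lock);
	buf = kzalloc(64, GFP_KERNEL);	/* would need GFP_ATOMIC under a spinlock */
	if (buf) {
		/* ... allocate/free shared resources ... */
		kfree(buf);
	}
	mutex_unlock(&demo_rsrc_lock);
}

static DECLARE_WORK(demo_mbox_work, demo_mbox_handler);

static irqreturn_t demo_mbox_irq(int irq, void *data)
{
	/* Hard IRQ context: do no real work here, just defer it. */
	schedule_work(&demo_mbox_work);
	return IRQ_HANDLED;
}

Under the old spinlock, any allocation inside the critical section had to use GFP_ATOMIC; that is exactly what the GFP_ATOMIC to GFP_KERNEL change in nix_update_mce_list() below undoes.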
@@ -153,17 +153,17 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot)
 	u16 match = 0;
 	int lf;
 
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 	for (lf = 0; lf < block->lf.max; lf++) {
 		if (block->fn_map[lf] == pcifunc) {
 			if (slot == match) {
-				spin_unlock(&rvu->rsrc_lock);
+				mutex_unlock(&rvu->rsrc_lock);
 				return lf;
 			}
 			match++;
 		}
 	}
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 	return -ENODEV;
 }
@@ -597,6 +597,8 @@ static void rvu_free_hw_resources(struct rvu *rvu)
 	dma_unmap_resource(rvu->dev, rvu->msix_base_iova,
 			   max_msix * PCI_MSIX_ENTRY_SIZE,
 			   DMA_BIDIRECTIONAL, 0);
+
+	mutex_destroy(&rvu->rsrc_lock);
 }
 
 static int rvu_setup_hw_resources(struct rvu *rvu)
@@ -752,7 +754,7 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
 	if (!rvu->hwvf)
 		return -ENOMEM;
 
-	spin_lock_init(&rvu->rsrc_lock);
+	mutex_init(&rvu->rsrc_lock);
 
 	err = rvu_setup_msix_resources(rvu);
 	if (err)
@@ -926,7 +928,7 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
 	struct rvu_block *block;
 	int blkid;
 
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 
 	/* Check for partial resource detach */
 	if (detach && detach->partial)
@@ -956,7 +958,7 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach,
 		rvu_detach_block(rvu, pcifunc, block->type);
 	}
 
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 	return 0;
 }
@@ -1119,7 +1121,7 @@ static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
 	if (!attach->modify)
 		rvu_detach_rsrcs(rvu, NULL, pcifunc);
 
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 
 	/* Check if the request can be accommodated */
 	err = rvu_check_rsrc_availability(rvu, attach, pcifunc);
@@ -1163,7 +1165,7 @@ static int rvu_mbox_handler_attach_resources(struct rvu *rvu,
 	}
 
 exit:
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 	return err;
 }
...
@@ -64,7 +64,7 @@ struct nix_mcast {
 	struct qmem *mcast_buf;
 	int replay_pkind;
 	int next_free_mce;
-	spinlock_t mce_lock; /* Serialize MCE updates */
+	struct mutex mce_lock; /* Serialize MCE updates */
 };
 
 struct nix_mce_list {
@@ -74,7 +74,7 @@ struct nix_mce_list {
 };
 
 struct npc_mcam {
-	spinlock_t lock; /* MCAM entries and counters update lock */
+	struct mutex lock; /* MCAM entries and counters update lock */
 	u8 keysize; /* MCAM keysize 112/224/448 bits */
 	u8 banks; /* Number of MCAM banks */
 	u8 banks_per_entry;/* Number of keywords in key */
@@ -174,7 +174,7 @@ struct rvu {
 	struct rvu_hwinfo *hw;
 	struct rvu_pfvf *pf;
 	struct rvu_pfvf *hwvf;
-	spinlock_t rsrc_lock; /* Serialize resource alloc/free */
+	struct mutex rsrc_lock; /* Serialize resource alloc/free */
 
 	/* Mbox */
 	struct otx2_mbox mbox;
...
@@ -109,12 +109,12 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
 	if (schq >= txsch->schq.max)
 		return false;
 
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 	if (txsch->pfvf_map[schq] != pcifunc) {
-		spin_unlock(&rvu->rsrc_lock);
+		mutex_unlock(&rvu->rsrc_lock);
 		return false;
 	}
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 	return true;
 }
@@ -953,7 +953,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 	if (!nix_hw)
 		return -EINVAL;
 
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 	for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
 		txsch = &nix_hw->txsch[lvl];
 		req_schq = req->schq_contig[lvl] + req->schq[lvl];
@@ -1009,7 +1009,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu,
 err:
 	rc = NIX_AF_ERR_TLX_ALLOC_FAIL;
 exit:
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 	return rc;
 }
@@ -1034,7 +1034,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
 		return NIX_AF_ERR_AF_LF_INVALID;
 
 	/* Disable TL2/3 queue links before SMQ flush*/
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 	for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
 		if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4)
 			continue;
@@ -1076,7 +1076,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
 			txsch->pfvf_map[schq] = 0;
 		}
 	}
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 
 	/* Sync cached info for this LF in NDC-TX to LLC/DRAM */
 	rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
@@ -1308,7 +1308,7 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list,
 		return 0;
 
 	/* Add a new one to the list, at the tail */
-	mce = kzalloc(sizeof(*mce), GFP_ATOMIC);
+	mce = kzalloc(sizeof(*mce), GFP_KERNEL);
 	if (!mce)
 		return -ENOMEM;
 	mce->idx = idx;
@@ -1354,7 +1354,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
 		return -EINVAL;
 	}
 
-	spin_lock(&mcast->mce_lock);
+	mutex_lock(&mcast->mce_lock);
 
 	err = nix_update_mce_list(mce_list, pcifunc, idx, add);
 	if (err)
@@ -1384,7 +1384,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add)
 	}
 
 end:
-	spin_unlock(&mcast->mce_lock);
+	mutex_unlock(&mcast->mce_lock);
 	return err;
 }
@@ -1469,7 +1469,7 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr)
 		      BIT_ULL(63) | (mcast->replay_pkind << 24) |
 		      BIT_ULL(20) | MC_BUF_CNT);
 
-	spin_lock_init(&mcast->mce_lock);
+	mutex_init(&mcast->mce_lock);
 
 	return nix_setup_bcast_tables(rvu, nix_hw);
 }
@@ -1869,7 +1869,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
 	/* Update min/maxlen in each of the SMQ attached to this PF/VF */
 	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
-	spin_lock(&rvu->rsrc_lock);
+	mutex_lock(&rvu->rsrc_lock);
 	for (schq = 0; schq < txsch->schq.max; schq++) {
 		if (txsch->pfvf_map[schq] != pcifunc)
 			continue;
@@ -1879,7 +1879,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req,
 		cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F);
 		rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
 	}
-	spin_unlock(&rvu->rsrc_lock);
+	mutex_unlock(&rvu->rsrc_lock);
 
 rx_frscfg:
 	/* Check if config is for SDP link */
@@ -2162,5 +2162,6 @@ void rvu_nix_freemem(struct rvu *rvu)
 		mcast = &nix_hw->mcast;
 		qmem_free(rvu->dev, mcast->mce_ctx);
 		qmem_free(rvu->dev, mcast->mcast_buf);
+		mutex_destroy(&mcast->mce_lock);
 	}
 }
...
@@ -732,7 +732,7 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
 	mcam->nixlf_offset = mcam->entries;
 	mcam->pf_offset = mcam->nixlf_offset + nixlf_count;
 
-	spin_lock_init(&mcam->lock);
+	mutex_init(&mcam->lock);
 
 	return 0;
 }
@@ -811,6 +811,8 @@ int rvu_npc_init(struct rvu *rvu)
 void rvu_npc_freemem(struct rvu *rvu)
 {
 	struct npc_pkind *pkind = &rvu->hw->pkind;
+	struct npc_mcam *mcam = &rvu->hw->mcam;
 
 	kfree(pkind->rsrc.bmap);
+	mutex_destroy(&mcam->lock);
 }
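Note the lifecycle pairing the commit adds alongside the lock conversion: each mutex_init() gains a matching mutex_destroy() on the teardown path (rvu_free_hw_resources(), rvu_nix_freemem(), rvu_npc_freemem()). A minimal sketch of that pairing, with a hypothetical demo_dev structure standing in for the driver state:

#include <linux/mutex.h>

struct demo_dev {
	struct mutex rsrc_lock;	/* serializes resource alloc/free */
};

static void demo_setup(struct demo_dev *dd)
{
	/* Initialize before anyone can contend for the lock. */
	mutex_init(&dd->rsrc_lock);
}

static void demo_teardown(struct demo_dev *dd)
{
	/* mutex_destroy() is a no-op unless CONFIG_DEBUG_MUTEXES is
	 * enabled, in which case it marks the mutex unusable so any
	 * late user is caught by the debug checks.
	 */
	mutex_destroy(&dd->rsrc_lock);
}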