Commit 149f3b73 authored by Srujana Challa, committed by Jakub Kicinski

octeontx2-af: Add support to flush full CPT CTX cache

Adds support to flush or invalidate CPT CTX entries as part of FLR,
and provides a mailbox message to flush CPT CTX entries on graceful
exit. This patch also adds support for AF -> CPT PF uplink mailbox
messages, along with a new mbox message to submit a CPT instruction
from the AF.
Signed-off-by: Srujana Challa <schalla@marvell.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 7054d39c
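For illustration, here is a minimal sketch (not part of the patch) of how a CPT PF/VF driver could issue the new CPT_CTX_CACHE_SYNC request to the AF on graceful exit, using only the mbox helpers that appear in this patch; the function name and the devid 0 addressing are assumptions, and error handling is trimmed:

```c
#include "mbox.h"	/* otx2 AF mbox message definitions */

/* Hypothetical requester: ask the AF to flush/invalidate this
 * pcifunc's CPT CTX cache entries before a graceful teardown.
 * The AF derives the sender's pcifunc from the mbox channel.
 */
static int cpt_ctx_cache_sync(struct otx2_mbox *mbox)
{
	struct msg_req *req;

	req = (struct msg_req *)
	      otx2_mbox_alloc_msg_rsp(mbox, 0, sizeof(*req),
				      sizeof(struct msg_rsp));
	if (!req)
		return -ENOMEM;

	req->hdr.id = MBOX_MSG_CPT_CTX_CACHE_SYNC;
	req->hdr.sig = OTX2_MBOX_REQ_SIG;

	otx2_mbox_msg_send(mbox, 0);
	return otx2_mbox_wait_for_rsp(mbox, 0);
}
```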
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -191,6 +191,7 @@ M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \
M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
msg_rsp) \
M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
@@ -292,10 +293,14 @@ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
#define MBOX_UP_CGX_MESSAGES \
M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
#define MBOX_UP_CPT_MESSAGES \
M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
MBOX_UP_CPT_MESSAGES
#undef M
};
@@ -1562,6 +1567,13 @@ struct cpt_rxc_time_cfg_req {
u16 active_limit;
};
/* Mailbox message request format to request for CPT_INST_S lmtst. */
struct cpt_inst_lmtst_req {
struct mbox_msghdr hdr;
u64 inst[8];
u64 rsvd;
};
struct sdp_node_info {
/* Node to which this PF belongs */
u8 node_id;
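For reference, the M() expansion shown in the enum above turns the two new entries into these message IDs:

```c
/* Expansion of '#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id' */
enum {
	MBOX_MSG_CPT_CTX_CACHE_SYNC = 0xA07,	/* PF/VF -> AF: flush CTX cache */
	MBOX_MSG_CPT_INST_LMTST = 0xD00,	/* AF -> CPT PF: submit CPT_INST_S */
};
```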
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -817,6 +817,7 @@ int rvu_cpt_register_interrupts(struct rvu *rvu);
void rvu_cpt_unregister_interrupts(struct rvu *rvu);
int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
int slot);
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
/* CN10K RVU */
int rvu_set_channels_base(struct rvu *rvu);
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -795,6 +795,58 @@ int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
return 0;
}
int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
}
static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
{
struct cpt_rxc_time_cfg_req req;
int timeout = 2000;
u64 reg;
if (is_rvu_otx2(rvu))
return;
/* Set time limit to minimum values, so that rxc entries will be
* flushed out quickly.
*/
req.step = 1;
req.zombie_thres = 1;
req.zombie_limit = 1;
req.active_thres = 1;
req.active_limit = 1;
cpt_rxc_time_cfg(rvu, &req, blkaddr);
do {
reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
udelay(1);
if (FIELD_GET(RXC_ACTIVE_COUNT, reg))
timeout--;
else
break;
} while (timeout);
if (timeout == 0)
dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n");
timeout = 2000;
do {
reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
udelay(1);
if (FIELD_GET(RXC_ZOMBIE_COUNT, reg))
timeout--;
else
break;
} while (timeout);
if (timeout == 0)
dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
}
#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
@@ -863,6 +915,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
{
u64 reg;
if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
cpt_rxc_teardown(rvu, blkaddr);
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
@@ -878,3 +933,154 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
return 0;
}
#define CPT_RES_LEN 16
#define CPT_SE_IE_EGRP 1ULL
static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
int nix_blkaddr)
{
int cpt_pf_num = get_cpt_pf_num(rvu);
struct cpt_inst_lmtst_req *req;
dma_addr_t res_daddr;
int timeout = 3000;
u8 cpt_idx;
u64 *inst;
u16 *res;
int rc;
res = kzalloc(CPT_RES_LEN, GFP_KERNEL);
if (!res)
return -ENOMEM;
res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(rvu->dev, res_daddr)) {
dev_err(rvu->dev, "DMA mapping failed for CPT result\n");
rc = -EFAULT;
goto res_free;
}
*res = 0xFFFF;
/* Send mbox message to CPT PF */
req = (struct cpt_inst_lmtst_req *)
otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up,
cpt_pf_num, sizeof(*req),
sizeof(struct msg_rsp));
if (!req) {
rc = -ENOMEM;
goto res_daddr_unmap;
}
req->hdr.sig = OTX2_MBOX_REQ_SIG;
req->hdr.id = MBOX_MSG_CPT_INST_LMTST;
inst = req->inst;
/* Prepare CPT_INST_S */
inst[0] = 0;
inst[1] = res_daddr;
/* AF PF FUNC */
inst[2] = 0;
/* Set QORD */
inst[3] = 1;
inst[4] = 0;
inst[5] = 0;
inst[6] = 0;
/* Set EGRP */
inst[7] = CPT_SE_IE_EGRP << 61;
/* Subtract 1 from the NIX-CPT credit count to preserve
* credit counts.
*/
cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
BIT_ULL(22) - 1);
otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
if (rc)
dev_warn(rvu->dev, "notification to pf %d failed\n",
cpt_pf_num);
/* Wait for CPT instruction to be completed */
do {
mdelay(1);
if (*res == 0xFFFF)
timeout--;
else
break;
} while (timeout);
if (timeout == 0)
dev_warn(rvu->dev, "Poll for result hits hard loop counter\n");
res_daddr_unmap:
dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL);
res_free:
kfree(res);
return 0;
}
#define CTX_CAM_PF_FUNC GENMASK_ULL(61, 46)
#define CTX_CAM_CPTR GENMASK_ULL(45, 0)
int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
{
int nix_blkaddr, blkaddr;
u16 max_ctx_entries, i;
int slot = 0, num_lfs;
u64 reg, cam_data;
int rc;
nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (nix_blkaddr < 0)
return -EINVAL;
if (is_rvu_otx2(rvu))
return 0;
blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0;
/* Submit CPT_INST_S to track when all packets have been
* flushed through for the NIX PF FUNC in inline inbound case.
*/
rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr);
if (rc)
return rc;
/* Wait for rxc entries to be flushed out */
cpt_rxc_teardown(rvu, blkaddr);
reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
max_ctx_entries = (reg >> 48) & 0xFFF;
mutex_lock(&rvu->rsrc_lock);
num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
blkaddr);
if (num_lfs == 0) {
dev_warn(rvu->dev, "CPT LF is not configured\n");
goto unlock;
}
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
for (i = 0; i < max_ctx_entries; i++) {
cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) &&
FIELD_GET(CTX_CAM_CPTR, cam_data)) {
reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data);
rvu_write64(rvu, blkaddr,
CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH),
reg);
}
}
rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
unlock:
mutex_unlock(&rvu->rsrc_lock);
return 0;
}
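To make the CAM walk in rvu_cpt_ctx_flush() above concrete, here is a small worked decode of a CPT_AF_CTX_CAM_DATA value using the same field masks; the entry contents are hypothetical:

```c
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/printk.h>

#define CTX_CAM_PF_FUNC	GENMASK_ULL(61, 46)
#define CTX_CAM_CPTR	GENMASK_ULL(45, 0)

/* Worked decode of a hypothetical CAM entry: pcifunc 0x400, CPTR 0x12340 */
static void ctx_cam_decode_example(void)
{
	u64 cam_data = FIELD_PREP(CTX_CAM_PF_FUNC, 0x400ULL) |
		       FIELD_PREP(CTX_CAM_CPTR, 0x12340ULL);
	u16 pf_func = FIELD_GET(CTX_CAM_PF_FUNC, cam_data);	/* 0x400 */
	u64 cptr = FIELD_GET(CTX_CAM_CPTR, cam_data);		/* 0x12340 */

	/* rvu_cpt_ctx_flush() flushes such an entry by writing
	 * BIT_ULL(46) | cptr to CPT_LF_CTX_FLUSH through the LF's
	 * BAR2 alias window.
	 */
	pr_info("pf_func=0x%x cptr=0x%llx\n", pf_func, cptr);
}
```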
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4512,6 +4512,8 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
#define RX_SA_BASE GENMASK_ULL(52, 7)
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -4519,6 +4521,7 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
int pf = rvu_get_pf(pcifunc);
struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
u64 sa_base;
void *cgxd;
int err;
@@ -4575,6 +4578,14 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
nix_ctx_free(rvu, pfvf);
nix_free_all_bandprof(rvu, pcifunc);
sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
if (FIELD_GET(RX_SA_BASE, sa_base)) {
err = rvu_cpt_ctx_flush(rvu, pcifunc);
if (err)
dev_err(rvu->dev,
"CPT ctx flush failed with error: %d\n", err);
}
}
#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
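The teardown hook above triggers the CTX flush only when an SA base is actually programmed for the LF. A minimal restatement of that guard using the RX_SA_BASE mask (the helper name is illustrative, not from the patch):

```c
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* RX_SA_BASE covers bits 52:7 of NIX_AF_LFX_RX_IPSEC_SA_BASE; a non-zero
 * field means an inline-IPsec SA table was installed for this LF, so the
 * CPT CTX cache may hold entries that must be flushed on teardown.
 */
static bool nix_lf_needs_cpt_ctx_flush(u64 sa_base_reg)
{
	return FIELD_GET(GENMASK_ULL(52, 7), sa_base_reg) != 0;
}
```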
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -527,6 +527,7 @@
#define CPT_AF_CTX_WBACK_LATENCY_PC (0x49448ull)
#define CPT_AF_CTX_PSH_PC (0x49450ull)
#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull)
#define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3)
#define CPT_AF_RXC_TIME (0x50010ull)
#define CPT_AF_RXC_TIME_CFG (0x50018ull)
#define CPT_AF_RXC_DFRG (0x50020ull)
@@ -544,6 +545,7 @@
#define CPT_LF_CTL 0x10
#define CPT_LF_INPROG 0x40
#define CPT_LF_Q_GRP_PTR 0x120
#define CPT_LF_CTX_FLUSH 0x510
#define NPC_AF_BLK_RST (0x00040)