Commit d5b2e0a2 authored by Nithin Dabilpuram, committed by David S. Miller

octeontx2-af: restore rxc conf after teardown sequence

The CN10K CPT coprocessor includes a component named RXC which
is responsible for reassembly of inner IP packets. RXC can
evict its oldest entries based on an age/threshold. At teardown
time, the age/threshold is set to the minimum values so that
all entries are evicted.
This patch restores the timeout and threshold config after the
teardown sequence completes, since it is a global config.
Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
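For orientation before the diff, the change boils down to a save-modify-restore pattern around the global RXC timeout/threshold registers. Below is a minimal, self-contained userspace C sketch of that pattern only; struct cfg, time_cfg(), hw_reg and the field masks are hypothetical stand-ins, not the driver's CPT_AF_RXC_DFRG layout or the rvu_read64()/rvu_write64() API.

#include <stdint.h>
#include <stdio.h>

/* hypothetical packed-register fields (illustrative, not the real layout) */
#define THRES_SHIFT 16
#define THRES_MASK  (0xfffULL << THRES_SHIFT)
#define LIMIT_SHIFT 0
#define LIMIT_MASK  (0xfffULL << LIMIT_SHIFT)

struct cfg {
	uint64_t thres;
	uint64_t limit;
};

/* stand-in for the hardware register normally accessed via rvu_read64()/rvu_write64() */
static uint64_t hw_reg = (100ULL << THRES_SHIFT) | 50ULL;

static void time_cfg(const struct cfg *req, struct cfg *save)
{
	if (save) {
		/* save the current (global) config before overwriting it */
		save->thres = (hw_reg & THRES_MASK) >> THRES_SHIFT;
		save->limit = (hw_reg & LIMIT_MASK) >> LIMIT_SHIFT;
	}
	hw_reg = (req->thres << THRES_SHIFT) | (req->limit << LIMIT_SHIFT);
}

int main(void)
{
	struct cfg teardown = { .thres = 1, .limit = 1 }, prev;

	time_cfg(&teardown, &prev);	/* force eviction with minimum values, saving old config */
	/* ... poll until active/zombie counts drain, as the driver does ... */
	time_cfg(&prev, NULL);		/* restore the saved global config */

	printf("restored thres=%llu limit=%llu\n",
	       (unsigned long long)((hw_reg & THRES_MASK) >> THRES_SHIFT),
	       (unsigned long long)((hw_reg & LIMIT_MASK) >> LIMIT_SHIFT));
	return 0;
}

In the patch itself, cpt_rxc_time_cfg() plays the role of time_cfg(): cpt_rxc_teardown() passes &prev to save the running config, then calls it again with the saved values and save == NULL to restore them.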
parent 9adb04ff
@@ -812,10 +812,21 @@ int rvu_mbox_handler_cpt_sts(struct rvu *rvu, struct cpt_sts_req *req,
 #define RXC_ZOMBIE_COUNT GENMASK_ULL(60, 48)
 
 static void cpt_rxc_time_cfg(struct rvu *rvu, struct cpt_rxc_time_cfg_req *req,
-			     int blkaddr)
+			     int blkaddr, struct cpt_rxc_time_cfg_req *save)
 {
 	u64 dfrg_reg;
 
+	if (save) {
+		/* Save older config */
+		dfrg_reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_DFRG);
+		save->zombie_thres = FIELD_GET(RXC_ZOMBIE_THRES, dfrg_reg);
+		save->zombie_limit = FIELD_GET(RXC_ZOMBIE_LIMIT, dfrg_reg);
+		save->active_thres = FIELD_GET(RXC_ACTIVE_THRES, dfrg_reg);
+		save->active_limit = FIELD_GET(RXC_ACTIVE_LIMIT, dfrg_reg);
+
+		save->step = rvu_read64(rvu, blkaddr, CPT_AF_RXC_TIME_CFG);
+	}
+
 	dfrg_reg = FIELD_PREP(RXC_ZOMBIE_THRES, req->zombie_thres);
 	dfrg_reg |= FIELD_PREP(RXC_ZOMBIE_LIMIT, req->zombie_limit);
 	dfrg_reg |= FIELD_PREP(RXC_ACTIVE_THRES, req->active_thres);
@@ -840,7 +851,7 @@ int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
 	    !is_cpt_vf(rvu, req->hdr.pcifunc))
 		return CPT_AF_ERR_ACCESS_DENIED;
 
-	cpt_rxc_time_cfg(rvu, req, blkaddr);
+	cpt_rxc_time_cfg(rvu, req, blkaddr, NULL);
 
 	return 0;
 }
@@ -886,7 +897,7 @@ int rvu_mbox_handler_cpt_lf_reset(struct rvu *rvu, struct cpt_lf_rst_req *req,
 
 static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
 {
-	struct cpt_rxc_time_cfg_req req;
+	struct cpt_rxc_time_cfg_req req, prev;
 	int timeout = 2000;
 	u64 reg;
 
@@ -902,7 +913,7 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
 	req.active_thres = 1;
 	req.active_limit = 1;
 
-	cpt_rxc_time_cfg(rvu, &req, blkaddr);
+	cpt_rxc_time_cfg(rvu, &req, blkaddr, &prev);
 
 	do {
 		reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
@@ -928,6 +939,9 @@ static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
 
 	if (timeout == 0)
 		dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
+
+	/* Restore config */
+	cpt_rxc_time_cfg(rvu, &prev, blkaddr, NULL);
 }
 
 #define INFLIGHT GENMASK_ULL(8, 0)