Commit 08ab4d74 authored by David S. Miller

Merge branch 'octeontx2-rate-limit-offload'

Subbaraya Sundeep says:

====================
octeontx2: Add ingress ratelimit offload

This patchset adds ingress rate limiting hardware
offload support for CN10K silicon. Police actions
are added for TC matchall and flower filters.
CN10K has an ingress rate limiting feature where
a receive queue is mapped to a bandwidth profile
and the profile is configured with rate and burst
parameters by software. CN10K hardware supports
three levels of ingress policing or rate limiting:
multiple leaf profiles can point to a single mid
level profile, and multiple mid level profiles can
point to a single top level one. Only leaf level
profiles are used for configuring rate limiting.
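
An illustrative aside, not part of the patches: the AF code addresses
a profile through a single admin queue index that combines the layer
and the profile number. A minimal sketch of that encoding, with a
hypothetical helper name, mirroring the debugfs code in patch 2:

static inline u16 bandprof_aq_idx(u8 layer, u16 idx)
{
	/* Low 14 bits select the profile, top 2 bits select the
	 * layer (see enum nix_band_prof_layers).
	 */
	return (idx & 0x3FFF) | ((u16)layer << 14);
}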

Patch 1 adds the new bandwidth profile contexts
in the AF driver, similar to other hardware contexts.
Patch 2 adds debugfs changes to dump bandwidth
profile contexts.
Patch 3 adds support for police action with the TC matchall filter.
Patch 4 uses NL_SET_ERR_MSG_MOD in the TC code.
Patch 5 adds support for police action with the TC flower filter.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ad5645d7 68fbff68
......@@ -260,7 +260,11 @@ M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
M(NIX_CN10K_AQ_ENQ, 0x8019, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
nix_cn10k_aq_enq_rsp) \
M(NIX_GET_HW_INFO, 0x801a, nix_get_hw_info, msg_req, nix_hw_info)
M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \
M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
nix_bandprof_alloc_rsp) \
M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp)
/* Messages initiated by AF (range 0xC00 - 0xDFF) */
#define MBOX_UP_CGX_MESSAGES \
......@@ -615,6 +619,9 @@ enum nix_af_status {
NIX_AF_ERR_PTP_CONFIG_FAIL = -423,
NIX_AF_ERR_NPC_KEY_NOT_SUPP = -424,
NIX_AF_ERR_INVALID_NIXBLK = -425,
NIX_AF_ERR_INVALID_BANDPROF = -426,
NIX_AF_ERR_IPOLICER_NOTSUPP = -427,
NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
};
/* For NIX RX vtag action */
......@@ -683,6 +690,7 @@ struct nix_cn10k_aq_enq_req {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
struct nix_bandprof_s prof;
};
union {
struct nix_cn10k_rq_ctx_s rq_mask;
......@@ -690,6 +698,7 @@ struct nix_cn10k_aq_enq_req {
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
struct nix_bandprof_s prof_mask;
};
};
......@@ -701,6 +710,7 @@ struct nix_cn10k_aq_enq_rsp {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
struct nix_bandprof_s prof;
};
};
......@@ -716,6 +726,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
u64 prof;
};
union {
struct nix_rq_ctx_s rq_mask;
......@@ -723,6 +734,7 @@ struct nix_aq_enq_req {
struct nix_cq_ctx_s cq_mask;
struct nix_rsse_s rss_mask;
struct nix_rx_mce_s mce_mask;
u64 prof_mask;
};
};
......@@ -734,6 +746,7 @@ struct nix_aq_enq_rsp {
struct nix_cq_ctx_s cq;
struct nix_rsse_s rss;
struct nix_rx_mce_s mce;
u64 prof;
};
};
......@@ -975,6 +988,31 @@ struct nix_hw_info {
u16 min_mtu;
};
struct nix_bandprof_alloc_req {
struct mbox_msghdr hdr;
/* Count of profiles needed per layer */
u16 prof_count[BAND_PROF_NUM_LAYERS];
};
struct nix_bandprof_alloc_rsp {
struct mbox_msghdr hdr;
u16 prof_count[BAND_PROF_NUM_LAYERS];
/* There is no need to allocate more than 1 bandwidth profile
* per RQ of a PF_FUNC's NIXLF. So limit the maximum
* profiles to 64 per PF_FUNC.
*/
#define MAX_BANDPROF_PER_PFFUNC 64
u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
};
struct nix_bandprof_free_req {
struct mbox_msghdr hdr;
u8 free_all;
u16 prof_count[BAND_PROF_NUM_LAYERS];
u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC];
};
/* NPC mbox message structs */
#define NPC_MCAM_ENTRY_INVALID 0xFFFF
......
......@@ -184,6 +184,14 @@ int rvu_rsrc_free_count(struct rsrc_bmap *rsrc)
return (rsrc->max - used);
}
bool is_rsrc_free(struct rsrc_bmap *rsrc, int id)
{
if (!rsrc->bmap)
return false;
return !test_bit(id, rsrc->bmap);
}
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc)
{
rsrc->bmap = kcalloc(BITS_TO_LONGS(rsrc->max),
......
......@@ -296,6 +296,13 @@ struct nix_txvlan {
struct mutex rsrc_lock; /* Serialize resource alloc/free */
};
struct nix_ipolicer {
struct rsrc_bmap band_prof;
u16 *pfvf_map;
u16 *match_id;
u16 *ref_count;
};
struct nix_hw {
int blkaddr;
struct rvu *rvu;
......@@ -305,6 +312,7 @@ struct nix_hw {
struct nix_mark_format mark_format;
struct nix_lso lso;
struct nix_txvlan txvlan;
struct nix_ipolicer *ipolicer;
};
/* RVU block's capabilities or functionality,
......@@ -322,6 +330,7 @@ struct hw_cap {
bool nix_rx_multicast; /* Rx packet replication support */
bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */
bool programmable_chans; /* Channels programmable ? */
bool ipolicer;
};
struct rvu_hwinfo {
......@@ -587,6 +596,7 @@ static inline bool is_rvu_fwdata_valid(struct rvu *rvu)
int rvu_alloc_bitmap(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc(struct rsrc_bmap *rsrc);
void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
......@@ -672,6 +682,12 @@ int rvu_get_next_nix_blkaddr(struct rvu *rvu, int blkaddr);
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc);
int nix_get_struct_ptrs(struct rvu *rvu, u16 pcifunc,
struct nix_hw **nix_hw, int *blkaddr);
int rvu_nix_setup_ratelimit_aggr(struct rvu *rvu, u16 pcifunc,
u16 rq_idx, u16 match_id);
int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw,
struct nix_cn10k_aq_enq_req *aq_req,
struct nix_cn10k_aq_enq_rsp *aq_rsp,
u16 pcifunc, u8 ctype, u32 qidx);
/* NPC APIs */
int rvu_npc_init(struct rvu *rvu);
......
......@@ -1632,6 +1632,165 @@ static int rvu_dbg_nix_qsize_display(struct seq_file *filp, void *unused)
RVU_DEBUG_SEQ_FOPS(nix_qsize, nix_qsize_display, nix_qsize_write);
static void print_band_prof_ctx(struct seq_file *m,
struct nix_bandprof_s *prof)
{
char *str;
switch (prof->pc_mode) {
case NIX_RX_PC_MODE_VLAN:
str = "VLAN";
break;
case NIX_RX_PC_MODE_DSCP:
str = "DSCP";
break;
case NIX_RX_PC_MODE_GEN:
str = "Generic";
break;
case NIX_RX_PC_MODE_RSVD:
str = "Reserved";
break;
}
seq_printf(m, "W0: pc_mode\t\t%s\n", str);
str = (prof->icolor == 3) ? "Color blind" :
(prof->icolor == 0) ? "Green" :
(prof->icolor == 1) ? "Yellow" : "Red";
seq_printf(m, "W0: icolor\t\t%s\n", str);
seq_printf(m, "W0: tnl_ena\t\t%d\n", prof->tnl_ena);
seq_printf(m, "W0: peir_exponent\t%d\n", prof->peir_exponent);
seq_printf(m, "W0: pebs_exponent\t%d\n", prof->pebs_exponent);
seq_printf(m, "W0: cir_exponent\t%d\n", prof->cir_exponent);
seq_printf(m, "W0: cbs_exponent\t%d\n", prof->cbs_exponent);
seq_printf(m, "W0: peir_mantissa\t%d\n", prof->peir_mantissa);
seq_printf(m, "W0: pebs_mantissa\t%d\n", prof->pebs_mantissa);
seq_printf(m, "W0: cir_mantissa\t%d\n", prof->cir_mantissa);
seq_printf(m, "W1: cbs_mantissa\t%d\n", prof->cbs_mantissa);
str = (prof->lmode == 0) ? "byte" : "packet";
seq_printf(m, "W1: lmode\t\t%s\n", str);
seq_printf(m, "W1: l_select\t\t%d\n", prof->l_sellect);
seq_printf(m, "W1: rdiv\t\t%d\n", prof->rdiv);
seq_printf(m, "W1: adjust_exponent\t%d\n", prof->adjust_exponent);
seq_printf(m, "W1: adjust_mantissa\t%d\n", prof->adjust_mantissa);
str = (prof->gc_action == 0) ? "PASS" :
(prof->gc_action == 1) ? "DROP" : "RED";
seq_printf(m, "W1: gc_action\t\t%s\n", str);
str = (prof->yc_action == 0) ? "PASS" :
(prof->yc_action == 1) ? "DROP" : "RED";
seq_printf(m, "W1: yc_action\t\t%s\n", str);
str = (prof->rc_action == 0) ? "PASS" :
(prof->rc_action == 1) ? "DROP" : "RED";
seq_printf(m, "W1: rc_action\t\t%s\n", str);
seq_printf(m, "W1: meter_algo\t\t%d\n", prof->meter_algo);
seq_printf(m, "W1: band_prof_id\t%d\n", prof->band_prof_id);
seq_printf(m, "W1: hl_en\t\t%d\n", prof->hl_en);
seq_printf(m, "W2: ts\t\t\t%lld\n", (u64)prof->ts);
seq_printf(m, "W3: pe_accum\t\t%d\n", prof->pe_accum);
seq_printf(m, "W3: c_accum\t\t%d\n", prof->c_accum);
seq_printf(m, "W4: green_pkt_pass\t%lld\n",
(u64)prof->green_pkt_pass);
seq_printf(m, "W5: yellow_pkt_pass\t%lld\n",
(u64)prof->yellow_pkt_pass);
seq_printf(m, "W6: red_pkt_pass\t%lld\n", (u64)prof->red_pkt_pass);
seq_printf(m, "W7: green_octs_pass\t%lld\n",
(u64)prof->green_octs_pass);
seq_printf(m, "W8: yellow_octs_pass\t%lld\n",
(u64)prof->yellow_octs_pass);
seq_printf(m, "W9: red_octs_pass\t%lld\n", (u64)prof->red_octs_pass);
seq_printf(m, "W10: green_pkt_drop\t%lld\n",
(u64)prof->green_pkt_drop);
seq_printf(m, "W11: yellow_pkt_drop\t%lld\n",
(u64)prof->yellow_pkt_drop);
seq_printf(m, "W12: red_pkt_drop\t%lld\n", (u64)prof->red_pkt_drop);
seq_printf(m, "W13: green_octs_drop\t%lld\n",
(u64)prof->green_octs_drop);
seq_printf(m, "W14: yellow_octs_drop\t%lld\n",
(u64)prof->yellow_octs_drop);
seq_printf(m, "W15: red_octs_drop\t%lld\n", (u64)prof->red_octs_drop);
seq_puts(m, "==============================\n");
}
static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
{
struct nix_hw *nix_hw = m->private;
struct nix_cn10k_aq_enq_req aq_req;
struct nix_cn10k_aq_enq_rsp aq_rsp;
struct rvu *rvu = nix_hw->rvu;
struct nix_ipolicer *ipolicer;
int layer, prof_idx, idx, rc;
u16 pcifunc;
char *str;
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
if (layer == BAND_PROF_INVAL_LAYER)
continue;
str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
(layer == BAND_PROF_MID_LAYER) ? "Mid" : "Top";
seq_printf(m, "\n%s bandwidth profiles\n", str);
seq_puts(m, "=======================\n");
ipolicer = &nix_hw->ipolicer[layer];
for (idx = 0; idx < ipolicer->band_prof.max; idx++) {
if (is_rsrc_free(&ipolicer->band_prof, idx))
continue;
prof_idx = (idx & 0x3FFF) | (layer << 14);
rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
0x00, NIX_AQ_CTYPE_BANDPROF,
prof_idx);
if (rc) {
dev_err(rvu->dev,
"%s: Failed to fetch context of %s profile %d, err %d\n",
__func__, str, idx, rc);
return 0;
}
seq_printf(m, "\n%s bandwidth profile:: %d\n", str, idx);
pcifunc = ipolicer->pfvf_map[idx];
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
seq_printf(m, "Allocated to :: PF %d\n",
rvu_get_pf(pcifunc));
else
seq_printf(m, "Allocated to :: PF %d VF %d\n",
rvu_get_pf(pcifunc),
(pcifunc & RVU_PFVF_FUNC_MASK) - 1);
print_band_prof_ctx(m, &aq_rsp.prof);
}
}
return 0;
}
RVU_DEBUG_SEQ_FOPS(nix_band_prof_ctx, nix_band_prof_ctx_display, NULL);
static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
{
struct nix_hw *nix_hw = m->private;
struct nix_ipolicer *ipolicer;
int layer;
char *str;
seq_puts(m, "\nBandwidth profile resource free count\n");
seq_puts(m, "=====================================\n");
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
if (layer == BAND_PROF_INVAL_LAYER)
continue;
str = (layer == BAND_PROF_LEAF_LAYER) ? "Leaf" :
(layer == BAND_PROF_MID_LAYER) ? "Mid " : "Top ";
ipolicer = &nix_hw->ipolicer[layer];
seq_printf(m, "%s :: Max: %4d Free: %4d\n", str,
ipolicer->band_prof.max,
rvu_rsrc_free_count(&ipolicer->band_prof));
}
seq_puts(m, "=====================================\n");
return 0;
}
RVU_DEBUG_SEQ_FOPS(nix_band_prof_rsrc, nix_band_prof_rsrc_display, NULL);
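/* Editor's illustration, not part of the patch: with assumed,
 * hardware-dependent profile counts, the resource dump above would
 * read roughly as:
 *
 *	Bandwidth profile resource free count
 *	=====================================
 *	Leaf :: Max:  512 Free:  511
 *	Mid  :: Max:  256 Free:  256
 *	Top  :: Max:   64 Free:   64
 *	=====================================
 */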
static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
{
struct nix_hw *nix_hw;
......@@ -1664,6 +1823,10 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
&rvu_dbg_nix_ndc_rx_hits_miss_fops);
debugfs_create_file("qsize", 0600, rvu->rvu_dbg.nix, rvu,
&rvu_dbg_nix_qsize_fops);
debugfs_create_file("ingress_policer_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_band_prof_ctx_fops);
debugfs_create_file("ingress_policer_rsrc", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_band_prof_rsrc_fops);
}
static void rvu_dbg_npa_init(struct rvu *rvu)
......
......@@ -1110,6 +1110,11 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
req->vtag0_type == NIX_AF_LFX_RX_VTAG_TYPE7)
rule->vfvlan_cfg = true;
if (is_npc_intf_rx(req->intf) && req->match_id &&
(req->op == NIX_RX_ACTIONOP_UCAST || req->op == NIX_RX_ACTIONOP_RSS))
return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
req->index, req->match_id);
return 0;
}
......
......@@ -171,6 +171,7 @@
#define NIX_AF_SQ_CONST (0x0040)
#define NIX_AF_CQ_CONST (0x0048)
#define NIX_AF_RQ_CONST (0x0050)
#define NIX_AF_PL_CONST (0x0058)
#define NIX_AF_PSE_CONST (0x0060)
#define NIX_AF_TL1_CONST (0x0070)
#define NIX_AF_TL2_CONST (0x0078)
......@@ -181,6 +182,7 @@
#define NIX_AF_LSO_CFG (0x00A8)
#define NIX_AF_BLK_RST (0x00B0)
#define NIX_AF_TX_TSTMP_CFG (0x00C0)
#define NIX_AF_PL_TS (0x00C8)
#define NIX_AF_RX_CFG (0x00D0)
#define NIX_AF_AVG_DELAY (0x00E0)
#define NIX_AF_CINT_DELAY (0x00F0)
......@@ -212,7 +214,9 @@
#define NIX_AF_RX_DEF_OL2 (0x0200)
#define NIX_AF_RX_DEF_OIP4 (0x0210)
#define NIX_AF_RX_DEF_IIP4 (0x0220)
#define NIX_AF_RX_DEF_VLAN0_PCP_DEI (0x0228)
#define NIX_AF_RX_DEF_OIP6 (0x0230)
#define NIX_AF_RX_DEF_VLAN1_PCP_DEI (0x0238)
#define NIX_AF_RX_DEF_IIP6 (0x0240)
#define NIX_AF_RX_DEF_OTCP (0x0250)
#define NIX_AF_RX_DEF_ITCP (0x0260)
......@@ -223,6 +227,10 @@
#define NIX_AF_RX_DEF_ISCTP (0x02A0)
#define NIX_AF_RX_DEF_IPSECX (0x02B0)
#define NIX_AF_RX_DEF_CST_APAD1 (0x02A8)
#define NIX_AF_RX_DEF_IIP4_DSCP (0x02E0)
#define NIX_AF_RX_DEF_OIP4_DSCP (0x02E8)
#define NIX_AF_RX_DEF_IIP6_DSCP (0x02F0)
#define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8)
#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
......
......@@ -286,7 +286,7 @@ enum nix_aq_ctype {
NIX_AQ_CTYPE_MCE = 0x3,
NIX_AQ_CTYPE_RSS = 0x4,
NIX_AQ_CTYPE_DYNO = 0x5,
NIX_AQ_CTYPE_BAND_PROF = 0x6,
NIX_AQ_CTYPE_BANDPROF = 0x6,
};
/* NIX admin queue instruction opcodes */
......@@ -665,6 +665,89 @@ struct nix_rx_mce_s {
uint64_t next : 16;
};
enum nix_band_prof_layers {
BAND_PROF_LEAF_LAYER = 0,
BAND_PROF_INVAL_LAYER = 1,
BAND_PROF_MID_LAYER = 2,
BAND_PROF_TOP_LAYER = 3,
BAND_PROF_NUM_LAYERS = 4,
};
enum NIX_RX_BAND_PROF_ACTIONRESULT_E {
NIX_RX_BAND_PROF_ACTIONRESULT_PASS = 0x0,
NIX_RX_BAND_PROF_ACTIONRESULT_DROP = 0x1,
NIX_RX_BAND_PROF_ACTIONRESULT_RED = 0x2,
};
enum nix_band_prof_pc_mode {
NIX_RX_PC_MODE_VLAN = 0,
NIX_RX_PC_MODE_DSCP = 1,
NIX_RX_PC_MODE_GEN = 2,
NIX_RX_PC_MODE_RSVD = 3,
};
/* NIX ingress policer bandwidth profile structure */
struct nix_bandprof_s {
uint64_t pc_mode : 2; /* W0 */
uint64_t icolor : 2;
uint64_t tnl_ena : 1;
uint64_t reserved_5_7 : 3;
uint64_t peir_exponent : 5;
uint64_t reserved_13_15 : 3;
uint64_t pebs_exponent : 5;
uint64_t reserved_21_23 : 3;
uint64_t cir_exponent : 5;
uint64_t reserved_29_31 : 3;
uint64_t cbs_exponent : 5;
uint64_t reserved_37_39 : 3;
uint64_t peir_mantissa : 8;
uint64_t pebs_mantissa : 8;
uint64_t cir_mantissa : 8;
uint64_t cbs_mantissa : 8; /* W1 */
uint64_t lmode : 1;
uint64_t l_sellect : 3;
uint64_t rdiv : 4;
uint64_t adjust_exponent : 5;
uint64_t reserved_85_86 : 2;
uint64_t adjust_mantissa : 9;
uint64_t gc_action : 2;
uint64_t yc_action : 2;
uint64_t rc_action : 2;
uint64_t meter_algo : 2;
uint64_t band_prof_id : 7;
uint64_t reserved_111_118 : 8;
uint64_t hl_en : 1;
uint64_t reserved_120_127 : 8;
uint64_t ts : 48; /* W2 */
uint64_t reserved_176_191 : 16;
uint64_t pe_accum : 32; /* W3 */
uint64_t c_accum : 32;
uint64_t green_pkt_pass : 48; /* W4 */
uint64_t reserved_304_319 : 16;
uint64_t yellow_pkt_pass : 48; /* W5 */
uint64_t reserved_368_383 : 16;
uint64_t red_pkt_pass : 48; /* W6 */
uint64_t reserved_432_447 : 16;
uint64_t green_octs_pass : 48; /* W7 */
uint64_t reserved_496_511 : 16;
uint64_t yellow_octs_pass : 48; /* W8 */
uint64_t reserved_560_575 : 16;
uint64_t red_octs_pass : 48; /* W9 */
uint64_t reserved_624_639 : 16;
uint64_t green_pkt_drop : 48; /* W10 */
uint64_t reserved_688_703 : 16;
uint64_t yellow_pkt_drop : 48; /* W11 */
uint64_t reserved_752_767 : 16;
uint64_t red_pkt_drop : 48; /* W12 */
uint64_t reserved_816_831 : 16;
uint64_t green_octs_drop : 48; /* W13 */
uint64_t reserved_880_895 : 16;
uint64_t yellow_octs_drop : 48; /* W14 */
uint64_t reserved_944_959 : 16;
uint64_t red_octs_drop : 48; /* W15 */
uint64_t reserved_1008_1023 : 16;
};
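/* Editor's illustration, not part of the patch: a hedged helper that
 * decodes the committed-rate fields above back into bits per second,
 * assuming the rate formula documented in the cn10k driver below
 * ((1 + mantissa/256) * 2^exponent bytes added to the token bucket
 * every 2 * 2^rdiv microseconds).
 */
static inline uint64_t bandprof_cir_bps(const struct nix_bandprof_s *prof)
{
	/* Bytes added per policer tick, scaled by 256 to avoid floats */
	uint64_t bytes_x256 = ((uint64_t)256 + prof->cir_mantissa) <<
			      prof->cir_exponent;
	/* Policer ticks per second: 1000000 / (2 * 2^rdiv) */
	uint64_t ticks_per_sec = 1000000ULL >> (prof->rdiv + 1);

	return bytes_x256 * ticks_per_sec * 8 / 256;
}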
enum nix_lsoalg {
NIX_LSOALG_NOP,
NIX_LSOALG_ADD_SEGNUM,
......
......@@ -179,3 +179,326 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
sq->head++;
sq->head &= (sq->sqe_cnt - 1);
}
int cn10k_free_all_ipolicers(struct otx2_nic *pfvf)
{
struct nix_bandprof_free_req *req;
int rc;
if (is_dev_otx2(pfvf->pdev))
return 0;
mutex_lock(&pfvf->mbox.lock);
req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
if (!req) {
rc = -ENOMEM;
goto out;
}
/* Free all bandwidth profiles allocated */
req->free_all = true;
rc = otx2_sync_mbox_msg(&pfvf->mbox);
out:
mutex_unlock(&pfvf->mbox.lock);
return rc;
}
int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf)
{
struct nix_bandprof_alloc_req *req;
struct nix_bandprof_alloc_rsp *rsp;
int rc;
req = otx2_mbox_alloc_msg_nix_bandprof_alloc(&pfvf->mbox);
if (!req)
return -ENOMEM;
req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
rc = otx2_sync_mbox_msg(&pfvf->mbox);
if (rc)
goto out;
rsp = (struct nix_bandprof_alloc_rsp *)
otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
if (!rsp->prof_count[BAND_PROF_LEAF_LAYER]) {
rc = -EIO;
goto out;
}
*leaf = rsp->prof_idx[BAND_PROF_LEAF_LAYER][0];
out:
if (rc) {
dev_warn(pfvf->dev,
"Failed to allocate ingress bandwidth policer\n");
}
return rc;
}
int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
int ret;
mutex_lock(&pfvf->mbox.lock);
ret = cn10k_alloc_leaf_profile(pfvf, &hw->matchall_ipolicer);
mutex_unlock(&pfvf->mbox.lock);
return ret;
}
#define POLICER_TIMESTAMP 1 /* 1 second */
#define MAX_RATE_EXP 22 /* Valid rate exponent range: 0 - 22 */
static void cn10k_get_ingress_burst_cfg(u32 burst, u32 *burst_exp,
u32 *burst_mantissa)
{
int tmp;
/* Burst is calculated as
* (1+[BURST_MANTISSA]/256)*2^[BURST_EXPONENT]
* This is the upper limit on the number of tokens (bytes) that
* can be accumulated in the bucket.
*/
*burst_exp = ilog2(burst);
if (burst < 256) {
/* No float: can't express mantissa in this case */
*burst_mantissa = 0;
return;
}
if (*burst_exp > MAX_RATE_EXP)
*burst_exp = MAX_RATE_EXP;
/* Calculate mantissa
* Find remaining bytes 'burst - 2^burst_exp'
* mantissa = (remaining bytes) / 2^ (burst_exp - 8)
*/
tmp = burst - rounddown_pow_of_two(burst);
*burst_mantissa = tmp / (1UL << (*burst_exp - 8));
}
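/* Editor's worked example, not part of the patch: burst = 512000
 * bytes gives burst_exp = ilog2(512000) = 18 (2^18 = 262144); the
 * remainder 512000 - 262144 = 249856 gives
 * burst_mantissa = 249856 / 2^(18 - 8) = 244.
 * Check: (1 + 244/256) * 2^18 = 512000, the requested burst.
 */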
static void cn10k_get_ingress_rate_cfg(u64 rate, u32 *rate_exp,
u32 *rate_mantissa, u32 *rdiv)
{
u32 div = 0;
u32 exp = 0;
u64 tmp;
/* Figure out the mantissa, exponent and divider from the given max pkt rate
 *
 * To achieve the desired rate HW adds
 * (1 + [RATE_MANTISSA]/256) * 2^[RATE_EXPONENT] tokens (bytes) at every
 * policer timeunit * 2^rdiv, i.e. every 2 * 2^rdiv usecs, to the token
 * bucket. Here the policer timeunit is 2 usecs and the rate is in bits
 * per sec. Since floating point cannot be used, the algorithm below
 * uses a scale factor of 1000000 to support rates up to 100 Gbps.
*/
tmp = rate * 32 * 2;
if (tmp < 256000000) {
while (tmp < 256000000) {
tmp = tmp * 2;
div++;
}
} else {
for (exp = 0; tmp >= 512000000 && exp <= MAX_RATE_EXP; exp++)
tmp = tmp / 2;
if (exp > MAX_RATE_EXP)
exp = MAX_RATE_EXP;
}
*rate_mantissa = (tmp - 256000000) / 1000000;
*rate_exp = exp;
*rdiv = div;
}
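/* Editor's worked example, not part of the patch: rate = 1 Gbps
 * gives tmp = 10^9 * 32 * 2 = 6.4 * 10^10. Seven halvings leave
 * tmp = 500000000 < 512000000, so rate_exp = 7, rdiv = 0 and
 * rate_mantissa = (500000000 - 256000000) / 1000000 = 244.
 * Check: (1 + 244/256) * 2^7 = 250 bytes added every 2 usecs,
 * i.e. 125 Mbytes/sec = 1 Gbps.
 */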
int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
u16 policer, bool map)
{
struct nix_cn10k_aq_enq_req *aq;
aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
if (!aq)
return -ENOMEM;
/* Enable or disable policing, and set the bandwidth profile (policer) index */
if (map)
aq->rq.policer_ena = 1;
else
aq->rq.policer_ena = 0;
aq->rq_mask.policer_ena = 1;
aq->rq.band_prof_id = policer;
aq->rq_mask.band_prof_id = GENMASK(9, 0);
/* Fill AQ info */
aq->qidx = rq_idx;
aq->ctype = NIX_AQ_CTYPE_RQ;
aq->op = NIX_AQ_INSTOP_WRITE;
return otx2_sync_mbox_msg(&pfvf->mbox);
}
int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf)
{
struct nix_bandprof_free_req *req;
req = otx2_mbox_alloc_msg_nix_bandprof_free(&pfvf->mbox);
if (!req)
return -ENOMEM;
req->prof_count[BAND_PROF_LEAF_LAYER] = 1;
req->prof_idx[BAND_PROF_LEAF_LAYER][0] = leaf;
return otx2_sync_mbox_msg(&pfvf->mbox);
}
int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf)
{
struct otx2_hw *hw = &pfvf->hw;
int qidx, rc;
mutex_lock(&pfvf->mbox.lock);
/* Remove RQ's policer mapping */
for (qidx = 0; qidx < hw->rx_queues; qidx++)
cn10k_map_unmap_rq_policer(pfvf, qidx,
hw->matchall_ipolicer, false);
rc = cn10k_free_leaf_profile(pfvf, hw->matchall_ipolicer);
mutex_unlock(&pfvf->mbox.lock);
return rc;
}
int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
u32 burst, u64 rate, bool pps)
{
struct nix_cn10k_aq_enq_req *aq;
u32 burst_exp, burst_mantissa;
u32 rate_exp, rate_mantissa;
u32 rdiv;
/* Get exponent and mantissa values for the desired rate */
cn10k_get_ingress_burst_cfg(burst, &burst_exp, &burst_mantissa);
cn10k_get_ingress_rate_cfg(rate, &rate_exp, &rate_mantissa, &rdiv);
/* Init bandwidth profile */
aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
if (!aq)
return -ENOMEM;
/* Set initial color mode to blind */
aq->prof.icolor = 0x03;
aq->prof_mask.icolor = 0x03;
/* Set rate and burst values */
aq->prof.cir_exponent = rate_exp;
aq->prof_mask.cir_exponent = 0x1F;
aq->prof.cir_mantissa = rate_mantissa;
aq->prof_mask.cir_mantissa = 0xFF;
aq->prof.cbs_exponent = burst_exp;
aq->prof_mask.cbs_exponent = 0x1F;
aq->prof.cbs_mantissa = burst_mantissa;
aq->prof_mask.cbs_mantissa = 0xFF;
aq->prof.rdiv = rdiv;
aq->prof_mask.rdiv = 0xF;
if (pps) {
/* The amount of decremented tokens is calculated according to
* the following equation:
* max([ LMODE ? 0 : (packet_length - LXPTR)] +
* ([ADJUST_MANTISSA]/256 - 1) * 2^[ADJUST_EXPONENT],
* 1/256)
* if LMODE is 1 then rate limiting will be based on
* PPS otherwise bps.
* The aim of the ADJUST value is to specify a token cost per
* packet, in contrast to the packet length that specifies a
* cost per byte. To rate limit based on PPS, the adjust mantissa
* is set to 384 and the exponent to 1 so that the number of tokens
* decremented becomes 1, i.e. one token per packet.
*/
aq->prof.adjust_exponent = 1;
aq->prof_mask.adjust_exponent = 0x1F;
aq->prof.adjust_mantissa = 384;
aq->prof_mask.adjust_mantissa = 0x1FF;
aq->prof.lmode = 0x1;
aq->prof_mask.lmode = 0x1;
}
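	/* Editor's check, not part of the patch: with the values above,
	 * tokens decremented per packet =
	 * max((384/256 - 1) * 2^1, 1/256) = max(0.5 * 2, 1/256) = 1.
	 */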
/* Two rate three color marker
* With PEIR/EIR set to zero, color will be either green or red
*/
aq->prof.meter_algo = 2;
aq->prof_mask.meter_algo = 0x3;
aq->prof.rc_action = NIX_RX_BAND_PROF_ACTIONRESULT_DROP;
aq->prof_mask.rc_action = 0x3;
aq->prof.yc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
aq->prof_mask.yc_action = 0x3;
aq->prof.gc_action = NIX_RX_BAND_PROF_ACTIONRESULT_PASS;
aq->prof_mask.gc_action = 0x3;
/* Setting the exponent to 24 and the mantissa to 0 configures
 * the bucket with zero values, making the bucket unused. The peak
 * information rate and excess information rate buckets are
 * unused here.
*/
aq->prof.peir_exponent = 24;
aq->prof_mask.peir_exponent = 0x1F;
aq->prof.peir_mantissa = 0;
aq->prof_mask.peir_mantissa = 0xFF;
aq->prof.pebs_exponent = 24;
aq->prof_mask.pebs_exponent = 0x1F;
aq->prof.pebs_mantissa = 0;
aq->prof_mask.pebs_mantissa = 0xFF;
/* Fill AQ info */
aq->qidx = profile;
aq->ctype = NIX_AQ_CTYPE_BANDPROF;
aq->op = NIX_AQ_INSTOP_WRITE;
return otx2_sync_mbox_msg(&pfvf->mbox);
}
int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
u32 burst, u64 rate)
{
struct otx2_hw *hw = &pfvf->hw;
int qidx, rc;
mutex_lock(&pfvf->mbox.lock);
rc = cn10k_set_ipolicer_rate(pfvf, hw->matchall_ipolicer, burst,
rate, false);
if (rc)
goto out;
for (qidx = 0; qidx < hw->rx_queues; qidx++) {
rc = cn10k_map_unmap_rq_policer(pfvf, qidx,
hw->matchall_ipolicer, true);
if (rc)
break;
}
out:
mutex_unlock(&pfvf->mbox.lock);
return rc;
}
......@@ -14,4 +14,15 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_pf_lmtst_init(struct otx2_nic *pf);
int cn10k_vf_lmtst_init(struct otx2_nic *vf);
int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);
int cn10k_set_matchall_ipolicer_rate(struct otx2_nic *pfvf,
u32 burst, u64 rate);
int cn10k_map_unmap_rq_policer(struct otx2_nic *pfvf, int rq_idx,
u16 policer, bool map);
int cn10k_alloc_leaf_profile(struct otx2_nic *pfvf, u16 *leaf);
int cn10k_set_ipolicer_rate(struct otx2_nic *pfvf, u16 profile,
u32 burst, u64 rate, bool pps);
int cn10k_free_leaf_profile(struct otx2_nic *pfvf, u16 leaf);
#endif /* CN10K_H */
......@@ -180,6 +180,7 @@ struct otx2_hw {
/* NIX */
u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC];
u16 matchall_ipolicer;
/* HW settings, coalescing etc */
u16 rx_chan_base;
......@@ -327,6 +328,7 @@ struct otx2_nic {
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
u64 flags;
struct otx2_qset qset;
......@@ -370,6 +372,7 @@ struct otx2_nic {
struct otx2_flow_config *flow_cfg;
struct otx2_tc_info tc_info;
unsigned long rq_bmap;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
......
......@@ -286,6 +286,12 @@ static int otx2_set_channels(struct net_device *dev,
if (!channel->rx_count || !channel->tx_count)
return -EINVAL;
if (bitmap_weight(&pfvf->rq_bmap, pfvf->hw.rx_queues) > 1) {
netdev_err(dev,
"Receive queues are in use by TC police action\n");
return -EINVAL;
}
if (if_up)
dev->netdev_ops->ndo_stop(dev);
......
......@@ -1461,6 +1461,9 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
otx2_free_cq_res(pf);
/* Free all ingress bandwidth profiles allocated */
cn10k_free_all_ipolicers(pf);
mutex_lock(&mbox->lock);
/* Reset NIX LF */
free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
......