Commit 21f49681 authored by David S. Miller

Merge branch 'octeontx2-multicast-mirror-offload'

Suman Ghosh says:

====================
octeontx2: Multicast/mirror offload changes

This patchset includes changes to support TC multicast/mirror offload.

Patch #1: Adds changes to support new mailboxes to offload multicast/mirror
rules.

Patch #2: Adds TC related changes which use the newly added mailboxes to
offload multicast/mirror rules.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8470e436 df094d8f
...@@ -304,6 +304,13 @@ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \ ...@@ -304,6 +304,13 @@ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
nix_bandprof_get_hwinfo_rsp) \ nix_bandprof_get_hwinfo_rsp) \
M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \ M(NIX_READ_INLINE_IPSEC_CFG, 0x8023, nix_read_inline_ipsec_cfg, \
msg_req, nix_inline_ipsec_cfg) \ msg_req, nix_inline_ipsec_cfg) \
M(NIX_MCAST_GRP_CREATE, 0x802b, nix_mcast_grp_create, nix_mcast_grp_create_req, \
nix_mcast_grp_create_rsp) \
M(NIX_MCAST_GRP_DESTROY, 0x802c, nix_mcast_grp_destroy, nix_mcast_grp_destroy_req, \
msg_rsp) \
M(NIX_MCAST_GRP_UPDATE, 0x802d, nix_mcast_grp_update, \
nix_mcast_grp_update_req, \
nix_mcast_grp_update_rsp) \
/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \ /* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \ M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
mcs_alloc_rsrc_rsp) \ mcs_alloc_rsrc_rsp) \
...@@ -830,6 +837,9 @@ enum nix_af_status { ...@@ -830,6 +837,9 @@ enum nix_af_status {
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429, NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430, NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
NIX_AF_ERR_LINK_CREDITS = -431, NIX_AF_ERR_LINK_CREDITS = -431,
NIX_AF_ERR_INVALID_MCAST_GRP = -436,
NIX_AF_ERR_INVALID_MCAST_DEL_REQ = -437,
NIX_AF_ERR_NON_CONTIG_MCE_LIST = -438,
}; };
/* For NIX RX vtag action */ /* For NIX RX vtag action */
...@@ -1204,6 +1214,68 @@ struct nix_bp_cfg_rsp { ...@@ -1204,6 +1214,68 @@ struct nix_bp_cfg_rsp {
u8 chan_cnt; /* Number of channel for which bpids are assigned */ u8 chan_cnt; /* Number of channel for which bpids are assigned */
}; };
/* Mailbox request (NIX_MCAST_GRP_CREATE) sent by a PF/VF to the AF to
 * create a multicast/mirror group. The AF answers with
 * struct nix_mcast_grp_create_rsp carrying the new group index.
 */
struct nix_mcast_grp_create_req {
	struct mbox_msghdr hdr;
#define NIX_MCAST_INGRESS 0
#define NIX_MCAST_EGRESS 1
	/* Group direction: NIX_MCAST_INGRESS or NIX_MCAST_EGRESS */
	u8 dir;
	u8 reserved[11];
	/* Reserving few bytes for future requirement */
};
/* Mailbox response to NIX_MCAST_GRP_CREATE */
struct nix_mcast_grp_create_rsp {
	struct mbox_msghdr hdr;
	/* This mcast_grp_idx should be passed during MCAM
	 * write entry for multicast. AF will identify the
	 * corresponding multicast table index associated
	 * with the group id and program the same to MCAM entry.
	 * This group id is also needed during group delete
	 * and update request.
	 */
	u32 mcast_grp_idx;
};
/* Mailbox request (NIX_MCAST_GRP_DESTROY) to tear down a previously
 * created multicast/mirror group; the AF replies with a plain msg_rsp.
 */
struct nix_mcast_grp_destroy_req {
	struct mbox_msghdr hdr;
	/* Group id returned by nix_mcast_grp_create_rsp */
	u32 mcast_grp_idx;
	/* If AF is requesting for destroy, then set
	 * it to '1'. Otherwise keep it to '0'
	 */
	u8 is_af;
};
/* Mailbox request (NIX_MCAST_GRP_UPDATE) to add or delete entries of an
 * existing multicast/mirror group. Entry i is described by the i-th slot
 * of the rq_rss_index[]/pcifunc[]/channel[]/dest_type[] arrays.
 */
struct nix_mcast_grp_update_req {
	struct mbox_msghdr hdr;
	/* Group id returned by nix_mcast_grp_create_rsp */
	u32 mcast_grp_idx;
	/* Number of multicast/mirror entries requested */
	u32 num_mce_entry;
#define NIX_MCE_ENTRY_MAX 64
#define NIX_RX_RQ 0
#define NIX_RX_RSS 1
	/* Receive queue or RSS index within pf_func */
	u32 rq_rss_index[NIX_MCE_ENTRY_MAX];
	/* pcifunc is required for both ingress and egress multicast */
	u16 pcifunc[NIX_MCE_ENTRY_MAX];
	/* channel is required for egress multicast */
	u16 channel[NIX_MCE_ENTRY_MAX];
#define NIX_MCAST_OP_ADD_ENTRY 0
#define NIX_MCAST_OP_DEL_ENTRY 1
	/* Destination type. 0:Receive queue, 1:RSS */
	u8 dest_type[NIX_MCE_ENTRY_MAX];
	/* NIX_MCAST_OP_ADD_ENTRY or NIX_MCAST_OP_DEL_ENTRY */
	u8 op;
	/* If AF is requesting for update, then set
	 * it to '1'. Otherwise keep it to '0'
	 */
	u8 is_af;
};
/* Mailbox response to NIX_MCAST_GRP_UPDATE */
struct nix_mcast_grp_update_rsp {
	struct mbox_msghdr hdr;
	/* Presumably the hardware MCE list index at which the group's
	 * entries start after this update — TODO confirm against the AF
	 * handler.
	 */
	u32 mce_start_index;
};
/* Global NIX inline IPSec configuration */ /* Global NIX inline IPSec configuration */
struct nix_inline_ipsec_cfg { struct nix_inline_ipsec_cfg {
struct mbox_msghdr hdr; struct mbox_msghdr hdr;
......
...@@ -156,7 +156,7 @@ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc) ...@@ -156,7 +156,7 @@ int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc)
return start; return start;
} }
static void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start) void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start)
{ {
if (!rsrc->bmap) if (!rsrc->bmap)
return; return;
...@@ -2614,6 +2614,10 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) ...@@ -2614,6 +2614,10 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
* 2. Flush and reset SSO/SSOW * 2. Flush and reset SSO/SSOW
* 3. Cleanup pools (NPA) * 3. Cleanup pools (NPA)
*/ */
/* Free multicast/mirror node associated with the 'pcifunc' */
rvu_nix_mcast_flr_free_entries(rvu, pcifunc);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX1);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0);
......
...@@ -116,11 +116,12 @@ struct rvu_block { ...@@ -116,11 +116,12 @@ struct rvu_block {
}; };
struct nix_mcast { struct nix_mcast {
struct qmem *mce_ctx; struct qmem *mce_ctx;
struct qmem *mcast_buf; struct qmem *mcast_buf;
int replay_pkind; int replay_pkind;
int next_free_mce; struct rsrc_bmap mce_counter[2];
struct mutex mce_lock; /* Serialize MCE updates */ /* Counters for both ingress and egress mcast lists */
struct mutex mce_lock; /* Serialize MCE updates */
}; };
struct nix_mce_list { struct nix_mce_list {
...@@ -129,6 +130,23 @@ struct nix_mce_list { ...@@ -129,6 +130,23 @@ struct nix_mce_list {
int max; int max;
}; };
/* AF-side bookkeeping for one multicast/mirror group created via
 * NIX_MCAST_GRP_CREATE. Linked (through 'list') into a per-NIX-block
 * list; see struct nix_mcast_grp below.
 */
struct nix_mcast_grp_elem {
	/* MCE entries belonging to this group */
	struct nix_mce_list	mcast_mce_list;
	/* Group id handed back to the PF/VF in the create response */
	u32			mcast_grp_idx;
	/* Owner PF/VF of this group */
	u32			pcifunc;
	/* MCAM entry programmed with this group's action — presumably a
	 * sentinel until a rule is installed; TODO confirm in the AF code.
	 */
	int			mcam_index;
	/* Start index of the group's entries in the hardware MCE table */
	int			mce_start_index;
	/* Linkage into the per-block group list */
	struct list_head	list;
	/* NIX_MCAST_INGRESS or NIX_MCAST_EGRESS */
	u8			dir;
};
/* Per-NIX-block container for all multicast/mirror groups */
struct nix_mcast_grp {
	/* List of struct nix_mcast_grp_elem */
	struct list_head mcast_grp_head;
	/* Number of groups currently on the list */
	int count;
	/* Presumably the next group index to hand out — TODO confirm */
	int next_grp_index;
	struct mutex mcast_grp_lock; /* Serialize MCE updates */
};
/* layer metadata to uniquely identify a packet header field */ /* layer metadata to uniquely identify a packet header field */
struct npc_layer_mdata { struct npc_layer_mdata {
u8 lid; u8 lid;
...@@ -339,6 +357,7 @@ struct nix_hw { ...@@ -339,6 +357,7 @@ struct nix_hw {
struct rvu *rvu; struct rvu *rvu;
struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */ struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */
struct nix_mcast mcast; struct nix_mcast mcast;
struct nix_mcast_grp mcast_grp;
struct nix_flowkey flowkey; struct nix_flowkey flowkey;
struct nix_mark_format mark_format; struct nix_mark_format mark_format;
struct nix_lso lso; struct nix_lso lso;
...@@ -741,6 +760,7 @@ void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id); ...@@ -741,6 +760,7 @@ void rvu_free_rsrc(struct rsrc_bmap *rsrc, int id);
bool is_rsrc_free(struct rsrc_bmap *rsrc, int id); bool is_rsrc_free(struct rsrc_bmap *rsrc, int id);
int rvu_rsrc_free_count(struct rsrc_bmap *rsrc); int rvu_rsrc_free_count(struct rsrc_bmap *rsrc);
int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc); int rvu_alloc_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc);
void rvu_free_rsrc_contig(struct rsrc_bmap *rsrc, int nrsrc, int start);
bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc); bool rvu_rsrc_check_contig(struct rsrc_bmap *rsrc, int nrsrc);
u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr); u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blkaddr);
int rvu_get_pf(u16 pcifunc); int rvu_get_pf(u16 pcifunc);
...@@ -847,6 +867,11 @@ u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu); ...@@ -847,6 +867,11 @@ u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu);
u32 convert_bytes_to_dwrr_mtu(u32 bytes); u32 convert_bytes_to_dwrr_mtu(u32 bytes);
void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc,
struct nix_txsch *txsch, bool enable); struct nix_txsch *txsch, bool enable);
void rvu_nix_mcast_flr_free_entries(struct rvu *rvu, u16 pcifunc);
int rvu_nix_mcast_get_mce_index(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx);
int rvu_nix_mcast_update_mcam_entry(struct rvu *rvu, u16 pcifunc,
u32 mcast_grp_idx, u16 mcam_index);
/* NPC APIs */ /* NPC APIs */
void rvu_npc_freemem(struct rvu *rvu); void rvu_npc_freemem(struct rvu *rvu);
...@@ -895,6 +920,10 @@ void npc_mcam_enable_flows(struct rvu *rvu, u16 target); ...@@ -895,6 +920,10 @@ void npc_mcam_enable_flows(struct rvu *rvu, u16 target);
void npc_mcam_disable_flows(struct rvu *rvu, u16 target); void npc_mcam_disable_flows(struct rvu *rvu, u16 target);
void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, bool enable); int blkaddr, int index, bool enable);
u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index);
void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, u64 cfg);
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry, int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena); u8 *intf, u8 *ena);
......
...@@ -589,8 +589,8 @@ static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, ...@@ -589,8 +589,8 @@ static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg); NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg);
} }
static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index) int blkaddr, int index)
{ {
int bank = npc_get_bank(mcam, index); int bank = npc_get_bank(mcam, index);
...@@ -599,6 +599,16 @@ static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, ...@@ -599,6 +599,16 @@ static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); NPC_AF_MCAMEX_BANKX_ACTION(index, bank));
} }
/* Program the action word of MCAM entry @index on NPC block @blkaddr.
 * Write-side counterpart of npc_get_mcam_action().
 */
void npc_set_mcam_action(struct rvu *rvu, struct npc_mcam *mcam,
			 int blkaddr, int index, u64 cfg)
{
	int bank = npc_get_bank(mcam, index);

	/* Action registers are banked; reduce the global entry index to
	 * an offset within its bank.
	 */
	index &= (mcam->banksize - 1);

	/* Plain write: 'return <expr>;' in a void function is an ISO C
	 * constraint violation (only accepted as a compiler extension).
	 */
	rvu_write64(rvu, blkaddr,
		    NPC_AF_MCAMEX_BANKX_ACTION(index, bank), cfg);
}
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr) int nixlf, u64 chan, u8 *mac_addr)
{ {
......
...@@ -1117,13 +1117,40 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, ...@@ -1117,13 +1117,40 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc,
} }
} }
/* For a multicast/mirror rule installed by a PF/VF, req->index carries the
 * multicast group index rather than a raw MCE index. Look up the group's
 * hardware MCE start index and patch it into the RX/TX action word.
 *
 * Returns 0 on success, or the negative error from
 * rvu_nix_mcast_get_mce_index() when no group exists for this pcifunc.
 * (Reconstructed from the new side of a garbled side-by-side diff.)
 */
static int npc_mcast_update_action_index(struct rvu *rvu, struct npc_install_flow_req *req,
					 u64 op, void *action)
{
	int mce_index;

	/* If a PF/VF is installing a multicast rule then it is expected
	 * that the PF/VF should have created a group for the multicast/mirror
	 * list. Otherwise reject the configuration.
	 * During this scenario, req->index is set as multicast/mirror
	 * group index.
	 */
	if (req->hdr.pcifunc &&
	    (op == NIX_RX_ACTIONOP_MCAST || op == NIX_TX_ACTIONOP_MCAST)) {
		mce_index = rvu_nix_mcast_get_mce_index(rvu, req->hdr.pcifunc, req->index);
		if (mce_index < 0)
			return mce_index;

		if (op == NIX_RX_ACTIONOP_MCAST)
			((struct nix_rx_action *)action)->index = mce_index;
		else
			((struct nix_tx_action *)action)->index = mce_index;
	}

	return 0;
}
static int npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct mcam_entry *entry,
struct npc_install_flow_req *req,
u16 target, bool pf_set_vfs_mac)
{ {
struct rvu_switch *rswitch = &rvu->rswitch; struct rvu_switch *rswitch = &rvu->rswitch;
struct nix_rx_action action; struct nix_rx_action action;
int ret;
if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac) if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac)
req->chan_mask = 0x0; /* Do not care channel */ req->chan_mask = 0x0; /* Do not care channel */
...@@ -1135,6 +1162,11 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, ...@@ -1135,6 +1162,11 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
action.pf_func = target; action.pf_func = target;
action.op = req->op; action.op = req->op;
action.index = req->index; action.index = req->index;
ret = npc_mcast_update_action_index(rvu, req, action.op, (void *)&action);
if (ret)
return ret;
action.match_id = req->match_id; action.match_id = req->match_id;
action.flow_key_alg = req->flow_key_alg; action.flow_key_alg = req->flow_key_alg;
...@@ -1166,14 +1198,17 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, ...@@ -1166,14 +1198,17 @@ static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) | FIELD_PREP(RX_VTAG1_TYPE_MASK, req->vtag1_type) |
FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) | FIELD_PREP(RX_VTAG1_LID_MASK, NPC_LID_LB) |
FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4); FIELD_PREP(RX_VTAG1_RELPTR_MASK, 4);
return 0;
} }
static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, static int npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct mcam_entry *entry, struct mcam_entry *entry,
struct npc_install_flow_req *req, u16 target) struct npc_install_flow_req *req, u16 target)
{ {
struct nix_tx_action action; struct nix_tx_action action;
u64 mask = ~0ULL; u64 mask = ~0ULL;
int ret;
/* If AF is installing then do not care about /* If AF is installing then do not care about
* PF_FUNC in Send Descriptor * PF_FUNC in Send Descriptor
...@@ -1187,6 +1222,11 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, ...@@ -1187,6 +1222,11 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
*(u64 *)&action = 0x00; *(u64 *)&action = 0x00;
action.op = req->op; action.op = req->op;
action.index = req->index; action.index = req->index;
ret = npc_mcast_update_action_index(rvu, req, action.op, (void *)&action);
if (ret)
return ret;
action.match_id = req->match_id; action.match_id = req->match_id;
entry->action = *(u64 *)&action; entry->action = *(u64 *)&action;
...@@ -1202,6 +1242,8 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, ...@@ -1202,6 +1242,8 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) | FIELD_PREP(TX_VTAG1_OP_MASK, req->vtag1_op) |
FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) | FIELD_PREP(TX_VTAG1_LID_MASK, NPC_LID_LA) |
FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24); FIELD_PREP(TX_VTAG1_RELPTR_MASK, 24);
return 0;
} }
static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
...@@ -1231,10 +1273,15 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, ...@@ -1231,10 +1273,15 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy, npc_update_flow(rvu, entry, features, &req->packet, &req->mask, &dummy,
req->intf, blkaddr); req->intf, blkaddr);
if (is_npc_intf_rx(req->intf)) if (is_npc_intf_rx(req->intf)) {
npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac); err = npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac);
else if (err)
npc_update_tx_entry(rvu, pfvf, entry, req, target); return err;
} else {
err = npc_update_tx_entry(rvu, pfvf, entry, req, target);
if (err)
return err;
}
/* Default unicast rules do not exist for TX */ /* Default unicast rules do not exist for TX */
if (is_npc_intf_tx(req->intf)) if (is_npc_intf_tx(req->intf))
...@@ -1351,6 +1398,10 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, ...@@ -1351,6 +1398,10 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target,
return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc, return rvu_nix_setup_ratelimit_aggr(rvu, req->hdr.pcifunc,
req->index, req->match_id); req->index, req->match_id);
if (owner && req->op == NIX_RX_ACTIONOP_MCAST)
return rvu_nix_mcast_update_mcam_entry(rvu, req->hdr.pcifunc,
req->index, entry_index);
return 0; return 0;
} }
......
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
#define OTX2_UNSUPP_LSE_DEPTH GENMASK(6, 4) #define OTX2_UNSUPP_LSE_DEPTH GENMASK(6, 4)
#define MCAST_INVALID_GRP (-1U)
struct otx2_tc_flow_stats { struct otx2_tc_flow_stats {
u64 bytes; u64 bytes;
u64 pkts; u64 pkts;
...@@ -47,6 +49,7 @@ struct otx2_tc_flow { ...@@ -47,6 +49,7 @@ struct otx2_tc_flow {
bool is_act_police; bool is_act_police;
u32 prio; u32 prio;
struct npc_install_flow_req req; struct npc_install_flow_req req;
u32 mcast_grp_idx;
u64 rate; u64 rate;
u32 burst; u32 burst;
bool is_pps; bool is_pps;
...@@ -355,22 +358,96 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic, ...@@ -355,22 +358,96 @@ static int otx2_tc_act_set_police(struct otx2_nic *nic,
return rc; return rc;
} }
/* Create an ingress multicast/mirror group for a TC rule and populate it.
 *
 * Two AF mailbox transactions under one mbox lock:
 *   1. NIX_MCAST_GRP_CREATE  -> returns the new group index.
 *   2. NIX_MCAST_GRP_UPDATE  -> adds this netdev (slot 0) plus the mirror
 *      targets the caller pre-filled in @ureq (slots 1..num_intf-1).
 * On success, @req is rewritten to a NIX_RX_ACTIONOP_MCAST action pointing
 * at the group, and the index is remembered in @node for later destroy.
 *
 * Returns 0 on success or a negative errno; on any failure the mbox lock
 * is released via the 'error' label before returning.
 */
static int otx2_tc_update_mcast(struct otx2_nic *nic,
				struct npc_install_flow_req *req,
				struct netlink_ext_ack *extack,
				struct otx2_tc_flow *node,
				struct nix_mcast_grp_update_req *ureq,
				u8 num_intf)
{
	struct nix_mcast_grp_update_req *grp_update_req;
	struct nix_mcast_grp_create_req *creq;
	struct nix_mcast_grp_create_rsp *crsp;
	u32 grp_index;
	int rc;

	mutex_lock(&nic->mbox.lock);
	creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
	if (!creq) {
		rc = -ENOMEM;
		goto error;
	}

	creq->dir = NIX_MCAST_INGRESS;
	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group");
		goto error;
	}

	crsp = (struct nix_mcast_grp_create_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
								    0,
								    &creq->hdr);
	if (IS_ERR(crsp)) {
		rc = PTR_ERR(crsp);
		goto error;
	}

	grp_index = crsp->mcast_grp_idx;
	grp_update_req = otx2_mbox_alloc_msg_nix_mcast_grp_update(&nic->mbox);
	if (!grp_update_req) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
		rc = -ENOMEM;
		goto error;
	}

	/* Slot 0 always describes this interface itself */
	ureq->op = NIX_MCAST_OP_ADD_ENTRY;
	ureq->mcast_grp_idx = grp_index;
	ureq->num_mce_entry = num_intf;
	ureq->pcifunc[0] = nic->pcifunc;
	ureq->channel[0] = nic->hw.tx_chan_base;

	ureq->dest_type[0] = NIX_RX_RSS;
	ureq->rq_rss_index[0] = 0;
	/* Preserve the mbox header of the freshly allocated message, then
	 * overlay the caller-built payload onto it in one copy.
	 */
	memcpy(&ureq->hdr, &grp_update_req->hdr, sizeof(struct mbox_msghdr));
	memcpy(grp_update_req, ureq, sizeof(struct nix_mcast_grp_update_req));

	/* Send message to AF */
	rc = otx2_sync_mbox_msg(&nic->mbox);
	if (rc) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
		goto error;
	}

	mutex_unlock(&nic->mbox.lock);
	/* Point the flow's action at the new group */
	req->op = NIX_RX_ACTIONOP_MCAST;
	req->index = grp_index;
	node->mcast_grp_idx = grp_index;
	return 0;

error:
	mutex_unlock(&nic->mbox.lock);
	return rc;
}
static int otx2_tc_parse_actions(struct otx2_nic *nic, static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct flow_action *flow_action, struct flow_action *flow_action,
struct npc_install_flow_req *req, struct npc_install_flow_req *req,
struct flow_cls_offload *f, struct flow_cls_offload *f,
struct otx2_tc_flow *node) struct otx2_tc_flow *node)
{ {
struct nix_mcast_grp_update_req dummy_grp_update_req = { 0 };
struct netlink_ext_ack *extack = f->common.extack; struct netlink_ext_ack *extack = f->common.extack;
bool pps = false, mcast = false;
struct flow_action_entry *act; struct flow_action_entry *act;
struct net_device *target; struct net_device *target;
struct otx2_nic *priv; struct otx2_nic *priv;
u32 burst, mark = 0; u32 burst, mark = 0;
u8 nr_police = 0; u8 nr_police = 0;
bool pps = false; u8 num_intf = 1;
int err, i;
u64 rate; u64 rate;
int err;
int i;
if (!flow_action_has_entries(flow_action)) { if (!flow_action_has_entries(flow_action)) {
NL_SET_ERR_MSG_MOD(extack, "no tc actions specified"); NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
...@@ -442,11 +519,30 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, ...@@ -442,11 +519,30 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
req->index = act->rx_queue; req->index = act->rx_queue;
break; break;
case FLOW_ACTION_MIRRED_INGRESS:
target = act->dev;
priv = netdev_priv(target);
dummy_grp_update_req.pcifunc[num_intf] = priv->pcifunc;
dummy_grp_update_req.channel[num_intf] = priv->hw.tx_chan_base;
dummy_grp_update_req.dest_type[num_intf] = NIX_RX_RSS;
dummy_grp_update_req.rq_rss_index[num_intf] = 0;
mcast = true;
num_intf++;
break;
default: default:
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
} }
if (mcast) {
err = otx2_tc_update_mcast(nic, req, extack, node,
&dummy_grp_update_req,
num_intf);
if (err)
return err;
}
if (nr_police > 1) { if (nr_police > 1) {
NL_SET_ERR_MSG_MOD(extack, NL_SET_ERR_MSG_MOD(extack,
"rate limit police offload requires a single action"); "rate limit police offload requires a single action");
...@@ -1066,6 +1162,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic, ...@@ -1066,6 +1162,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
struct flow_cls_offload *tc_flow_cmd) struct flow_cls_offload *tc_flow_cmd)
{ {
struct otx2_flow_config *flow_cfg = nic->flow_cfg; struct otx2_flow_config *flow_cfg = nic->flow_cfg;
struct nix_mcast_grp_destroy_req *grp_destroy_req;
struct otx2_tc_flow *flow_node; struct otx2_tc_flow *flow_node;
int err; int err;
...@@ -1099,6 +1196,15 @@ static int otx2_tc_del_flow(struct otx2_nic *nic, ...@@ -1099,6 +1196,15 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
mutex_unlock(&nic->mbox.lock); mutex_unlock(&nic->mbox.lock);
} }
/* Remove the multicast/mirror related nodes */
if (flow_node->mcast_grp_idx != MCAST_INVALID_GRP) {
mutex_lock(&nic->mbox.lock);
grp_destroy_req = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&nic->mbox);
grp_destroy_req->mcast_grp_idx = flow_node->mcast_grp_idx;
otx2_sync_mbox_msg(&nic->mbox);
mutex_unlock(&nic->mbox.lock);
}
free_mcam_flow: free_mcam_flow:
otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL); otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
...@@ -1138,6 +1244,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, ...@@ -1138,6 +1244,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic,
spin_lock_init(&new_node->lock); spin_lock_init(&new_node->lock);
new_node->cookie = tc_flow_cmd->cookie; new_node->cookie = tc_flow_cmd->cookie;
new_node->prio = tc_flow_cmd->common.prio; new_node->prio = tc_flow_cmd->common.prio;
new_node->mcast_grp_idx = MCAST_INVALID_GRP;
memset(&dummy, 0, sizeof(struct npc_install_flow_req)); memset(&dummy, 0, sizeof(struct npc_install_flow_req));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment