Commit fe1939bb authored by Radha Mohan Chintakuntla, committed by David S. Miller

octeontx2-af: Add SDP interface support

Added support for packet IO via SDP links, which are used when
Octeon is connected as an end-point. Traffic between the host and
the end-point flows through the SDP links. This patch also supports
the dual SDP blocks present in 98xx silicon.
Signed-off-by: Radha Mohan Chintakuntla <radhac@marvell.com>
Signed-off-by: Nalla Pradeep <pnalla@marvell.com>
Signed-off-by: Subrahmanyam Nilla <snilla@marvell.com>
Signed-off-by: Sunil Goutham <sgoutham@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent aefaa8c7
......@@ -10,4 +10,5 @@ obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o
rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
rvu_sdp.o
......@@ -185,13 +185,16 @@ enum nix_scheduler {
#define NIX_INTF_TYPE_CGX 0
#define NIX_INTF_TYPE_LBK 1
#define NIX_INTF_TYPE_SDP 2
#define MAX_LMAC_PKIND 12
#define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b))
#define NIX_LINK_LBK(a) (12 + (a))
#define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c))
#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
#define NIX_CHAN_SDP_CH_START (0x700ull)
#define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a))
#define NIX_CHAN_SDP_NUM_CHANS 256
/* The mask is to extract lower 10-bits of channel number
* which CPT will pass to X2P.
......
......@@ -191,6 +191,9 @@ M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
msg_rsp) \
/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
/* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \
M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\
npc_mcam_alloc_entry_rsp) \
......@@ -1446,6 +1449,27 @@ struct cpt_rxc_time_cfg_req {
u16 active_limit;
};
struct sdp_node_info {
/* Node to which this PF belongs */
u8 node_id;
u8 max_vfs;
u8 num_pf_rings;
u8 pf_srn;
#define SDP_MAX_VFS 128
u8 vf_rings[SDP_MAX_VFS];
};
struct sdp_chan_info_msg {
struct mbox_msghdr hdr;
struct sdp_node_info info;
};
struct sdp_get_chan_info_msg {
struct mbox_msghdr hdr;
u16 chan_base;
u16 num_chan;
};
/* CGX mailbox error codes
* Range 1101 - 1200.
*/
......
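For context, here is a minimal, self-contained sketch of how an SDP PF driver might fill the sdp_node_info introduced above before sending SET_SDP_CHAN_INFO. This is not driver code: the struct is re-declared locally so the snippet compiles on its own, and the ring counts are hypothetical.

/* Hypothetical example only; not part of this patch. */
#include <stdint.h>
#include <stdio.h>

#define SDP_MAX_VFS 128

struct sdp_node_info {			/* mirrors the struct in mbox.h above */
	uint8_t node_id;		/* SDP block index, 0 or 1 on 98xx */
	uint8_t max_vfs;
	uint8_t num_pf_rings;
	uint8_t pf_srn;			/* PF's starting ring number */
	uint8_t vf_rings[SDP_MAX_VFS];
};

int main(void)
{
	/* Example layout: 4 PF rings followed by 2 rings per VF. */
	struct sdp_node_info info = {
		.node_id = 0,
		.max_vfs = 2,
		.num_pf_rings = 4,
		.pf_srn = 0,
		.vf_rings = { 2, 2 },
	};

	printf("SDP%u: PF rings %u..%u, VF0 rings start at ring %u\n",
	       info.node_id, info.pf_srn,
	       info.pf_srn + info.num_pf_rings - 1,
	       info.pf_srn + info.num_pf_rings);
	return 0;
}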
......@@ -1118,6 +1118,12 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
goto nix_err;
}
err = rvu_sdp_init(rvu);
if (err) {
dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__);
goto nix_err;
}
rvu_program_channels(rvu);
return 0;
......@@ -1370,9 +1376,10 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
int blkaddr = BLKADDR_NIX0, vf;
struct rvu_pfvf *pf;
pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
/* All CGX mapped PFs are set with assigned NIX block during init */
if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) {
pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK);
blkaddr = pf->nix_blkaddr;
} else if (is_afvf(pcifunc)) {
vf = pcifunc - 1;
......@@ -1385,6 +1392,10 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc)
blkaddr = BLKADDR_NIX0;
}
/* if SDP1 then the blkaddr is NIX1 */
if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1)
blkaddr = BLKADDR_NIX1;
switch (blkaddr) {
case BLKADDR_NIX1:
pfvf->nix_blkaddr = BLKADDR_NIX1;
......
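The hunk above routes functions behind the second SDP block to NIX1. A minimal standalone sketch of that selection rule; the enum values and helper name below are illustrative stand-ins, not the driver's API.

/* Illustration only: mirrors the "SDP1 -> NIX1" rule added above. */
enum nix_blk { NIX0, NIX1 };	/* stand-ins for BLKADDR_NIX0/BLKADDR_NIX1 */

static enum nix_blk pick_nix_blk(int is_sdp, int sdp_node_id,
				 enum nix_blk default_blk)
{
	/* Functions on the second SDP block (node 1) use NIX1. */
	if (is_sdp && sdp_node_id == 1)
		return NIX1;
	return default_blk;
}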
......@@ -246,6 +246,7 @@ struct rvu_pfvf {
u8 lbkid; /* NIX0/1 lbk link ID */
u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */
unsigned long flags;
struct sdp_node_info *sdp_info;
};
enum rvu_pfvf_flags {
......@@ -597,6 +598,16 @@ static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid,
return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan;
}
static inline u16 rvu_nix_chan_sdp(struct rvu *rvu, u8 chan)
{
struct rvu_hwinfo *hw = rvu->hw;
if (!hw->cap.programmable_chans)
return NIX_CHAN_SDP_CHX(chan);
return hw->sdp_chan_base + chan;
}
static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan)
{
return rvu->hw->cpt_chan_base + chan;
......@@ -659,10 +670,17 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue,
int qsize, int inst_size, int res_size);
void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq);
/* SDP APIs */
int rvu_sdp_init(struct rvu *rvu);
bool is_sdp_pfvf(u16 pcifunc);
bool is_sdp_pf(u16 pcifunc);
bool is_sdp_vf(u16 pcifunc);
/* CGX APIs */
static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf)
{
return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs);
return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) &&
!is_sdp_pf(pf << RVU_PFVF_PF_SHIFT);
}
static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id)
......
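A standalone sketch of the lookup the new rvu_nix_chan_sdp() helper performs: on silicon without programmable channels the SDP range is fixed at base 0x700 with 256 channels, otherwise the base comes from hw->sdp_chan_base. The programmable base used below is an assumption for illustration.

/* Illustration only; mirrors rvu_nix_chan_sdp() above. */
#include <stdint.h>
#include <stdio.h>

#define NIX_CHAN_SDP_CH_START	0x700u
#define NIX_CHAN_SDP_NUM_CHANS	256u

static uint16_t nix_chan_sdp(int programmable_chans, uint16_t sdp_chan_base,
			     uint8_t chan)
{
	if (!programmable_chans)
		return NIX_CHAN_SDP_CH_START + chan;	/* fixed channel map */
	return sdp_chan_base + chan;			/* firmware-programmed base */
}

int main(void)
{
	/* Fixed map: SDP channel 5 lands at 0x705. */
	printf("0x%x\n", nix_chan_sdp(0, 0, 5));
	/* Programmable channels: hypothetical base 0x780. */
	printf("0x%x\n", nix_chan_sdp(1, 0x780, 5));
	return 0;
}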
......@@ -293,15 +293,19 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr,
static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
struct nix_lf_alloc_rsp *rsp, bool loop)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc);
u16 req_chan_base, req_chan_end, req_chan_cnt;
struct rvu_hwinfo *hw = rvu->hw;
struct sdp_node_info *sdp_info;
int pkind, pf, vf, lbkid, vfid;
struct mac_ops *mac_ops;
int pkind, pf, vf, lbkid;
u8 cgx_id, lmac_id;
bool from_vf;
int err;
pf = rvu_get_pf(pcifunc);
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
type != NIX_INTF_TYPE_SDP)
return 0;
switch (type) {
......@@ -325,6 +329,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
rvu_npc_set_pkind(rvu, pkind, pfvf);
mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));
/* By default we enable pause frames */
if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0)
mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id,
......@@ -376,6 +381,45 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf,
rsp->tx_link = hw->cgx_links + lbkid;
pfvf->lbkid = lbkid;
rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf);
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
break;
case NIX_INTF_TYPE_SDP:
from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
sdp_info = parent_pf->sdp_info;
if (!sdp_info) {
dev_err(rvu->dev, "Invalid sdp_info pointer\n");
return -EINVAL;
}
if (from_vf) {
req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn +
sdp_info->num_pf_rings;
vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1;
for (vfid = 0; vfid < vf; vfid++)
req_chan_base += sdp_info->vf_rings[vfid];
req_chan_cnt = sdp_info->vf_rings[vf];
req_chan_end = req_chan_base + req_chan_cnt - 1;
if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) ||
req_chan_end > rvu_nix_chan_sdp(rvu, 255)) {
dev_err(rvu->dev,
"PF_Func 0x%x: Invalid channel base and count\n",
pcifunc);
return -EINVAL;
}
} else {
req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn;
req_chan_cnt = sdp_info->num_pf_rings;
}
pfvf->rx_chan_base = req_chan_base;
pfvf->rx_chan_cnt = req_chan_cnt;
pfvf->tx_chan_base = pfvf->rx_chan_base;
pfvf->tx_chan_cnt = pfvf->rx_chan_cnt;
rsp->tx_link = hw->cgx_links + hw->lbk_links;
rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf,
pfvf->rx_chan_base,
pfvf->rx_chan_cnt);
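The SDP case above derives each function's receive channel window from the parent PF's sdp_node_info: the PF occupies pf_srn onward for num_pf_rings channels, and VF n starts after the PF rings plus the rings of all lower-numbered VFs. A self-contained sketch of that arithmetic, with hypothetical ring counts:

/* Illustration of the VF channel-base computation above; not driver code. */
#include <stdint.h>
#include <stdio.h>

#define SDP_CHAN_BASE 0x700u	/* rvu_nix_chan_sdp(rvu, 0) on fixed-map silicon */

static uint16_t sdp_vf_chan_base(uint16_t pf_srn, uint16_t num_pf_rings,
				 const uint8_t *vf_rings, int vf)
{
	/* PF window: SDP_CHAN_BASE + pf_srn, num_pf_rings channels wide. */
	uint16_t base = SDP_CHAN_BASE + pf_srn + num_pf_rings;
	int vfid;

	for (vfid = 0; vfid < vf; vfid++)
		base += vf_rings[vfid];		/* skip lower-numbered VFs */
	return base;
}

int main(void)
{
	uint8_t vf_rings[] = { 2, 2, 4 };	/* hypothetical per-VF ring counts */

	/* PF has 4 rings starting at ring 0, so VF2 starts at 0x700 + 4 + 2 + 2. */
	printf("VF2 chan base: 0x%x, count: %u\n",
	       sdp_vf_chan_base(0, 4, vf_rings, 2), vf_rings[2]);
	return 0;
}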
......@@ -459,9 +503,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
int type, int chan_id)
{
int bpid, blkaddr, lmac_chan_cnt;
int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt;
u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt;
struct rvu_hwinfo *hw = rvu->hw;
u16 cgx_bpid_cnt, lbk_bpid_cnt;
struct rvu_pfvf *pfvf;
u8 cgx_id, lmac_id;
u64 cfg;
......@@ -470,8 +514,12 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
lmac_chan_cnt = cfg & 0xFF;
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
sdp_chan_cnt = cfg & 0xFFF;
cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
......@@ -509,6 +557,17 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt))
return -EINVAL;
break;
case NIX_INTF_TYPE_SDP:
if ((req->chan_base + req->chan_cnt) > 255)
return -EINVAL;
bpid = sdp_bpid_cnt + req->chan_base;
if (req->bpid_per_chan)
bpid += chan_id;
if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt))
return -EINVAL;
break;
default:
return -EINVAL;
}
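In rvu_nix_get_bpid() the backpressure IDs are now sized from three pools: one per CGX LMAC channel, then the LBK channels, then the SDP channels, with the per-link channel counts taken from the NIX constant registers. A small worked sketch of the pool sizing; the register-derived values and link counts below are assumptions, not read from hardware.

/* Worked example of the bpid pool sizing above; all inputs are hypothetical. */
#include <stdio.h>

int main(void)
{
	unsigned int lmac_chan_cnt = 16;	/* per-LMAC channel count */
	unsigned int lbk_chan_cnt  = 16;	/* per-LBK-link channel count */
	unsigned int sdp_chan_cnt  = 256;	/* NIX_AF_CONST1[11:0] */
	unsigned int cgx_links = 4, lbk_links = 2, sdp_links = 1;

	unsigned int cgx_bpid_cnt = cgx_links * lmac_chan_cnt;
	unsigned int lbk_bpid_cnt = lbk_links * lbk_chan_cnt;
	unsigned int sdp_bpid_cnt = sdp_links * sdp_chan_cnt;

	printf("bpid pools: CGX %u, LBK %u, SDP %u\n",
	       cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt);
	return 0;
}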
......@@ -528,9 +587,12 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
pf = rvu_get_pf(pcifunc);
type = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
type = NIX_INTF_TYPE_SDP;
/* Enable backpressure only for CGX mapped PFs and LBK interface */
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK)
/* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */
if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK &&
type != NIX_INTF_TYPE_SDP)
return 0;
pfvf = rvu_get_pfvf(rvu, pcifunc);
......@@ -547,8 +609,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu,
}
cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan));
cfg &= ~GENMASK_ULL(8, 0);
rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan),
cfg | (bpid & 0xFF) | BIT_ULL(16));
cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16));
chan_id++;
bpid = rvu_nix_get_bpid(rvu, req, type, chan_id);
}
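The write above clears the BPID field of NIX_AF_RX_CHANX_CFG, inserts the new bpid (the mask widens from 8 to 9 bits in this hunk), and sets bit 16, which in this handler acts as the backpressure enable. A minimal sketch of that bit manipulation outside the driver; the macro names are illustrative.

/* Sketch of the register update above; not driver code. */
#include <stdint.h>
#include <stdio.h>

#define BPID_MASK	0x1FFull	/* bits 8:0 - BPID field */
#define BP_ENA_BIT	(1ull << 16)	/* presumed backpressure-enable bit */

static uint64_t set_chan_bpid(uint64_t cfg, uint16_t bpid)
{
	cfg &= ~BPID_MASK;			/* clear the old BPID */
	return cfg | (bpid & BPID_MASK) | BP_ENA_BIT;
}

int main(void)
{
	printf("0x%llx\n", (unsigned long long)set_chan_bpid(0, 0x123));
	return 0;
}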
......@@ -1329,6 +1392,9 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg);
intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX;
if (is_sdp_pfvf(pcifunc))
intf = NIX_INTF_TYPE_SDP;
err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp,
!!(req->flags & NIX_LF_LBK_BLK_SEL));
if (err)
......@@ -2772,14 +2838,19 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
struct npc_mcam *mcam = &rvu->hw->mcam;
struct rvu_hwinfo *hw = rvu->hw;
struct nix_mce_list *mce_list;
int pf;
/* skip multicast pkt replication for AF's VFs */
if (is_afvf(pcifunc))
/* skip multicast pkt replication for AF's VFs & SDP links */
if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc))
return 0;
if (!hw->cap.nix_rx_multicast)
return 0;
pf = rvu_get_pf(pcifunc);
if (!is_pf_cgxmapped(rvu, pf))
return 0;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
if (blkaddr < 0)
return -EINVAL;
......
......@@ -634,8 +634,8 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
struct nix_rx_action action;
int blkaddr, index;
/* AF's VFs work in promiscuous mode */
if (is_afvf(pcifunc))
/* AF's and SDP VFs work in promiscuous mode */
if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
......@@ -863,7 +863,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
u16 vf_func;
/* Only CGX PF/VF can add allmulticast entry */
if (is_afvf(pcifunc))
if (is_afvf(pcifunc) || is_sdp_vf(pcifunc))
return;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
......
// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Admin Function driver
*
* Copyright (C) 2021 Marvell.
*
*/
#include <linux/pci.h>
#include "rvu.h"
/* SDP PF device id */
#define PCI_DEVID_OTX2_SDP_PF 0xA0F6
/* Maximum SDP blocks in a chip */
#define MAX_SDP 2
/* SDP PF numbers, one per SDP block */
static int sdp_pf_num[MAX_SDP] = {-1, -1};
bool is_sdp_pfvf(u16 pcifunc)
{
u16 pf = rvu_get_pf(pcifunc);
u32 found = 0, i = 0;
while (i < MAX_SDP) {
if (pf == sdp_pf_num[i])
found = 1;
i++;
}
if (!found)
return false;
return true;
}
bool is_sdp_pf(u16 pcifunc)
{
return (is_sdp_pfvf(pcifunc) &&
!(pcifunc & RVU_PFVF_FUNC_MASK));
}
bool is_sdp_vf(u16 pcifunc)
{
return (is_sdp_pfvf(pcifunc) &&
!!(pcifunc & RVU_PFVF_FUNC_MASK));
}
int rvu_sdp_init(struct rvu *rvu)
{
struct pci_dev *pdev = NULL;
struct rvu_pfvf *pfvf;
u32 i = 0;
while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OTX2_SDP_PF,
pdev)) != NULL) {
/* The RVU PF number is one less than bus number */
sdp_pf_num[i] = pdev->bus->number - 1;
pfvf = &rvu->pf[sdp_pf_num[i]];
pfvf->sdp_info = devm_kzalloc(rvu->dev,
sizeof(struct sdp_node_info),
GFP_KERNEL);
if (!pfvf->sdp_info)
return -ENOMEM;
dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]);
put_device(&pdev->dev);
i++;
}
return 0;
}
int
rvu_mbox_handler_set_sdp_chan_info(struct rvu *rvu,
struct sdp_chan_info_msg *req,
struct msg_rsp *rsp)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
memcpy(pfvf->sdp_info, &req->info, sizeof(struct sdp_node_info));
dev_info(rvu->dev, "AF: SDP%d max_vfs %d num_pf_rings %d pf_srn %d\n",
req->info.node_id, req->info.max_vfs, req->info.num_pf_rings,
req->info.pf_srn);
return 0;
}
int
rvu_mbox_handler_get_sdp_chan_info(struct rvu *rvu, struct msg_req *req,
struct sdp_get_chan_info_msg *rsp)
{
struct rvu_hwinfo *hw = rvu->hw;
int blkaddr;
if (!hw->cap.programmable_chans) {
rsp->chan_base = NIX_CHAN_SDP_CH_START;
rsp->num_chan = NIX_CHAN_SDP_NUM_CHANS;
} else {
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
rsp->chan_base = hw->sdp_chan_base;
rsp->num_chan = rvu_read64(rvu, blkaddr, NIX_AF_CONST1) & 0xFFFUL;
}
return 0;
}
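Taken together, rvu_sdp.c lets the AF discover the SDP PFs at probe time; the SDP PF driver then reports its ring layout via SET_SDP_CHAN_INFO and can read back the channel window via GET_SDP_CHAN_INFO. Below is a standalone sketch of the pcifunc classification these helpers rely on, assuming the driver's usual encoding (PF number in bits 15:10, function number in bits 9:0, function 0 meaning the PF itself); the SDP PF numbers are hypothetical and the code is illustrative, not part of the patch.

/* Illustration of is_sdp_pfvf()/is_sdp_pf()/is_sdp_vf() above; not driver code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PF_SHIFT	10
#define FUNC_MASK	0x3FF
#define MAX_SDP		2

static const int sdp_pf_num[MAX_SDP] = { 2, 3 };	/* hypothetical SDP PFs */

static bool is_sdp_pfvf(uint16_t pcifunc)
{
	uint16_t pf = pcifunc >> PF_SHIFT;
	int i;

	for (i = 0; i < MAX_SDP; i++)
		if (pf == sdp_pf_num[i])
			return true;
	return false;
}

static bool is_sdp_pf(uint16_t pcifunc)
{
	return is_sdp_pfvf(pcifunc) && !(pcifunc & FUNC_MASK);
}

static bool is_sdp_vf(uint16_t pcifunc)
{
	return is_sdp_pfvf(pcifunc) && (pcifunc & FUNC_MASK);
}

int main(void)
{
	uint16_t sdp_pf2 = 2 << PF_SHIFT;		/* PF2, function 0 */
	uint16_t sdp_vf0 = (2 << PF_SHIFT) | 1;		/* PF2's first VF */
	uint16_t other   = 5 << PF_SHIFT;		/* non-SDP PF */

	printf("PF2:  pfvf=%d pf=%d vf=%d\n", is_sdp_pfvf(sdp_pf2),
	       is_sdp_pf(sdp_pf2), is_sdp_vf(sdp_pf2));
	printf("VF0:  pfvf=%d pf=%d vf=%d\n", is_sdp_pfvf(sdp_vf0),
	       is_sdp_pf(sdp_vf0), is_sdp_vf(sdp_vf0));
	printf("PF5:  pfvf=%d\n", is_sdp_pfvf(other));
	return 0;
}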