Commit ca7f49ff authored by Geetha sowjanya's avatar Geetha sowjanya Committed by David S. Miller

octeontx2-af: cn10k: Introduce driver for macsec block.

CN10K-B and CNF10K-B have a macsec block (MCS) to encrypt and
decrypt packets at the MAC level. This block is a global resource
with hardware resources like SecYs, SCs and SAs and sits
between the NIX block and the RPM LMAC. CN10K-B silicon has only one MCS
block which receives packets from all LMACs, whereas CNF10K-B has
seven MCS blocks for seven LMACs. Both MCS blocks are
similar in operation except for a few register offsets and some
configurations that require writing to different registers. Those
differences between the IPs are handled using separate ops.
This patch adds basic driver and does the initial hardware
calibration and parser configuration.
Signed-off-by: default avatarGeetha sowjanya <gakula@marvell.com>
Signed-off-by: default avatarVamsi Attunuru <vattunuru@marvell.com>
Signed-off-by: default avatarSunil Goutham <sgoutham@marvell.com>
Signed-off-by: default avatarSubbaraya Sundeep <sbhatta@marvell.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 99507e76
...@@ -11,4 +11,4 @@ rvu_mbox-y := mbox.o rvu_trace.o ...@@ -11,4 +11,4 @@ rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
rvu_sdp.o rvu_npc_hash.o rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o
// SPDX-License-Identifier: GPL-2.0
/* Marvell MCS driver
*
* Copyright (C) 2022 Marvell.
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "mcs.h"
#include "mcs_reg.h"
#define DRV_NAME "Marvell MCS Driver"
#define PCI_CFG_REG_BAR_NUM 0
static const struct pci_device_id mcs_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) },
{ 0, } /* end of table */
};
static LIST_HEAD(mcs_list);
/* Allocate a zeroed, device-managed array of @n u16 entries.
 * Memory is freed automatically when mcs->dev is released, so the
 * error paths in callers never need to free these explicitly.
 */
static void *alloc_mem(struct mcs *mcs, int n)
{
	return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
}
static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
{
struct hwinfo *hw = mcs->hw;
int err;
res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
if (!res->flowid2pf_map)
return -ENOMEM;
res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
if (!res->secy2pf_map)
return -ENOMEM;
res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
if (!res->sc2pf_map)
return -ENOMEM;
res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
if (!res->sa2pf_map)
return -ENOMEM;
res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
if (!res->flowid2secy_map)
return -ENOMEM;
res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
err = rvu_alloc_bitmap(&res->flow_ids);
if (err)
return err;
res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
err = rvu_alloc_bitmap(&res->secy);
if (err)
return err;
res->sc.max = hw->sc_entries;
err = rvu_alloc_bitmap(&res->sc);
if (err)
return err;
res->sa.max = hw->sa_entries;
err = rvu_alloc_bitmap(&res->sa);
if (err)
return err;
return 0;
}
int mcs_get_blkcnt(void)
{
struct mcs *mcs;
int idmax = -ENODEV;
/* Check MCS block is present in hardware */
if (!pci_dev_present(mcs_id_table))
return 0;
list_for_each_entry(mcs, &mcs_list, mcs_list)
if (mcs->mcs_id > idmax)
idmax = mcs->mcs_id;
if (idmax < 0)
return 0;
return idmax + 1;
}
struct mcs *mcs_get_pdata(int mcs_id)
{
struct mcs *mcs_dev;
list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
if (mcs_dev->mcs_id == mcs_id)
return mcs_dev;
}
return NULL;
}
/* Set lmac to bypass/operational mode.
 * NOTE(review): the channel index is lmac_id * 2 — presumably each LMAC
 * owns two channels; confirm against the hardware manual.
 */
void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
{
	mcs_reg_write(mcs, MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2),
		      (u64)mode);
}
/* CN10K-B parser setup: program the VLAN CTag and STag ethertypes into
 * the RX and TX PEX VLAN config registers so the parser can recognize
 * VLAN-tagged frames.
 */
void cn10kb_mcs_parser_cfg(struct mcs *mcs)
{
	u64 cfg;

	/* Entry 0: VLAN CTag (0x8100); bit0 enables the entry,
	 * ethertype sits at bits [16:1], bit17 flags it as a CTag.
	 */
	cfg = BIT_ULL(0) | (0x8100ull << 1) | BIT_ULL(17);
	mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_VLAN_CFGX(0), cfg);
	mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_VLAN_CFGX(0), cfg);

	/* Entry 1: VLAN STag (0x88a8); bit18 flags it as an STag */
	cfg = BIT_ULL(0) | (0x88a8ull << 1) | BIT_ULL(18);
	mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_VLAN_CFGX(1), cfg);
	mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_VLAN_CFGX(1), cfg);
}
/* Per-LMAC init: default the RX port config (comment says 25GB mode),
 * then either program the RX FIFO skid (CNF10K-B, multi-block) or the
 * TX port config (CN10K-B, single block).
 */
static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
{
	mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id), 0);

	if (mcs->hw->mcs_blks > 1) {
		/* CNF10K-B only: FIFO skid config; 0xe000e meaning is
		 * undocumented here — TODO confirm against HRM.
		 */
		mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id),
			      0xe000e);
		return;
	}

	mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id), 0);
}
/* Program each LMAC's channel base/range on the given MCS block.
 * Every LMAC gets a window of 16 channels starting at @base.
 * Note: the MCSX_* offset macros reference a local named "mcs".
 */
int mcs_set_lmac_channels(int mcs_id, u16 base)
{
	struct mcs *mcs;
	int lmac_id;
	u64 cfg;

	mcs = mcs_get_pdata(mcs_id);
	if (!mcs)
		return -ENODEV;

	for (lmac_id = 0; lmac_id < mcs->hw->lmac_cnt; lmac_id++, base += 16) {
		cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac_id));
		cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
		/* range field holds log2 of the window size */
		cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16)) |
		       FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
		mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac_id), cfg);
	}

	return 0;
}
/* Run X2P calibration: set the calibrate bit, poll (up to ~20 ms) for
 * the done flag, then check that every X2P interface responded.
 * Returns 0 on success, -EBUSY on timeout or a non-responding interface.
 */
static int mcs_x2p_calibration(struct mcs *mcs)
{
	unsigned long expiry = jiffies + usecs_to_jiffies(20000);
	int intf, ret = 0;
	u64 cfg;

	/* Kick off calibration (MIL_GLOBAL bit 5) */
	cfg = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, cfg | BIT_ULL(5));

	/* Poll the done bit; give up after the deadline.
	 * On timeout the calibrate bit is deliberately left set,
	 * matching the original behavior.
	 */
	while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
		if (!time_before(jiffies, expiry)) {
			dev_err(mcs->dev, "MCS X2P calibration failed..ignoring\n");
			return -EBUSY;
		}
		usleep_range(80, 100);
	}

	/* Per-interface status sits in bits [1 .. mcs_x2p_intf] */
	cfg = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
	for (intf = 0; intf < mcs->hw->mcs_x2p_intf; intf++) {
		if (cfg & BIT_ULL(1 + intf))
			continue;
		ret = -EBUSY;
		dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", intf);
	}

	/* Clear X2P calibrate */
	mcs_reg_write(mcs, MCSX_MIL_GLOBAL,
		      mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));

	return ret;
}
/* Toggle external bypass mode (MIL_GLOBAL bit 6): when set, traffic
 * bypasses the MCS block entirely.
 */
static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
{
	u64 cfg = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);

	if (bypass)
		cfg |= BIT_ULL(6);
	else
		cfg &= ~BIT_ULL(6);

	mcs_reg_write(mcs, MCSX_MIL_GLOBAL, cfg);
}
/* One-time global block setup: take the block out of external bypass,
 * then apply the per-variant mode configuration.
 */
static void mcs_global_cfg(struct mcs *mcs)
{
	mcs_set_external_bypass(mcs, false);

	if (mcs->hw->mcs_blks == 1) {
		/* CN10K-B: standard IEEE 802.1AE macsec processing */
		mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
	} else {
		/* CNF10K-B: BBE RX calendar setup; the 0xe4/4 values are
		 * undocumented here — TODO confirm against HRM.
		 */
		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
		mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
	}
}
/* Fill in the hardware capability table for CN10K-B: a single MCS
 * block shared by 20 LMACs.
 */
void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
{
	struct hwinfo *hw = mcs->hw;

	hw->mcs_blks = 1;	/* MCS blocks */
	hw->lmac_cnt = 20;	/* lmacs/ports per mcs block */
	hw->mcs_x2p_intf = 5;	/* x2p calibration interfaces */
	hw->tcam_entries = 128;	/* TCAM entries */
	hw->secy_entries = 128;	/* SecY entries */
	hw->sc_entries = 128;	/* SC CAM entries */
	hw->sa_entries = 256;	/* SA entries */
}
/* Variant ops for CN10K-B (single MCS block); the CNF10K-B equivalent
 * lives in mcs_cnf10kb.c and is fetched via cnf10kb_get_mac_ops().
 */
static struct mcs_ops cn10kb_mcs_ops = {
	.mcs_set_hw_capabilities = cn10kb_mcs_set_hw_capabilities,
	.mcs_parser_cfg = cn10kb_mcs_parser_cfg,
};
/* PCI probe: map BAR0, select variant ops, run global config and X2P
 * calibration, allocate TX/RX resource maps, init each LMAC, program
 * the parser, and add the block to the global mcs_list.
 *
 * Fix vs. original: a pci_request_regions() failure previously jumped
 * to a common exit label that called pci_release_regions() on regions
 * that were never requested. Error unwinding now uses distinct labels
 * so each step only undoes what actually succeeded.
 */
static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	int lmac, err = 0;
	struct mcs *mcs;

	mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
	if (!mcs)
		return -ENOMEM;

	mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
	if (!mcs->hw)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		/* Regions were never requested; only undo the enable */
		goto err_disable_device;
	}

	mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!mcs->reg_base) {
		dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, mcs);
	mcs->pdev = pdev;
	mcs->dev = &pdev->dev;

	/* Select register-layout ops by silicon variant */
	if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
		mcs->mcs_ops = &cn10kb_mcs_ops;
	else
		mcs->mcs_ops = cnf10kb_get_mac_ops();

	/* Set hardware capabilities */
	mcs->mcs_ops->mcs_set_hw_capabilities(mcs);

	mcs_global_cfg(mcs);

	/* Perform X2P calibration */
	err = mcs_x2p_calibration(mcs);
	if (err)
		goto err_x2p;

	/* Block id comes from BAR address bits above 24, masked to
	 * MCS_ID_MASK — presumably the per-block BAR stride; TODO confirm.
	 */
	mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
		      & MCS_ID_MASK;

	/* Set mcs tx side resources */
	err = mcs_alloc_struct_mem(mcs, &mcs->tx);
	if (err)
		goto err_x2p;

	/* Set mcs rx side resources */
	err = mcs_alloc_struct_mem(mcs, &mcs->rx);
	if (err)
		goto err_x2p;

	/* per port config */
	for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
		mcs_lmac_init(mcs, lmac);

	/* Parser configuration */
	mcs->mcs_ops->mcs_parser_cfg(mcs);

	list_add(&mcs->mcs_list, &mcs_list);

	return 0;

err_x2p:
	/* Put the block back into external bypass before tearing down */
	mcs_set_external_bypass(mcs, true);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
/* PCI remove: re-enable external bypass so traffic keeps flowing
 * around the MCS block, then release PCI resources. The mcs struct
 * itself is devm-managed and freed with the device.
 */
static void mcs_remove(struct pci_dev *pdev)
{
	struct mcs *mcs = pci_get_drvdata(pdev);

	/* Set MCS to external bypass */
	mcs_set_external_bypass(mcs, true);
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
/* Registered/unregistered by the RVU AF module (rvu_init_module /
 * rvu_cleanup_module), not via module_pci_driver — hence non-static.
 */
struct pci_driver mcs_driver = {
	.name = DRV_NAME,
	.id_table = mcs_id_table,
	.probe = mcs_probe,
	.remove = mcs_remove,
};
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell CN10K MCS driver
*
* Copyright (C) 2022 Marvell.
*/
#ifndef MCS_H
#define MCS_H
#include <linux/bits.h>
#include "rvu.h"
#define PCI_DEVID_CN10K_MCS 0xA096
#define MCSX_LINK_LMAC_RANGE_MASK GENMASK_ULL(19, 16)
#define MCSX_LINK_LMAC_BASE_MASK GENMASK_ULL(11, 0)
#define MCS_ID_MASK 0x7
/* Reserved resources for default bypass entry */
#define MCS_RSRC_RSVD_CNT 1
/* Resource bookkeeping for one direction (TX or RX) of an MCS block.
 * The *2pf_map arrays are sized to the entry counts in struct hwinfo
 * (see mcs_alloc_struct_mem()); presumably they record the owning PF
 * per hardware entry — set by later patches, confirm there.
 */
struct mcs_rsrc_map {
	u16 *flowid2pf_map;	/* one slot per TCAM flow entry */
	u16 *secy2pf_map;	/* one slot per SecY entry */
	u16 *sc2pf_map;		/* one slot per SC CAM entry */
	u16 *sa2pf_map;		/* one slot per SA entry */
	u16 *flowid2secy_map; /* bitmap flowid mapped to secy*/
	struct rsrc_bmap flow_ids;	/* max = tcam_entries - MCS_RSRC_RSVD_CNT */
	struct rsrc_bmap secy;		/* max = secy_entries - MCS_RSRC_RSVD_CNT */
	struct rsrc_bmap sc;		/* max = sc_entries */
	struct rsrc_bmap sa;		/* max = sa_entries */
};
/* Per-variant hardware capabilities, filled in by the variant's
 * mcs_set_hw_capabilities() op (CN10K-B / CNF10K-B values shown).
 */
struct hwinfo {
	u8 tcam_entries;	/* flow TCAM entries: 128 / 64 */
	u8 secy_entries;	/* SecY entries: 128 / 64 */
	u8 sc_entries;		/* SC CAM entries: 128 / 64 */
	u16 sa_entries;		/* SA entries: 256 / 128 (256 needs more than a u8) */
	u8 mcs_x2p_intf;	/* X2P calibration interfaces: 5 / 1 */
	u8 lmac_cnt;		/* lmacs/ports per MCS block: 20 / 4 */
	u8 mcs_blks;		/* MCS blocks on the silicon: 1 / 7 */
	unsigned long lmac_bmap; /* bitmap of enabled mcs lmac */
};
/* Per-device state for one MCS block; nodes are linked into the
 * file-local mcs_list in mcs.c and looked up via mcs_get_pdata().
 */
struct mcs {
	void __iomem *reg_base;		/* BAR0 CSR mapping (pcim_iomap) */
	struct pci_dev *pdev;
	struct device *dev;
	struct hwinfo *hw;		/* capabilities, devm-allocated */
	struct mcs_rsrc_map tx;		/* TX-side resource tracking */
	struct mcs_rsrc_map rx;		/* RX-side resource tracking */
	u8 mcs_id;			/* block id derived from the BAR address */
	struct mcs_ops *mcs_ops;	/* CN10K-B or CNF10K-B variant ops */
	struct list_head mcs_list;	/* node on the global mcs_list */
};
/* Variant-specific operations: CN10K-B and CNF10K-B differ in a few
 * register offsets and parser configuration, abstracted behind these.
 */
struct mcs_ops {
	void (*mcs_set_hw_capabilities)(struct mcs *mcs);	/* fill struct hwinfo */
	void (*mcs_parser_cfg)(struct mcs *mcs);		/* VLAN/etype parser setup */
};
extern struct pci_driver mcs_driver;
/* Write a 64-bit CSR at @offset from the block's BAR0 base.
 * Note: several MCSX_* offset macros in mcs_reg.h are statement
 * expressions that read a variable named "mcs", so offsets must be
 * computed where such a variable is in scope (as it is here).
 */
static inline void mcs_reg_write(struct mcs *mcs, u64 offset, u64 val)
{
	writeq(val, mcs->reg_base + offset);
}
/* Read a 64-bit CSR at @offset from the block's BAR0 base. */
static inline u64 mcs_reg_read(struct mcs *mcs, u64 offset)
{
	return readq(mcs->reg_base + offset);
}
/* MCS APIs */
struct mcs *mcs_get_pdata(int mcs_id);
int mcs_get_blkcnt(void);
int mcs_set_lmac_channels(int mcs_id, u16 base);
int mcs_install_flowid_bypass_entry(struct mcs *mcs);
void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode);
/* CN10K-B APIs */
void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
void cn10kb_mcs_parser_cfg(struct mcs *mcs);
/* CNF10K-B APIs */
struct mcs_ops *cnf10kb_get_mac_ops(void);
void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs);
void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
#endif /* MCS_H */
// SPDX-License-Identifier: GPL-2.0
/* Marvell MCS driver
*
* Copyright (C) 2022 Marvell.
*/
#include "mcs.h"
#include "mcs_reg.h"
/* Variant ops for CNF10K-B (seven MCS blocks, custom-tag parser) */
static struct mcs_ops cnf10kb_mcs_ops = {
	.mcs_set_hw_capabilities = cnf10kb_mcs_set_hw_capabilities,
	.mcs_parser_cfg = cnf10kb_mcs_parser_cfg,
};
/* Accessor used by mcs_probe() to pick the CNF10K-B ops without
 * exposing the ops struct itself outside this file.
 */
struct mcs_ops *cnf10kb_get_mac_ops(void)
{
	return &cnf10kb_mcs_ops;
}
/* Fill in the hardware capability table for CNF10K-B: seven MCS
 * blocks, each serving 4 LMACs.
 */
void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
{
	struct hwinfo *hw = mcs->hw;

	hw->mcs_blks = 7;	/* MCS blocks */
	hw->lmac_cnt = 4;	/* lmacs/ports per mcs block */
	hw->mcs_x2p_intf = 1;	/* x2p calibration interfaces */
	hw->tcam_entries = 64;	/* TCAM entries */
	hw->secy_entries = 64;	/* SecY entries */
	hw->sc_entries = 64;	/* SC CAM entries */
	hw->sa_entries = 128;	/* SA entries */
}
/* CNF10K-B parser setup: program VLAN CTag/STag as custom tags 0/1 on
 * both RX and TX, then enable those tags plus the SecTag ethertype.
 */
void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
{
	u64 cfg;

	/* Custom tag 0: VLAN CTag (0x8100) */
	cfg = 0x8100ull | BIT_ULL(20) | BIT_ULL(22);
	mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0), cfg);
	mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0), cfg);

	/* Custom tag 1: VLAN STag (0x88a8) */
	cfg = 0x88a8ull | BIT_ULL(20) | BIT_ULL(23);
	mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1), cfg);
	mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1), cfg);

	/* Enable custom tags 0 and 1 (bits 0/1) and the sectag (bit 12) */
	cfg = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12);
	mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_ETYPE_ENABLE, cfg);
	mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_ETYPE_ENABLE, cfg);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell MCS driver
*
* Copyright (C) 2022 Marvell.
*/
#ifndef MCS_REG_H
#define MCS_REG_H
#include <linux/bits.h>
/* Registers */
#define MCSX_IP_MODE 0x900c8ull
#define MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(a) ({ \
u64 offset; \
\
offset = 0x808ull; \
if (mcs->hw->mcs_blks > 1) \
offset = 0xa68ull; \
offset += (a) * 0x8ull; \
offset; })
#define MCSX_MIL_GLOBAL ({ \
u64 offset; \
\
offset = 0x80000ull; \
if (mcs->hw->mcs_blks > 1) \
offset = 0x60000ull; \
offset; })
#define MCSX_LINK_LMACX_CFG(a) ({ \
u64 offset; \
\
offset = 0x90000ull; \
if (mcs->hw->mcs_blks > 1) \
offset = 0x70000ull; \
offset += (a) * 0x800ull; \
offset; })
#define MCSX_MIL_RX_GBL_STATUS ({ \
u64 offset; \
\
offset = 0x800c8ull; \
if (mcs->hw->mcs_blks > 1) \
offset = 0x600c8ull; \
offset; })
/* PAB */
#define MCSX_PAB_RX_SLAVE_PORT_CFGX(a) ({ \
u64 offset; \
\
offset = 0x1718ull; \
if (mcs->hw->mcs_blks > 1) \
offset = 0x280ull; \
offset += (a) * 0x40ull; \
offset; })
#define MCSX_PAB_TX_SLAVE_PORT_CFGX(a) (0x2930ull + (a) * 0x40ull)
/* PEX registers */
#define MCSX_PEX_RX_SLAVE_VLAN_CFGX(a) (0x3b58ull + (a) * 0x8ull)
#define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a) (0x46f8ull + (a) * 0x8ull)
/* CNF10K-B */
#define MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(a) (0x4c8ull + (a) * 0x8ull)
#define MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(a) (0x748ull + (a) * 0x8ull)
#define MCSX_PEX_RX_SLAVE_ETYPE_ENABLE 0x6e8ull
#define MCSX_PEX_TX_SLAVE_ETYPE_ENABLE 0x968ull
/* BBE */
#define MCSX_BBE_RX_SLAVE_PADDING_CTL 0xe08ull
#define MCSX_BBE_TX_SLAVE_PADDING_CTL 0x12f8ull
#define MCSX_BBE_RX_SLAVE_CAL_ENTRY 0x180ull
#define MCSX_BBE_RX_SLAVE_CAL_LEN 0x188ull
#define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a) (0x290ull + (a) * 0x40ull)
#endif
// SPDX-License-Identifier: GPL-2.0
/* Marvell CN10K MCS driver
*
* Copyright (C) 2022 Marvell.
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include "mcs.h"
#include "rvu.h"
#include "lmac_common.h"
/* Build a bitmap of MCS ports (block 0) that are backed by a valid
 * CGX LMAC and store it in the MCS hw info.
 *
 * Fix vs. original: lmac_bmap was used uninitialized — set_bit() only
 * sets bits, so stack garbage leaked into mcs->hw->lmac_bmap (and
 * reading an uninitialized local is undefined behavior). It must
 * start at zero.
 */
static void rvu_mcs_set_lmac_bmap(struct rvu *rvu)
{
	struct mcs *mcs = mcs_get_pdata(0);
	unsigned long lmac_bmap = 0;
	int cgx, lmac, port;

	for (port = 0; port < mcs->hw->lmac_cnt; port++) {
		/* Map the flat MCS port number back to its CGX/LMAC pair */
		cgx = port / rvu->hw->lmac_per_cgx;
		lmac = port % rvu->hw->lmac_per_cgx;
		if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac))
			continue;
		set_bit(port, &lmac_bmap);
	}
	mcs->hw->lmac_bmap = lmac_bmap;
}
/* AF-side MCS init, called from rvu_setup_hw_resources(). Records the
 * block count; on CN10K-B (single block) also programs the per-LMAC
 * channel ranges and the active-LMAC bitmap. Returns 0 when no MCS
 * hardware is present.
 */
int rvu_mcs_init(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;
	int err = 0;

	rvu->mcs_blk_cnt = mcs_get_blkcnt();
	if (!rvu->mcs_blk_cnt)
		return 0;

	/* Needed only for CN10K-B */
	if (rvu->mcs_blk_cnt == 1) {
		err = mcs_set_lmac_channels(0, hw->cgx_chan_base);
		if (err)
			return err;

		/* Set active lmacs */
		rvu_mcs_set_lmac_bmap(rvu);
	}

	return err;
}
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include "rvu.h" #include "rvu.h"
#include "rvu_reg.h" #include "rvu_reg.h"
#include "ptp.h" #include "ptp.h"
#include "mcs.h"
#include "rvu_trace.h" #include "rvu_trace.h"
#include "rvu_npc_hash.h" #include "rvu_npc_hash.h"
...@@ -1159,6 +1160,12 @@ static int rvu_setup_hw_resources(struct rvu *rvu) ...@@ -1159,6 +1160,12 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
rvu_program_channels(rvu); rvu_program_channels(rvu);
err = rvu_mcs_init(rvu);
if (err) {
dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
goto nix_err;
}
return 0; return 0;
nix_err: nix_err:
...@@ -3354,12 +3361,18 @@ static int __init rvu_init_module(void) ...@@ -3354,12 +3361,18 @@ static int __init rvu_init_module(void)
if (err < 0) if (err < 0)
goto ptp_err; goto ptp_err;
err = pci_register_driver(&mcs_driver);
if (err < 0)
goto mcs_err;
err = pci_register_driver(&rvu_driver); err = pci_register_driver(&rvu_driver);
if (err < 0) if (err < 0)
goto rvu_err; goto rvu_err;
return 0; return 0;
rvu_err: rvu_err:
pci_unregister_driver(&mcs_driver);
mcs_err:
pci_unregister_driver(&ptp_driver); pci_unregister_driver(&ptp_driver);
ptp_err: ptp_err:
pci_unregister_driver(&cgx_driver); pci_unregister_driver(&cgx_driver);
...@@ -3370,6 +3383,7 @@ static int __init rvu_init_module(void) ...@@ -3370,6 +3383,7 @@ static int __init rvu_init_module(void)
static void __exit rvu_cleanup_module(void) static void __exit rvu_cleanup_module(void)
{ {
pci_unregister_driver(&rvu_driver); pci_unregister_driver(&rvu_driver);
pci_unregister_driver(&mcs_driver);
pci_unregister_driver(&ptp_driver); pci_unregister_driver(&ptp_driver);
pci_unregister_driver(&cgx_driver); pci_unregister_driver(&cgx_driver);
} }
......
...@@ -25,6 +25,8 @@ ...@@ -25,6 +25,8 @@
/* Subsystem Device ID */ /* Subsystem Device ID */
#define PCI_SUBSYS_DEVID_96XX 0xB200 #define PCI_SUBSYS_DEVID_96XX 0xB200
#define PCI_SUBSYS_DEVID_CN10K_A 0xB900 #define PCI_SUBSYS_DEVID_CN10K_A 0xB900
#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
/* PCI BAR nos */ /* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0 #define PCI_AF_REG_BAR_NUM 0
...@@ -497,6 +499,8 @@ struct rvu { ...@@ -497,6 +499,8 @@ struct rvu {
struct ptp *ptp; struct ptp *ptp;
int mcs_blk_cnt;
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg; struct rvu_debugfs rvu_dbg;
#endif #endif
...@@ -868,4 +872,8 @@ void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc); ...@@ -868,4 +872,8 @@ void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir, int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
u64 pkind, u8 var_len_off, u8 var_len_off_mask, u64 pkind, u8 var_len_off, u8 var_len_off_mask,
u8 shift_dir); u8 shift_dir);
/* CN10K MCS */
int rvu_mcs_init(struct rvu *rvu);
#endif /* RVU_H */ #endif /* RVU_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment