Commit f75886a0 authored by David S. Miller

Merge branch 'octeontx2-macsec-offload'

Subbaraya Sundeep says:

====================
net: Introduce macsec hardware offload for cn10k platform

CN10K-B and CNF10K-B variants of the CN10K silicon have a macsec
block (MCS) to encrypt and decrypt packets at the MAC/hardware level.
This block is a global resource with hardware resources like SecYs,
SCs and SAs, and sits between the NIX block and the RPM LMAC. CN10K-B
silicon has only one MCS block which receives packets from all LMACs,
whereas CNF10K-B has seven MCS blocks for seven LMACs. The two MCS
blocks are similar in operation except for a few register offsets and
some configurations which require writing to different registers. This
patchset introduces macsec hardware offloading support. The AF driver
manages the hardware resources and the PF driver consumes them when
macsec hardware offloading is needed.

Patch 1 adds a basic PCI driver for both CN10K-B and CNF10K-B
silicons and initializes the hardware block.
Patches 2 and 3 add mailboxes to init, reset and manage
resources of the MCS block.
Patch 4 adds a low priority rule in the MCS TCAM so that
traffic which does not need macsec processing can be sent/received.
Patch 5 adds macsec stats collection support.
Patch 6 adds interrupt handling support; any event in which the
AF consumer is interested can be notified via mbox notification.
Patch 7 adds debugfs support which helps in debugging the packet
path.
Patch 8 introduces the macsec hardware offload feature for the
PF netdev driver.

v3 changes:
 Fixed clang and sparse warnings

v2 changes:
 Fix build error by changing #ifdef CONFIG_MACSEC to
 #if IS_ENABLED(CONFIG_MACSEC)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 99507e76 c54ffc73
@@ -11,4 +11,4 @@ rvu_mbox-y := mbox.o rvu_trace.o
 rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
	rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
	rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
-	rvu_sdp.o rvu_npc_hash.o
+	rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o
/* SPDX-License-Identifier: GPL-2.0 */
/* Marvell CN10K MCS driver
*
* Copyright (C) 2022 Marvell.
*/
#ifndef MCS_H
#define MCS_H
#include <linux/bits.h>
#include "rvu.h"
#define PCI_DEVID_CN10K_MCS 0xA096
#define MCSX_LINK_LMAC_RANGE_MASK GENMASK_ULL(19, 16)
#define MCSX_LINK_LMAC_BASE_MASK GENMASK_ULL(11, 0)
#define MCS_ID_MASK 0x7
#define MCS_MAX_PFS 128
#define MCS_PORT_MODE_MASK 0x3
#define MCS_PORT_FIFO_SKID_MASK 0x3F
#define MCS_MAX_CUSTOM_TAGS 0x8
#define MCS_CTRLPKT_ETYPE_RULE_MAX 8
#define MCS_CTRLPKT_DA_RULE_MAX 8
#define MCS_CTRLPKT_DA_RANGE_RULE_MAX 4
#define MCS_CTRLPKT_COMBO_RULE_MAX 4
#define MCS_CTRLPKT_MAC_RULE_MAX 1
#define MCS_MAX_CTRLPKT_RULES (MCS_CTRLPKT_ETYPE_RULE_MAX + \
MCS_CTRLPKT_DA_RULE_MAX + \
MCS_CTRLPKT_DA_RANGE_RULE_MAX + \
MCS_CTRLPKT_COMBO_RULE_MAX + \
MCS_CTRLPKT_MAC_RULE_MAX)
#define MCS_CTRLPKT_ETYPE_RULE_OFFSET 0
#define MCS_CTRLPKT_DA_RULE_OFFSET 8
#define MCS_CTRLPKT_DA_RANGE_RULE_OFFSET 16
#define MCS_CTRLPKT_COMBO_RULE_OFFSET 20
#define MCS_CTRLPKT_MAC_EN_RULE_OFFSET 24
/* Reserved resources for default bypass entry */
#define MCS_RSRC_RSVD_CNT 1
/* MCS Interrupt Vector Enumeration */
enum mcs_int_vec_e {
MCS_INT_VEC_MIL_RX_GBL = 0x0,
MCS_INT_VEC_MIL_RX_LMACX = 0x1,
MCS_INT_VEC_MIL_TX_LMACX = 0x5,
MCS_INT_VEC_HIL_RX_GBL = 0x9,
MCS_INT_VEC_HIL_RX_LMACX = 0xa,
MCS_INT_VEC_HIL_TX_GBL = 0xe,
MCS_INT_VEC_HIL_TX_LMACX = 0xf,
MCS_INT_VEC_IP = 0x13,
MCS_INT_VEC_CNT = 0x14,
};
#define MCS_MAX_BBE_INT 8ULL
#define MCS_BBE_INT_MASK 0xFFULL
#define MCS_MAX_PAB_INT 4ULL
#define MCS_PAB_INT_MASK 0xFULL
#define MCS_BBE_RX_INT_ENA BIT_ULL(0)
#define MCS_BBE_TX_INT_ENA BIT_ULL(1)
#define MCS_CPM_RX_INT_ENA BIT_ULL(2)
#define MCS_CPM_TX_INT_ENA BIT_ULL(3)
#define MCS_PAB_RX_INT_ENA BIT_ULL(4)
#define MCS_PAB_TX_INT_ENA BIT_ULL(5)
#define MCS_CPM_TX_INT_PACKET_XPN_EQ0 BIT_ULL(0)
#define MCS_CPM_TX_INT_PN_THRESH_REACHED BIT_ULL(1)
#define MCS_CPM_TX_INT_SA_NOT_VALID BIT_ULL(2)
#define MCS_CPM_RX_INT_SECTAG_V_EQ1 BIT_ULL(0)
#define MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 BIT_ULL(1)
#define MCS_CPM_RX_INT_SL_GTE48 BIT_ULL(2)
#define MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 BIT_ULL(3)
#define MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 BIT_ULL(4)
#define MCS_CPM_RX_INT_PACKET_XPN_EQ0 BIT_ULL(5)
#define MCS_CPM_RX_INT_PN_THRESH_REACHED BIT_ULL(6)
#define MCS_CPM_RX_INT_ALL (MCS_CPM_RX_INT_SECTAG_V_EQ1 | \
MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 | \
MCS_CPM_RX_INT_SL_GTE48 | \
MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 | \
MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 | \
MCS_CPM_RX_INT_PACKET_XPN_EQ0 | \
MCS_CPM_RX_INT_PN_THRESH_REACHED)
struct mcs_pfvf {
u64 intr_mask; /* Enabled Interrupt mask */
};
struct mcs_intr_event {
u16 pcifunc;
u64 intr_mask;
u64 sa_id;
u8 mcs_id;
u8 lmac_id;
};
struct mcs_intrq_entry {
struct list_head node;
struct mcs_intr_event intr_event;
};
struct secy_mem_map {
u8 flow_id;
u8 secy;
u8 ctrl_pkt;
u8 sc;
u64 sci;
};
struct mcs_rsrc_map {
u16 *flowid2pf_map;
u16 *secy2pf_map;
u16 *sc2pf_map;
u16 *sa2pf_map;
u16 *flowid2secy_map; /* flowid mapped to secy */
u16 *ctrlpktrule2pf_map;
struct rsrc_bmap flow_ids;
struct rsrc_bmap secy;
struct rsrc_bmap sc;
struct rsrc_bmap sa;
struct rsrc_bmap ctrlpktrule;
};
struct hwinfo {
u8 tcam_entries;
u8 secy_entries;
u8 sc_entries;
u16 sa_entries;
u8 mcs_x2p_intf;
u8 lmac_cnt;
u8 mcs_blks;
unsigned long lmac_bmap; /* bitmap of enabled mcs lmac */
};
struct mcs {
void __iomem *reg_base;
struct pci_dev *pdev;
struct device *dev;
struct hwinfo *hw;
struct mcs_rsrc_map tx;
struct mcs_rsrc_map rx;
u16 pf_map[MCS_MAX_PFS]; /* List of PCIFUNC mapped to MCS */
u8 mcs_id;
struct mcs_ops *mcs_ops;
struct list_head mcs_list;
/* Lock for mcs stats */
struct mutex stats_lock;
struct mcs_pfvf *pf;
struct mcs_pfvf *vf;
u16 num_vec;
void *rvu;
u16 *tx_sa_active;
};
struct mcs_ops {
void (*mcs_set_hw_capabilities)(struct mcs *mcs);
void (*mcs_parser_cfg)(struct mcs *mcs);
void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
};
extern struct pci_driver mcs_driver;
static inline void mcs_reg_write(struct mcs *mcs, u64 offset, u64 val)
{
writeq(val, mcs->reg_base + offset);
}
static inline u64 mcs_reg_read(struct mcs *mcs, u64 offset)
{
return readq(mcs->reg_base + offset);
}
/* MCS APIs */
struct mcs *mcs_get_pdata(int mcs_id);
int mcs_get_blkcnt(void);
int mcs_set_lmac_channels(int mcs_id, u16 base);
int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc);
int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc);
int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flowid, u8 *secy_id,
u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir);
int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc);
void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir);
void mcs_ena_dis_flowid_entry(struct mcs *mcs, int id, int dir, int ena);
void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int id, int ena);
void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int id, int dir);
void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int id, int dir);
void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id);
void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa, int dir);
void mcs_map_sc_to_sa(struct mcs *mcs, u64 *sa_map, int sc, int dir);
void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir);
void mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
void mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn);
int mcs_install_flowid_bypass_entry(struct mcs *mcs);
void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode);
void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset);
void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req);
void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
struct mcs_port_cfg_get_rsp *rsp);
void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
struct mcs_custom_tag_cfg_get_rsp *rsp);
int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
/* CN10K-B APIs */
void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void cn10kb_mcs_parser_cfg(struct mcs *mcs);
/* CNF10K-B APIs */
struct mcs_ops *cnf10kb_get_mac_ops(void);
void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs);
void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
/* Stats APIs */
void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir);
void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats, int id, int dir);
void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats, int id, int dir);
void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir);
int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir);
int mcs_set_force_clk_en(struct mcs *mcs, bool set);
int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event);
#endif /* MCS_H */
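
The tx/rx resource maps above pair each rsrc_bmap with a *2pf_map array so
the AF always knows which PCIFUNC owns an entry and can reclaim it on
teardown. As a rough illustration of the allocation pattern, here is a
minimal sketch: the prototype mirrors mcs_alloc_rsrc() above, but the body
is an assumption built on the generic rvu bitmap convention, not the
driver's actual code.

/* Sketch: hand out one free entry from a resource bitmap and record the
 * owning PCIFUNC so it can be reclaimed later (e.g. on FLR). Body is an
 * assumption; only the prototype comes from mcs.h above.
 */
static int example_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc)
{
	int rsrc_id;

	rsrc_id = find_first_zero_bit(rsrc->bmap, rsrc->max);
	if (rsrc_id >= rsrc->max)
		return -ENOMEM;

	set_bit(rsrc_id, rsrc->bmap);
	pf_map[rsrc_id] = pcifunc;

	return rsrc_id;
}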
// SPDX-License-Identifier: GPL-2.0
/* Marvell MCS driver
*
* Copyright (C) 2022 Marvell.
*/
#include "mcs.h"
#include "mcs_reg.h"
static struct mcs_ops cnf10kb_mcs_ops = {
.mcs_set_hw_capabilities = cnf10kb_mcs_set_hw_capabilities,
.mcs_parser_cfg = cnf10kb_mcs_parser_cfg,
.mcs_tx_sa_mem_map_write = cnf10kb_mcs_tx_sa_mem_map_write,
.mcs_rx_sa_mem_map_write = cnf10kb_mcs_rx_sa_mem_map_write,
.mcs_flowid_secy_map = cnf10kb_mcs_flowid_secy_map,
};
struct mcs_ops *cnf10kb_get_mac_ops(void)
{
return &cnf10kb_mcs_ops;
}
void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
{
struct hwinfo *hw = mcs->hw;
hw->tcam_entries = 64; /* TCAM entries */
hw->secy_entries = 64; /* SecY entries */
hw->sc_entries = 64; /* SC CAM entries */
hw->sa_entries = 128; /* SA entries */
hw->lmac_cnt = 4; /* lmacs/ports per mcs block */
hw->mcs_x2p_intf = 1; /* x2p calibration intf */
hw->mcs_blks = 7; /* MCS blocks */
}
void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
{
u64 reg, val;
/* VLAN Ctag */
val = (0x8100ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(22);
reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0);
mcs_reg_write(mcs, reg, val);
reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0);
mcs_reg_write(mcs, reg, val);
/* VLAN STag */
val = (0x88a8ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(23);
/* RX */
reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1);
mcs_reg_write(mcs, reg, val);
/* TX */
reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1);
mcs_reg_write(mcs, reg, val);
/* Enable custom tags 0 and 1 and sectag */
val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12);
reg = MCSX_PEX_RX_SLAVE_ETYPE_ENABLE;
mcs_reg_write(mcs, reg, val);
reg = MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
mcs_reg_write(mcs, reg, val);
}
void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
{
u64 reg, val;
val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6;
if (dir == MCS_RX) {
reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
} else {
reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
mcs_reg_write(mcs, reg, map->sci);
val |= (map->sc & 0x3F) << 7;
reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id);
}
mcs_reg_write(mcs, reg, val);
}
void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
{
u64 reg, val;
val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7;
reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
mcs_reg_write(mcs, reg, val);
reg = MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0;
val = mcs_reg_read(mcs, reg);
if (map->rekey_ena)
val |= BIT_ULL(map->sc_id);
else
val &= ~BIT_ULL(map->sc_id);
mcs_reg_write(mcs, reg, val);
mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld);
mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld);
mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active);
}
void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
{
u64 val, reg;
val = (map->sa_index & 0x7F) | (map->sa_in_use << 7);
reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
mcs_reg_write(mcs, reg, val);
}
int mcs_set_force_clk_en(struct mcs *mcs, bool set)
{
unsigned long timeout = jiffies + usecs_to_jiffies(2000);
u64 val;
val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
if (set) {
val |= BIT_ULL(4);
mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
/* Poll till mcsx_mil_ip_gbl_status.mcs_ip_stats_ready value is 1 */
while (!(mcs_reg_read(mcs, MCSX_MIL_IP_GBL_STATUS) & BIT_ULL(0))) {
if (time_after(jiffies, timeout)) {
dev_err(mcs->dev, "MCS set force clk enable failed\n");
break;
}
}
} else {
val &= ~BIT_ULL(4);
mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
}
return 0;
}
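/* Sketch (assumption, not the driver's code): the busy-wait above could
 * also be written with the generic helper from <linux/iopoll.h>, which
 * bounds the wait and relaxes the CPU between reads:
 *
 *	u64 st;
 *
 *	if (read_poll_timeout(mcs_reg_read, st, st & BIT_ULL(0),
 *			      1, 2000, false, mcs, MCSX_MIL_IP_GBL_STATUS))
 *		dev_err(mcs->dev, "MCS set force clk enable failed\n");
 */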
/* A TX SA interrupt is raised only if auto rekey is enabled.
 * The MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled
 * when one of the two SAs mapped to the SC expires: tx_sa_active = 0
 * implies the SA in SA_index1 expired, otherwise the SA in SA_index0
 * expired.
 */
void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
{
struct mcs_intr_event event;
struct rsrc_bmap *sc_bmap;
unsigned long rekey_ena;
u64 val, sa_status;
int sc;
sc_bmap = &mcs->tx.sc;
event.mcs_id = mcs->mcs_id;
event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
rekey_ena = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0);
for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
/* Skip SCs on which auto rekey is not enabled */
if (!test_bit(sc, &rekey_ena))
continue;
sa_status = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(sc));
/* Check whether the tx_sa_active status has toggled */
if (sa_status == mcs->tx_sa_active[sc])
continue;
val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
if (sa_status)
event.sa_id = val & 0x7F; /* SA_index0 expired */
else
event.sa_id = (val >> 7) & 0x7F; /* SA_index1 expired */
event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
mcs_add_intr_wq_entry(mcs, &event);
}
}
void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
{
struct mcs_intr_event event = { 0 };
struct rsrc_bmap *sc_bmap;
u64 val;
int sc;
sc_bmap = &mcs->tx.sc;
event.mcs_id = mcs->mcs_id;
event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
if (mcs->tx_sa_active[sc])
/* SA_index1 was used and got expired */
event.sa_id = (val >> 7) & 0x7F;
else
/* SA_index0 was used and got expired */
event.sa_id = val & 0x7F;
event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
mcs_add_intr_wq_entry(mcs, &event);
}
}
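
All variant-specific behaviour funnels through struct mcs_ops: the table
above serves CNF10K-B, while the CN10K-B defaults live in one of the
collapsed files. The sketch below shows how probe-time dispatch could
look; the helper name and the variant test are illustrative assumptions,
and only cnf10kb_get_mac_ops() and the ops fields come from the code
above.

/* Sketch: choose the variant ops once at probe, then call through the
 * table everywhere else. cnf10kb_get_mac_ops() is declared in mcs.h;
 * the helper name and how the variant is detected are assumptions.
 */
static void example_setup_variant(struct mcs *mcs, bool is_cnf10kb)
{
	if (is_cnf10kb)
		mcs->mcs_ops = cnf10kb_get_mac_ops();

	/* From here on, callers stay variant-agnostic: */
	mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
	mcs->mcs_ops->mcs_parser_cfg(mcs);
}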
@@ -16,6 +16,7 @@
 #include "rvu.h"
 #include "rvu_reg.h"
 #include "ptp.h"
+#include "mcs.h"
 
 #include "rvu_trace.h"
 #include "rvu_npc_hash.h"
@@ -23,8 +24,6 @@
 #define DRV_NAME	"rvu_af"
 #define DRV_STRING	"Marvell OcteonTX2 RVU Admin Function Driver"
 
-static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
-
 static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
				struct rvu_block *block, int lf);
 static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
@@ -418,7 +417,7 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
	*hwvf = cfg & 0xFFF;
 }
 
-static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
 {
	int pf, func;
	u64 cfg;
@@ -1159,6 +1158,12 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
 
	rvu_program_channels(rvu);
 
+	err = rvu_mcs_init(rvu);
+	if (err) {
+		dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
+		goto nix_err;
+	}
+
	return 0;
 
 nix_err:
@@ -3293,6 +3298,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 err_hwsetup:
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
+	rvu_mcs_exit(rvu);
	rvu_reset_all_blocks(rvu);
	rvu_free_hw_resources(rvu);
	rvu_clear_rvum_blk_revid(rvu);
@@ -3319,6 +3325,7 @@ static void rvu_remove(struct pci_dev *pdev)
	rvu_flr_wq_destroy(rvu);
	rvu_cgx_exit(rvu);
	rvu_fwdata_exit(rvu);
+	rvu_mcs_exit(rvu);
	rvu_mbox_destroy(&rvu->afpf_wq_info);
	rvu_disable_sriov(rvu);
	rvu_reset_all_blocks(rvu);
@@ -3354,12 +3361,18 @@ static int __init rvu_init_module(void)
	if (err < 0)
		goto ptp_err;
 
+	err = pci_register_driver(&mcs_driver);
+	if (err < 0)
+		goto mcs_err;
+
	err = pci_register_driver(&rvu_driver);
	if (err < 0)
		goto rvu_err;
 
	return 0;
 rvu_err:
+	pci_unregister_driver(&mcs_driver);
+mcs_err:
	pci_unregister_driver(&ptp_driver);
 ptp_err:
	pci_unregister_driver(&cgx_driver);
@@ -3370,6 +3383,7 @@ static int __init rvu_init_module(void)
 static void __exit rvu_cleanup_module(void)
 {
	pci_unregister_driver(&rvu_driver);
+	pci_unregister_driver(&mcs_driver);
	pci_unregister_driver(&ptp_driver);
	pci_unregister_driver(&cgx_driver);
 }
@@ -25,6 +25,8 @@
 /* Subsystem Device ID */
 #define PCI_SUBSYS_DEVID_96XX		0xB200
 #define PCI_SUBSYS_DEVID_CN10K_A	0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_B	0xBC00
+#define PCI_SUBSYS_DEVID_CN10K_B	0xBD00
 
 /* PCI BAR nos */
 #define PCI_AF_REG_BAR_NUM		0
@@ -62,6 +64,10 @@ struct rvu_debugfs {
	struct dentry *nix;
	struct dentry *npc;
	struct dentry *cpt;
+	struct dentry *mcs_root;
+	struct dentry *mcs;
+	struct dentry *mcs_rx;
+	struct dentry *mcs_tx;
	struct dump_ctx npa_aura_ctx;
	struct dump_ctx npa_pool_ctx;
	struct dump_ctx nix_cq_ctx;
@@ -497,6 +503,8 @@ struct rvu {
	struct ptp *ptp;
 
+	int mcs_blk_cnt;
+
 #ifdef CONFIG_DEBUG_FS
	struct rvu_debugfs rvu_dbg;
 #endif
@@ -504,6 +512,12 @@
	/* RVU switch implementation over NPC with DMAC rules */
	struct rvu_switch rswitch;
+
+	struct work_struct mcs_intr_work;
+	struct workqueue_struct *mcs_intr_wq;
+	struct list_head mcs_intrq_head;
+	/* mcs interrupt queue lock */
+	spinlock_t mcs_intrq_lock;
 };
 
 static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -868,4 +882,11 @@ void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
 int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
			   u64 pkind, u8 var_len_off, u8 var_len_off_mask,
			   u8 shift_dir);
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+
+/* CN10K MCS */
+int rvu_mcs_init(struct rvu *rvu);
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
+void rvu_mcs_exit(struct rvu *rvu);
+
 #endif /* RVU_H */
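
rvu_mcs_flr_handler(), declared above, is the AF hook for reclaiming MCS
state when a PF/VF undergoes a function level reset. A plausible shape
for it, assembled only from APIs declared in mcs.h (the loop is an
assumption; the real implementation is in one of the collapsed diffs):

/* Sketch: on a function level reset, walk every MCS block and free all
 * resources owned by the PCIFUNC in both directions. MCS_RX is used by
 * the code above; MCS_TX is assumed to be its TX-direction counterpart.
 */
int example_mcs_flr_handler(struct rvu *rvu, u16 pcifunc)
{
	struct mcs *mcs;
	int mcs_id;

	for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
		mcs = mcs_get_pdata(mcs_id);
		mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
		mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
	}

	return 0;
}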
@@ -13,5 +13,6 @@ rvu_nicvf-y := otx2_vf.o otx2_devlink.o
 rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
 rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
 
 ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
@@ -1827,4 +1827,5 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
 }									\
 EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
 MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
 #undef M
@@ -19,6 +19,7 @@
 #include <net/devlink.h>
 #include <linux/time64.h>
 #include <linux/dim.h>
+#include <uapi/linux/if_macsec.h>
 
 #include <mbox.h>
 #include <npc.h>
@@ -33,6 +34,7 @@
 #define PCI_DEVID_OCTEONTX2_RVU_AFVF		0xA0F8
 
 #define PCI_SUBSYS_DEVID_96XX_RVU_PFVF		0xB200
+#define PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF	0xBD00
 
 /* PCI BAR nos */
 #define PCI_CFG_REG_BAR_NUM			2
@@ -244,6 +246,7 @@ struct otx2_hw {
 #define CN10K_LMTST		2
 #define CN10K_RPM		3
 #define CN10K_PTP_ONESTEP	4
+#define CN10K_HW_MACSEC		5
	unsigned long cap_flag;
 
 #define LMT_LINE_SIZE		128
@@ -351,6 +354,66 @@ struct dev_hw_ops {
	void (*aura_freeptr)(void *dev, int aura, u64 buf);
 };
 
+#define CN10K_MCS_SA_PER_SC	4
+
+/* Stats which need to be accumulated in software because
+ * of shared counters in hardware.
+ */
+struct cn10k_txsc_stats {
+	u64 InPktsUntagged;
+	u64 InPktsNoTag;
+	u64 InPktsBadTag;
+	u64 InPktsUnknownSCI;
+	u64 InPktsNoSCI;
+	u64 InPktsOverrun;
+};
+
+struct cn10k_rxsc_stats {
+	u64 InOctetsValidated;
+	u64 InOctetsDecrypted;
+	u64 InPktsUnchecked;
+	u64 InPktsDelayed;
+	u64 InPktsOK;
+	u64 InPktsInvalid;
+	u64 InPktsLate;
+	u64 InPktsNotValid;
+	u64 InPktsNotUsingSA;
+	u64 InPktsUnusedSA;
+};
+
+struct cn10k_mcs_txsc {
+	struct macsec_secy *sw_secy;
+	struct cn10k_txsc_stats stats;
+	struct list_head entry;
+	enum macsec_validation_type last_validate_frames;
+	bool last_protect_frames;
+	u16 hw_secy_id_tx;
+	u16 hw_secy_id_rx;
+	u16 hw_flow_id;
+	u16 hw_sc_id;
+	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+	u8 sa_bmap;
+	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+	u8 encoding_sa;
+};
+
+struct cn10k_mcs_rxsc {
+	struct macsec_secy *sw_secy;
+	struct macsec_rx_sc *sw_rxsc;
+	struct cn10k_rxsc_stats stats;
+	struct list_head entry;
+	u16 hw_flow_id;
+	u16 hw_sc_id;
+	u16 hw_sa_id[CN10K_MCS_SA_PER_SC];
+	u8 sa_bmap;
+	u8 sa_key[CN10K_MCS_SA_PER_SC][MACSEC_MAX_KEY_LEN];
+};
+
+struct cn10k_mcs_cfg {
+	struct list_head txsc_list;
+	struct list_head rxsc_list;
+};
+
 struct otx2_nic {
	void __iomem *reg_base;
	struct net_device *netdev;
@@ -438,6 +501,10 @@ struct otx2_nic {
	/* napi event count. It is needed for adaptive irq coalescing. */
	u32 napi_events;
+
+#if IS_ENABLED(CONFIG_MACSEC)
+	struct cn10k_mcs_cfg *macsec_cfg;
+#endif
 };
 
 static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -477,6 +544,11 @@ static inline bool is_dev_otx2(struct pci_dev *pdev)
		midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO);
 }
 
+static inline bool is_dev_cn10kb(struct pci_dev *pdev)
+{
+	return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
+}
+
 static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
 {
	struct otx2_hw *hw = &pfvf->hw;
@@ -508,6 +580,9 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
		__set_bit(CN10K_RPM, &hw->cap_flag);
		__set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
	}
+
+	if (is_dev_cn10kb(pfvf->pdev))
+		__set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
 }
 
 /* Register read/write APIs */
@@ -763,6 +838,7 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
			struct _rsp_type *rsp); \
 MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
 #undef M
 
 /* Time to wait before watchdog kicks off */
@@ -945,4 +1021,18 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf);
 int otx2_pfc_txschq_update(struct otx2_nic *pfvf);
 int otx2_pfc_txschq_stop(struct otx2_nic *pfvf);
 #endif
+
+#if IS_ENABLED(CONFIG_MACSEC)
+/* MACSEC offload support */
+int cn10k_mcs_init(struct otx2_nic *pfvf);
+void cn10k_mcs_free(struct otx2_nic *pfvf);
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event);
+#else
+static inline int cn10k_mcs_init(struct otx2_nic *pfvf) { return 0; }
+static inline void cn10k_mcs_free(struct otx2_nic *pfvf) {}
+static inline void cn10k_handle_mcs_event(struct otx2_nic *pfvf,
+					  struct mcs_intr_info *event)
+{}
+#endif /* CONFIG_MACSEC */
+
 #endif /* OTX2_COMMON_H */
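
The cn10k_txsc_stats/cn10k_rxsc_stats structs exist because some MCS
counters are shared in hardware, so per-SC totals have to be accumulated
in software before an SA or SC is recycled. A sketch of that accumulation
step follows; the hardware snapshot struct is hypothetical, and only
cn10k_rxsc_stats comes from the diff above.

/* Hypothetical snapshot of RX-SC counters fetched from the AF over the
 * mbox; the field names here are placeholders, not the real mbox layout.
 */
struct example_hw_rxsc_snapshot {
	u64 pkt_ok;
	u64 pkt_late;
	u64 pkt_delayed;
};

/* Sketch: fold the hardware deltas into the software totals so the
 * values survive re-keying, which can reuse the shared counters.
 */
static void example_accumulate_rxsc(struct cn10k_rxsc_stats *sw,
				    const struct example_hw_rxsc_snapshot *hw)
{
	sw->InPktsOK += hw->pkt_ok;
	sw->InPktsLate += hw->pkt_late;
	sw->InPktsDelayed += hw->pkt_delayed;
}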
@@ -858,6 +858,15 @@ static void otx2_handle_link_event(struct otx2_nic *pf)
	}
 }
 
+int otx2_mbox_up_handler_mcs_intr_notify(struct otx2_nic *pf,
+					 struct mcs_intr_info *event,
+					 struct msg_rsp *rsp)
+{
+	cn10k_handle_mcs_event(pf, event);
+
+	return 0;
+}
+
 int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
					struct cgx_link_info_msg *msg,
					struct msg_rsp *rsp)
@@ -917,6 +926,7 @@ static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
		return err;						\
	}
 MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
 #undef M
		break;
	default:
@@ -2764,6 +2774,10 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	if (err)
		goto err_ptp_destroy;
 
+	err = cn10k_mcs_init(pf);
+	if (err)
+		goto err_del_mcam_entries;
+
	if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
		netdev->hw_features |= NETIF_F_NTUPLE;
@@ -2978,6 +2992,8 @@
		otx2_config_pause_frm(pf);
	}
 
+	cn10k_mcs_free(pf);
+
 #ifdef CONFIG_DCB
	/* Disable PFC config */
	if (pf->pfc_en) {
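
MBOX_UP_MCS_MESSAGES works through the driver's X-macro pattern: the same
list expands into handler prototypes in otx2_common.h, exported stubs in
otx2_common.c, and dispatch cases in otx2_process_mbox_msg_up() above. A
simplified sketch of that last expansion follows; the driver's real macro
also allocates and queues the response, and pf/msg/err are assumed to be
the surrounding function's variables.

/* Sketch: each M(name, id, fn, req, rsp) entry becomes a switch case
 * that casts the mailbox message and calls the matching up-handler.
 * Simplified from the driver's macro, which also builds the response.
 */
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
	case _id:							\
		err = otx2_mbox_up_handler_ ## _fn_name(pf,		\
				(struct _req_type *)msg, NULL);		\
		break;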