Commit f2e664ad authored by Rakesh Babu Saladi, committed by Paolo Abeni

octeontx2-af: Support variable number of lmacs

Most of the code in the CGX/RPM driver assumes that the maximum number of
LMACs per MAC block is always 4, and that the number of MAC blocks is also 4.
With this assumption, the maximum number of supported interfaces is hardcoded
to 16. This creates a problem, as the next-gen CN10KB silicon MAC supports
8 LMACs per MAC block.

This patch solves the problem by reading the "max LMACs per MAC block" value
from the constant CSRs and by using the cgx_cnt_max value, which is populated
based on the number of MAC blocks supported by the silicon.
Signed-off-by: Rakesh Babu Saladi <rsaladi2@marvell.com>
Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parent f82389ee
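The diff below derives both the RX FIFO length and the per-MAC LMAC count from
the CGX_CONST register instead of hardcoded macros. As a rough, self-contained
illustration of that decode: the sample register value, the *_MASK names, and
the standalone main() below are assumptions made for this sketch only; the
in-kernel code uses cgx_read() plus FIELD_GET() on GENMASK_ULL(23, 0) and
GENMASK_ULL(31, 24), exactly as shown in the hunks that follow.

/*
 * Standalone sketch (not kernel code) of the CGX_CONST layout used below:
 * bits [23:0] hold the RX FIFO size, bits [31:24] hold the max LMACs per
 * MAC block. The sample register value is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define CGX_CONST_RXFIFO_SIZE_MASK 0x0000000000ffffffULL /* GENMASK_ULL(23, 0) */
#define CGX_CONST_MAX_LMACS_MASK   0x00000000ff000000ULL /* GENMASK_ULL(31, 24) */

int main(void)
{
        uint64_t cfg = 0x0800c000;      /* hypothetical CGX_CONST readout */
        unsigned int fifo_len = cfg & CGX_CONST_RXFIFO_SIZE_MASK;
        unsigned int max_lmac_per_mac = (cfg & CGX_CONST_MAX_LMACS_MASK) >> 24;

        printf("rx fifo size = %u, max lmacs per MAC = %u\n",
               fifo_len, max_lmac_per_mac);
        return 0;
}

With a value of 8 in bits [31:24], the driver sizes its per-LMAC loops and maps
for 8 LMACs per MAC block instead of the previous fixed 4.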
@@ -78,7 +78,7 @@ static bool is_dev_rpm(void *cgxd)
 
 bool is_lmac_valid(struct cgx *cgx, int lmac_id)
 {
-        if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
+        if (!cgx || lmac_id < 0 || lmac_id >= cgx->max_lmac_per_mac)
                 return false;
         return test_bit(lmac_id, &cgx->lmac_bmap);
 }
@@ -90,7 +90,7 @@ static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
 {
         int tmp, id = 0;
 
-        for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+        for_each_set_bit(tmp, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
                 if (tmp == lmac_id)
                         break;
                 id++;
@@ -121,7 +121,7 @@ u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
 struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
 {
-        if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
+        if (!cgx || lmac_id >= cgx->max_lmac_per_mac)
                 return NULL;
 
         return cgx->lmac_idmap[lmac_id];
@@ -1395,7 +1395,7 @@ int cgx_get_fwdata_base(u64 *base)
         if (!cgx)
                 return -ENXIO;
 
-        first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
+        first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
         req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
         err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
         if (!err)
@@ -1484,7 +1484,7 @@ static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
 static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
 {
-        int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
+        int first_lmac = find_first_bit(&cgx->lmac_bmap, cgx->max_lmac_per_mac);
         u64 req = 0;
 
         req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
@@ -1522,7 +1522,7 @@ static void cgx_lmac_linkup_work(struct work_struct *work)
         int i, err;
 
         /* Do Link up for all the enabled lmacs */
-        for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+        for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
                 err = cgx_fwi_link_change(cgx, i, true);
                 if (err)
                         dev_info(dev, "cgx port %d:%d Link up command failed\n",
@@ -1542,14 +1542,6 @@ int cgx_lmac_linkup_start(void *cgxd)
         return 0;
 }
 
-static void cgx_lmac_get_fifolen(struct cgx *cgx)
-{
-        u64 cfg;
-
-        cfg = cgx_read(cgx, 0, CGX_CONST);
-        cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
-}
-
 static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
                                    int cnt, bool req_free)
 {
@@ -1604,17 +1596,14 @@ static int cgx_lmac_init(struct cgx *cgx)
         u64 lmac_list;
         int i, err;
 
-        cgx_lmac_get_fifolen(cgx);
-
-        cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
         /* lmac_list specifies which lmacs are enabled
          * when bit n is set to 1, LMAC[n] is enabled
          */
         if (cgx->mac_ops->non_contiguous_serdes_lane)
                 lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;
 
-        if (cgx->lmac_count > MAX_LMAC_PER_CGX)
-                cgx->lmac_count = MAX_LMAC_PER_CGX;
+        if (cgx->lmac_count > cgx->max_lmac_per_mac)
+                cgx->lmac_count = cgx->max_lmac_per_mac;
 
         for (i = 0; i < cgx->lmac_count; i++) {
                 lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
@@ -1692,7 +1681,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
         }
 
         /* Free all lmac related resources */
-        for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+        for_each_set_bit(i, &cgx->lmac_bmap, cgx->max_lmac_per_mac) {
                 lmac = cgx->lmac_idmap[i];
                 if (!lmac)
                         continue;
@@ -1708,6 +1697,12 @@ static int cgx_lmac_exit(struct cgx *cgx)
 static void cgx_populate_features(struct cgx *cgx)
 {
+        u64 cfg;
+
+        cfg = cgx_read(cgx, 0, CGX_CONST);
+        cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
+        cgx->max_lmac_per_mac = FIELD_GET(CGX_CONST_MAX_LMACS, cfg);
+
         if (is_dev_rpm(cgx))
                 cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
                                     RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
...
@@ -18,11 +18,8 @@
 /* PCI BAR nos */
 #define PCI_CFG_REG_BAR_NUM             0
 
-#define CGX_ID_MASK                     0x7
-#define MAX_LMAC_PER_CGX                4
+#define CGX_ID_MASK                     0xF
 #define MAX_DMAC_ENTRIES_PER_CGX        32
-#define CGX_FIFO_LEN                    65536 /* 64K for both Rx & Tx */
-#define CGX_OFFSET(x)                   ((x) * MAX_LMAC_PER_CGX)
 
 /* Registers */
 #define CGXX_CMRX_CFG                   0x00
@@ -57,6 +54,7 @@
 #define CGXX_SCRATCH1_REG               0x1058
 #define CGX_CONST                       0x2000
 #define CGX_CONST_RXFIFO_SIZE           GENMASK_ULL(23, 0)
+#define CGX_CONST_MAX_LMACS             GENMASK_ULL(31, 24)
 #define CGXX_SPUX_CONTROL1              0x10000
 #define CGXX_SPUX_LNX_FEC_CORR_BLOCKS   0x10700
 #define CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS 0x10800
...
@@ -128,7 +128,10 @@ struct cgx {
         struct pci_dev          *pdev;
         u8                      cgx_id;
         u8                      lmac_count;
-        struct lmac             *lmac_idmap[MAX_LMAC_PER_CGX];
+        /* number of LMACs per MAC could be 4 or 8 */
+        u8                      max_lmac_per_mac;
+#define MAX_LMAC_COUNT          8
+        struct lmac             *lmac_idmap[MAX_LMAC_COUNT];
         struct work_struct      cgx_cmd_work;
         struct workqueue_struct *cgx_cmd_workq;
         struct list_head        cgx_list;
...
@@ -478,7 +478,7 @@ struct rvu {
         u8                      cgx_mapped_pfs;
         u8                      cgx_cnt_max;     /* CGX port count max */
         u8                      *pf2cgxlmac_map; /* pf to cgx_lmac map */
-        u16                     *cgxlmac2pf_map; /* bitmap of mapped pfs for
+        u64                     *cgxlmac2pf_map; /* bitmap of mapped pfs for
                                                   * every cgx lmac port
                                                   */
         unsigned long           pf_notify_bmap;  /* Flags for PF notification */
...
@@ -55,8 +55,9 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
         return (cgx_features_get(cgxd) & feature);
 }
 
+#define CGX_OFFSET(x)                   ((x) * rvu->hw->lmac_per_cgx)
 /* Returns bitmap of mapped PFs */
-static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
+static u64 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
 {
         return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
 }
@@ -71,7 +72,8 @@ int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
         if (!pfmap)
                 return -ENODEV;
         else
-                return find_first_bit(&pfmap, 16);
+                return find_first_bit(&pfmap,
+                                      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
 }
 
 static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
@@ -129,14 +131,14 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
         if (!cgx_cnt_max)
                 return 0;
 
-        if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
+        if (cgx_cnt_max > 0xF || rvu->hw->lmac_per_cgx > 0xF)
                 return -EINVAL;
 
         /* Alloc map table
          * An additional entry is required since PF id starts from 1 and
          * hence entry at offset 0 is invalid.
          */
-        size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
+        size = (cgx_cnt_max * rvu->hw->lmac_per_cgx + 1) * sizeof(u8);
         rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
         if (!rvu->pf2cgxlmac_map)
                 return -ENOMEM;
@@ -145,9 +147,10 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
         memset(rvu->pf2cgxlmac_map, 0xFF, size);
 
         /* Reverse map table */
-        rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
-                                  cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
-                                  GFP_KERNEL);
+        rvu->cgxlmac2pf_map =
+                devm_kzalloc(rvu->dev,
+                             cgx_cnt_max * rvu->hw->lmac_per_cgx * sizeof(u64),
+                             GFP_KERNEL);
         if (!rvu->cgxlmac2pf_map)
                 return -ENOMEM;
@@ -156,7 +159,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
                 if (!rvu_cgx_pdata(cgx, rvu))
                         continue;
                 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
-                for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+                for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
                         lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
                                               iter);
                         rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
@@ -235,7 +238,8 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
         pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
 
         do {
-                pfid = find_first_bit(&pfmap, 16);
+                pfid = find_first_bit(&pfmap,
+                                      rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx);
                 clear_bit(pfid, &pfmap);
 
                 /* check if notification is enabled */
@@ -310,7 +314,7 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
                 if (!cgxd)
                         continue;
                 lmac_bmap = cgx_get_lmac_bmap(cgxd);
-                for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
+                for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx) {
                         err = cgx_lmac_evh_register(&cb, cgxd, lmac);
                         if (err)
                                 dev_err(rvu->dev,
@@ -396,7 +400,7 @@ int rvu_cgx_exit(struct rvu *rvu)
                 if (!cgxd)
                         continue;
                 lmac_bmap = cgx_get_lmac_bmap(cgxd);
-                for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
+                for_each_set_bit(lmac, &lmac_bmap, rvu->hw->lmac_per_cgx)
                         cgx_lmac_evh_unregister(cgxd, lmac);
         }
...
@@ -2613,7 +2613,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
                 rvu->rvu_dbg.cgx = debugfs_create_dir(dname,
                                                       rvu->rvu_dbg.cgx_root);
 
-                for_each_set_bit(lmac_id, &lmac_bmap, MAX_LMAC_PER_CGX) {
+                for_each_set_bit(lmac_id, &lmac_bmap, rvu->hw->lmac_per_cgx) {
                         /* lmac debugfs dir */
                         sprintf(dname, "lmac%d", lmac_id);
                         rvu->rvu_dbg.lmac =
...
@@ -4109,7 +4109,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr,
                 /* Get LMAC id's from bitmap */
                 lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
-                for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
+                for_each_set_bit(iter, &lmac_bmap, rvu->hw->lmac_per_cgx) {
                         lmac_fifo_len = rvu_cgx_get_lmac_fifolen(rvu, cgx, iter);
                         if (!lmac_fifo_len) {
                                 dev_err(rvu->dev,
...
@@ -1956,7 +1956,9 @@ int rvu_npc_exact_init(struct rvu *rvu)
         /* Install SDP drop rule */
         drop_mcam_idx = &table->num_drop_rules;
 
-        max_lmac_cnt = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX + PF_CGXMAP_BASE;
+        max_lmac_cnt = rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx +
+                       PF_CGXMAP_BASE;
         for (i = PF_CGXMAP_BASE; i < max_lmac_cnt; i++) {
                 if (rvu->pf2cgxlmac_map[i] == 0xFF)
                         continue;
...
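Several hunks above replace the fixed find_first_bit(&pfmap, 16) scans with a
width of rvu->cgx_cnt_max * rvu->hw->lmac_per_cgx, and widen cgxlmac2pf_map
entries from u16 to u64 so the per-port PF bitmap can hold more than 16 bits.
Below is a minimal userspace sketch of that notify loop; the counts, the
example bitmap, and the hand-rolled find_first_set() helper (standing in for
the kernel's find_first_bit()/clear_bit()) are assumptions for illustration.

/*
 * Userspace sketch (not kernel code) of the cgx_notify_pfs() pattern after
 * the widening: pfmap is a 64-bit bitmap of PFs mapped to one CGX LMAC and
 * the scan width comes from cgx_cnt_max * lmac_per_cgx, not a fixed 16.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int find_first_set(uint64_t map, unsigned int nbits)
{
        unsigned int bit;

        for (bit = 0; bit < nbits; bit++)
                if (map & (1ULL << bit))
                        return bit;
        return nbits;   /* mirrors find_first_bit(): nbits means "none set" */
}

int main(void)
{
        unsigned int cgx_cnt_max = 3, lmac_per_cgx = 8;  /* hypothetical */
        unsigned int nbits = cgx_cnt_max * lmac_per_cgx; /* 24, beyond the old 16 */
        uint64_t pfmap = (1ULL << 2) | (1ULL << 17);     /* PFs 2 and 17 mapped */

        while (pfmap) {
                unsigned int pfid = find_first_set(pfmap, nbits);

                pfmap &= ~(1ULL << pfid);                /* clear_bit() analog */
                printf("notify PF %u\n", pfid);
        }
        return 0;
}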