Commit 75fd6079 authored by Jakub Kicinski

Merge branch 'eth-bnxt-use-the-new-rss-api'

Jakub Kicinski says:

====================
eth: bnxt: use the new RSS API

Convert bnxt from using the set_rxfh API to separate create/modify/remove
callbacks.
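
For orientation, a minimal excerpt of what the converted wiring looks like;
the members and values below are the ones added to bnxt_ethtool_ops later in
this diff, and only the variable name is illustrative:

	static const struct ethtool_ops bnxt_rxfh_ops_excerpt = {
		.cap_rss_ctx_supported	= 1,
		.rxfh_max_context_id	= BNXT_MAX_ETH_RSS_CTX,
		.rxfh_priv_size		= sizeof(struct bnxt_rss_ctx),
		/* additional RSS contexts no longer go through .set_rxfh */
		.create_rxfh_context	= bnxt_create_rxfh_context,
		.modify_rxfh_context	= bnxt_modify_rxfh_context,
		.remove_rxfh_context	= bnxt_remove_rxfh_context,
	};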

Two small extensions to the core APIs are necessary:
 - the ability to discard contexts if, for some catastrophic reason, the
   device can no longer provide them;
 - the ability to reserve space in the context for RSS table growth (see
   the sketch below).
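
A rough sketch of how the first extension is used, condensed from
bnxt_hwrm_realloc_rss_ctx_vnic() in the diff below (the function name here
is made up and the error message is trimmed):

	/* Walk the contexts the core keeps for us; if the device cannot
	 * re-create one after a reset, drop it on both sides.
	 */
	static void example_restore_rss_ctxs(struct bnxt *bp)
	{
		struct ethtool_rxfh_context *ctx;
		unsigned long context;

		xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
			struct bnxt_rss_ctx *rss_ctx =
				ethtool_rxfh_context_priv(ctx);

			if (bnxt_hwrm_vnic_alloc(bp, &rss_ctx->vnic, 0,
						 bp->rx_nr_rings)) {
				bnxt_del_one_rss_ctx(bp, rss_ctx, true);
				ethtool_rxfh_context_lost(bp->dev,
							  rss_ctx->index);
			}
		}
	}

The second extension is simply the new .rxfh_indir_space / .rxfh_key_space
ethtool_ops fields; bnxt sets .rxfh_indir_space to
BNXT_MAX_RSS_TABLE_ENTRIES_P5 so the core over-allocates each context's
indirection table for later ring-count growth.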

The driver is adjusted to store indirection tables as u32 to make
it easier to use the core structs directly.

With that out of the way, the conversion is fairly straightforward.

Since the opposition to discarding contexts was relatively mild
and it's what bnxt does already, I'm sticking to that. We may very
well need to revisit that at a later time.

v1: https://lore.kernel.org/all/20240702234757.4188344-1-kuba@kernel.org/
====================

Link: https://patch.msgid.link/20240711220713.283778-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 338a93cf 46e457a4
@@ -5970,17 +5970,20 @@ bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
struct hwrm_cfa_ntuple_filter_alloc_input *req,
struct bnxt_ntuple_filter *fltr)
{
struct bnxt_rss_ctx *rss_ctx, *tmp;
u16 rxq = fltr->base.rxq;
if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
if (rss_ctx->index == fltr->base.fw_vnic_id) {
struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
struct ethtool_rxfh_context *ctx;
struct bnxt_rss_ctx *rss_ctx;
struct bnxt_vnic_info *vnic;
ctx = xa_load(&bp->dev->ethtool->rss_ctx,
fltr->base.fw_vnic_id);
if (ctx) {
rss_ctx = ethtool_rxfh_context_priv(ctx);
vnic = &rss_ctx->vnic;
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
break;
}
}
return;
}
@@ -6219,10 +6222,9 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
}
int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
{
int entries;
u16 *tbl;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
@@ -6230,22 +6232,19 @@ int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
entries = HW_HASH_INDEX_SIZE;
bp->rss_indir_tbl_entries = entries;
tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
if (!tbl)
bp->rss_indir_tbl =
kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
if (!bp->rss_indir_tbl)
return -ENOMEM;
if (rss_ctx)
rss_ctx->rss_indir_tbl = tbl;
else
bp->rss_indir_tbl = tbl;
return 0;
}
void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
struct ethtool_rxfh_context *rss_ctx)
{
u16 max_rings, max_entries, pad, i;
u16 *rss_indir_tbl;
u32 *rss_indir_tbl;
if (!bp->rx_nr_rings)
return;
@@ -6257,7 +6256,7 @@ void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
max_entries = bnxt_get_rxfh_indir_size(bp->dev);
if (rss_ctx)
rss_indir_tbl = &rss_ctx->rss_indir_tbl[0];
rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
else
rss_indir_tbl = &bp->rss_indir_tbl[0];
@@ -6266,12 +6265,12 @@ void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx)
pad = bp->rss_indir_tbl_entries - max_entries;
if (pad)
memset(&rss_indir_tbl[i], 0, pad * sizeof(u16));
memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
}
static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
{
u16 i, tbl_size, max_ring = 0;
u32 i, tbl_size, max_ring = 0;
if (!bp->rss_indir_tbl)
return 0;
@@ -6282,21 +6281,6 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
return max_ring;
}
u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp)
{
u16 i, tbl_size, max_ring = 0;
struct bnxt_rss_ctx *rss_ctx;
tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
list_for_each_entry(rss_ctx, &bp->rss_ctx_list, list) {
for (i = 0; i < tbl_size; i++)
max_ring = max(max_ring, rss_ctx->rss_indir_tbl[i]);
}
return max_ring;
}
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
{
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
@@ -6338,7 +6322,7 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
j = vnic->rss_ctx->rss_indir_tbl[i];
j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
else
j = bp->rss_indir_tbl[i];
rxr = &bp->rx_ring[j];
@@ -10210,11 +10194,13 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
struct bnxt_ntuple_filter *ntp_fltr;
int i;
if (netif_running(bp->dev)) {
bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
}
}
if (!all)
return;
@@ -10234,19 +10220,17 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
vnic->rss_table,
vnic->rss_table_dma_addr);
kfree(rss_ctx->rss_indir_tbl);
list_del(&rss_ctx->list);
bp->num_rss_ctx--;
clear_bit(rss_ctx->index, bp->rss_ctx_bmap);
kfree(rss_ctx);
}
static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
{
bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
struct bnxt_rss_ctx *rss_ctx, *tmp;
struct ethtool_rxfh_context *ctx;
unsigned long context;
list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
@@ -10255,42 +10239,20 @@ static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
rss_ctx->index);
bnxt_del_one_rss_ctx(bp, rss_ctx, true);
ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
}
}
}
struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp)
{
struct bnxt_rss_ctx *rss_ctx = NULL;
rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL);
if (rss_ctx) {
rss_ctx->vnic.rss_ctx = rss_ctx;
list_add_tail(&rss_ctx->list, &bp->rss_ctx_list);
bp->num_rss_ctx++;
}
return rss_ctx;
}
void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all)
void bnxt_clear_rss_ctxs(struct bnxt *bp)
{
struct bnxt_rss_ctx *rss_ctx, *tmp;
list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
bnxt_del_one_rss_ctx(bp, rss_ctx, all);
struct ethtool_rxfh_context *ctx;
unsigned long context;
if (all)
bitmap_free(bp->rss_ctx_bmap);
}
xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
static void bnxt_init_multi_rss_ctx(struct bnxt *bp)
{
bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL);
if (bp->rss_ctx_bmap) {
/* burn index 0 since we cannot have context 0 */
__set_bit(0, bp->rss_ctx_bmap);
INIT_LIST_HEAD(&bp->rss_ctx_list);
bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
bnxt_del_one_rss_ctx(bp, rss_ctx, false);
}
}
@@ -12337,7 +12299,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
msleep(20);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
bnxt_clear_rss_ctxs(bp, false);
bnxt_clear_rss_ctxs(bp);
/* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
@@ -15252,8 +15214,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
bnxt_free_l2_filters(bp, true);
bnxt_free_ntp_fltrs(bp, true);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
bnxt_clear_rss_ctxs(bp, true);
WARN_ON(bp->num_rss_ctx);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
/* Flush any pending tasks */
cancel_work_sync(&bp->sp_task);
@@ -15723,7 +15684,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bp->flags |= BNXT_FLAG_CHIP_P7;
}
rc = bnxt_alloc_rss_indir_tbl(bp, NULL);
rc = bnxt_alloc_rss_indir_tbl(bp);
if (rc)
goto init_err_pci_clean;
@@ -15880,8 +15841,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&bp->usr_fltr_list);
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
bnxt_init_multi_rss_ctx(bp);
bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
rc = register_netdev(dev);
if (rc)
@@ -15904,8 +15864,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_clear_int_mode(bp);
init_err_pci_clean:
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
bnxt_clear_rss_ctxs(bp, true);
bnxt_hwrm_func_drv_unrgtr(bp);
bnxt_free_hwrm_resources(bp);
bnxt_hwmon_uninit(bp);
......
@@ -1286,19 +1286,16 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
#define BNXT_VNIC_NTUPLE_FLAG 0x20
#define BNXT_VNIC_RSSCTX_FLAG 0x40
struct bnxt_rss_ctx *rss_ctx;
struct ethtool_rxfh_context *rss_ctx;
u32 vnic_id;
};
struct bnxt_rss_ctx {
struct list_head list;
struct bnxt_vnic_info vnic;
u16 *rss_indir_tbl;
u8 index;
};
#define BNXT_MAX_ETH_RSS_CTX 32
#define BNXT_RSS_CTX_BMAP_LEN (BNXT_MAX_ETH_RSS_CTX + 1)
#define BNXT_VNIC_ID_INVALID 0xffffffff
struct bnxt_hw_rings {
@@ -2331,11 +2328,9 @@ struct bnxt {
/* grp_info indexed by completion ring index */
struct bnxt_ring_grp_info *grp_info;
struct bnxt_vnic_info *vnic_info;
struct list_head rss_ctx_list;
unsigned long *rss_ctx_bmap;
u32 num_rss_ctx;
int nr_vnics;
u16 *rss_indir_tbl;
u32 *rss_indir_tbl;
u16 rss_indir_tbl_entries;
u32 rss_hash_cfg;
u32 rss_hash_delta;
@@ -2812,9 +2807,8 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
u32 tpa_flags);
void bnxt_fill_ipv6_mask(__be32 mask[4]);
int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp);
void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
struct ethtool_rxfh_context *rss_ctx);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
@@ -2848,8 +2842,7 @@ int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
bool all);
struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp);
void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all);
void bnxt_clear_rss_ctxs(struct bnxt *bp);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
......
@@ -961,12 +961,6 @@ static int bnxt_set_channels(struct net_device *dev,
return rc;
}
if (req_rx_rings < bp->rx_nr_rings &&
req_rx_rings <= bnxt_get_max_rss_ctx_ring(bp)) {
netdev_warn(dev, "Can't deactivate rings used by RSS contexts\n");
return -EINVAL;
}
if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
netif_is_rxfh_configured(dev)) {
@@ -976,7 +970,7 @@ static int bnxt_set_channels(struct net_device *dev,
bnxt_clear_usr_fltrs(bp, true);
if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
bnxt_clear_rss_ctxs(bp, false);
bnxt_clear_rss_ctxs(bp);
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -1216,19 +1210,18 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
u32 index)
{
struct bnxt_rss_ctx *rss_ctx, *tmp;
struct ethtool_rxfh_context *ctx;
list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
if (rss_ctx->index == index)
return rss_ctx;
ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
if (!ctx)
return NULL;
return ethtool_rxfh_context_priv(ctx);
}
static int bnxt_alloc_rss_ctx_rss_table(struct bnxt *bp,
struct bnxt_rss_ctx *rss_ctx)
static int bnxt_alloc_vnic_rss_table(struct bnxt *bp,
struct bnxt_vnic_info *vnic)
{
int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
@@ -1807,10 +1800,9 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
static int bnxt_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
u32 rss_context = rxfh->rss_context;
struct bnxt_rss_ctx *rss_ctx = NULL;
struct bnxt *bp = netdev_priv(dev);
u16 *indir_tbl = bp->rss_indir_tbl;
u32 *indir_tbl = bp->rss_indir_tbl;
struct bnxt_vnic_info *vnic;
u32 i, tbl_size;
@@ -1821,10 +1813,13 @@ static int bnxt_get_rxfh(struct net_device *dev,
vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
if (rxfh->rss_context) {
rss_ctx = bnxt_get_rss_ctx_from_index(bp, rss_context);
if (!rss_ctx)
struct ethtool_rxfh_context *ctx;
ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context);
if (!ctx)
return -EINVAL;
indir_tbl = rss_ctx->rss_indir_tbl;
indir_tbl = ethtool_rxfh_context_indir(ctx);
rss_ctx = ethtool_rxfh_context_priv(ctx);
vnic = &rss_ctx->vnic;
}
@@ -1840,8 +1835,9 @@ static int bnxt_get_rxfh(struct net_device *dev,
return 0;
}
static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
struct ethtool_rxfh_param *rxfh)
static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
struct bnxt_rss_ctx *rss_ctx,
const struct ethtool_rxfh_param *rxfh)
{
if (rxfh->key) {
if (rss_ctx) {
@@ -1854,29 +1850,21 @@ static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
}
if (rxfh->indir) {
u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
u16 *indir_tbl = bp->rss_indir_tbl;
u32 *indir_tbl = bp->rss_indir_tbl;
if (rss_ctx)
indir_tbl = rss_ctx->rss_indir_tbl;
indir_tbl = ethtool_rxfh_context_indir(ctx);
for (i = 0; i < tbl_size; i++)
indir_tbl[i] = rxfh->indir[i];
pad = bp->rss_indir_tbl_entries - tbl_size;
if (pad)
memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl));
}
}
static int bnxt_set_rxfh_context(struct bnxt *bp,
struct ethtool_rxfh_param *rxfh,
static int bnxt_rxfh_context_check(struct bnxt *bp,
struct netlink_ext_ack *extack)
{
u32 *rss_context = &rxfh->rss_context;
struct bnxt_rss_ctx *rss_ctx;
struct bnxt_vnic_info *vnic;
bool modify = false;
int bit_id;
int rc;
if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
return -EOPNOTSUPP;
@@ -1887,21 +1875,22 @@ static int bnxt_set_rxfh_context(struct bnxt *bp,
return -EAGAIN;
}
if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) {
rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context);
if (!rss_ctx) {
NL_SET_ERR_MSG_FMT_MOD(extack, "RSS context %u not found",
*rss_context);
return -EINVAL;
}
if (*rss_context && rxfh->rss_delete) {
bnxt_del_one_rss_ctx(bp, rss_ctx, true);
return 0;
}
modify = true;
vnic = &rss_ctx->vnic;
goto modify_context;
}
}
static int bnxt_create_rxfh_context(struct net_device *dev,
struct ethtool_rxfh_context *ctx,
const struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rss_ctx *rss_ctx;
struct bnxt_vnic_info *vnic;
int rc;
rc = bnxt_rxfh_context_check(bp, extack);
if (rc)
return rc;
if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) {
NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u",
@@ -1914,22 +1903,19 @@ static int bnxt_set_rxfh_context(struct bnxt *bp,
return -ENOMEM;
}
rss_ctx = bnxt_alloc_rss_ctx(bp);
if (!rss_ctx)
return -ENOMEM;
rss_ctx = ethtool_rxfh_context_priv(ctx);
bp->num_rss_ctx++;
vnic = &rss_ctx->vnic;
vnic->rss_ctx = ctx;
vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
vnic->vnic_id = BNXT_VNIC_ID_INVALID;
rc = bnxt_alloc_rss_ctx_rss_table(bp, rss_ctx);
if (rc)
goto out;
rc = bnxt_alloc_rss_indir_tbl(bp, rss_ctx);
rc = bnxt_alloc_vnic_rss_table(bp, vnic);
if (rc)
goto out;
bnxt_set_dflt_rss_indir_tbl(bp, rss_ctx);
bnxt_set_dflt_rss_indir_tbl(bp, ctx);
memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
@@ -1943,11 +1929,7 @@ static int bnxt_set_rxfh_context(struct bnxt *bp,
NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
goto out;
}
modify_context:
bnxt_modify_rss(bp, rss_ctx, rxfh);
if (modify)
return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
rc = __bnxt_setup_vnic_p5(bp, vnic);
if (rc) {
@@ -1955,21 +1937,47 @@ static int bnxt_set_rxfh_context(struct bnxt *bp,
goto out;
}
bit_id = bitmap_find_free_region(bp->rss_ctx_bmap,
BNXT_RSS_CTX_BMAP_LEN, 0);
if (bit_id < 0) {
rc = -ENOMEM;
goto out;
}
rss_ctx->index = (u16)bit_id;
*rss_context = rss_ctx->index;
rss_ctx->index = rxfh->rss_context;
return 0;
out:
bnxt_del_one_rss_ctx(bp, rss_ctx, true);
return rc;
}
static int bnxt_modify_rxfh_context(struct net_device *dev,
struct ethtool_rxfh_context *ctx,
const struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rss_ctx *rss_ctx;
int rc;
rc = bnxt_rxfh_context_check(bp, extack);
if (rc)
return rc;
rss_ctx = ethtool_rxfh_context_priv(ctx);
bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic);
}
static int bnxt_remove_rxfh_context(struct net_device *dev,
struct ethtool_rxfh_context *ctx,
u32 rss_context,
struct netlink_ext_ack *extack)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_rss_ctx *rss_ctx;
rss_ctx = ethtool_rxfh_context_priv(ctx);
bnxt_del_one_rss_ctx(bp, rss_ctx, true);
return 0;
}
static int bnxt_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
@@ -1980,10 +1988,7 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (rxfh->rss_context)
return bnxt_set_rxfh_context(bp, rxfh, extack);
bnxt_modify_rss(bp, NULL, rxfh);
bnxt_modify_rss(bp, NULL, NULL, rxfh);
bnxt_clear_usr_fltrs(bp, false);
if (netif_running(bp->dev)) {
@@ -5275,6 +5280,9 @@ void bnxt_ethtool_free(struct bnxt *bp)
const struct ethtool_ops bnxt_ethtool_ops = {
.cap_link_lanes_supported = 1,
.cap_rss_ctx_supported = 1,
.rxfh_max_context_id = BNXT_MAX_ETH_RSS_CTX,
.rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5,
.rxfh_priv_size = sizeof(struct bnxt_rss_ctx),
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS_IRQ |
@@ -5312,6 +5320,9 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_rxfh_key_size = bnxt_get_rxfh_key_size,
.get_rxfh = bnxt_get_rxfh,
.set_rxfh = bnxt_set_rxfh,
.create_rxfh_context = bnxt_create_rxfh_context,
.modify_rxfh_context = bnxt_modify_rxfh_context,
.remove_rxfh_context = bnxt_remove_rxfh_context,
.flash_device = bnxt_flash_device,
.get_eeprom_len = bnxt_get_eeprom_len,
.get_eeprom = bnxt_get_eeprom,
......
@@ -181,6 +181,7 @@ struct ethtool_rxfh_context {
/* private: driver private data, indirection table, and hash key are
* stored sequentially in @data area. Use below helpers to access.
*/
u32 key_off;
u8 data[] __aligned(sizeof(void *));
};
@@ -196,19 +197,10 @@ static inline u32 *ethtool_rxfh_context_indir(struct ethtool_rxfh_context *ctx)
static inline u8 *ethtool_rxfh_context_key(struct ethtool_rxfh_context *ctx)
{
return (u8 *)(ethtool_rxfh_context_indir(ctx) + ctx->indir_size);
return &ctx->data[ctx->key_off];
}
static inline size_t ethtool_rxfh_context_size(u32 indir_size, u32 key_size,
u16 priv_size)
{
size_t indir_bytes = array_size(indir_size, sizeof(u32));
size_t flex_len;
flex_len = size_add(size_add(indir_bytes, key_size),
ALIGN(priv_size, sizeof(u32)));
return struct_size_t(struct ethtool_rxfh_context, data, flex_len);
}
void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id);
/* declare a link mode bitmap */
#define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \
@@ -721,6 +713,10 @@ struct ethtool_rxfh_param {
* contexts.
* @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor
* RSS.
* @rxfh_indir_space: max size of RSS indirection tables, if indirection table
* size as returned by @get_rxfh_indir_size may change during lifetime
* of the device. Leave as 0 if the table size is constant.
* @rxfh_key_space: same as @rxfh_indir_space, but for the key.
* @rxfh_priv_size: size of the driver private data area the core should
* allocate for an RSS context (in &struct ethtool_rxfh_context).
* @rxfh_max_context_id: maximum (exclusive) supported RSS context ID. If this
@@ -938,6 +934,8 @@ struct ethtool_ops {
u32 cap_link_lanes_supported:1;
u32 cap_rss_ctx_supported:1;
u32 cap_rss_sym_xor_supported:1;
u32 rxfh_indir_space;
u16 rxfh_key_space;
u16 rxfh_priv_size;
u32 rxfh_max_context_id;
u32 supported_coalesce_params;
......
@@ -741,3 +741,17 @@ ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size)
}
}
EXPORT_SYMBOL_GPL(ethtool_forced_speed_maps_init);
void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id)
{
struct ethtool_rxfh_context *ctx;
WARN_ONCE(!rtnl_is_locked() &&
!lockdep_is_held_type(&dev->ethtool->rss_lock, -1),
"RSS context lock assertion failed\n");
netdev_err(dev, "device error, RSS context %d lost\n", context_id);
ctx = xa_erase(&dev->ethtool->rss_ctx, context_id);
kfree(ctx);
}
EXPORT_SYMBOL(ethtool_rxfh_context_lost);
@@ -1290,6 +1290,40 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev,
return ret;
}
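/* Note (added for clarity; not part of the original patch): the data[] area
 * allocated below is laid out as
 *	[ driver priv:  rxfh_priv_size bytes, padded to u32 ]
 *	[ indir table:  max(indir_size, rxfh_indir_space) u32 entries ]
 *	[ hash key:     max(key_size, rxfh_key_space) bytes, at key_off ]
 * which is why ethtool_rxfh_context_key() now reads ctx->key_off instead of
 * deriving the key position from indir_size.
 */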
static struct ethtool_rxfh_context *
ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops,
u32 indir_size, u32 key_size)
{
size_t indir_bytes, flex_len, key_off, size;
struct ethtool_rxfh_context *ctx;
u32 priv_bytes, indir_max;
u16 key_max;
key_max = max(key_size, ops->rxfh_key_space);
indir_max = max(indir_size, ops->rxfh_indir_space);
priv_bytes = ALIGN(ops->rxfh_priv_size, sizeof(u32));
indir_bytes = array_size(indir_max, sizeof(u32));
key_off = size_add(priv_bytes, indir_bytes);
flex_len = size_add(key_off, key_max);
size = struct_size_t(struct ethtool_rxfh_context, data, flex_len);
ctx = kzalloc(size, GFP_KERNEL_ACCOUNT);
if (!ctx)
return NULL;
ctx->indir_size = indir_size;
ctx->key_size = key_size;
ctx->key_off = key_off;
ctx->priv_size = ops->rxfh_priv_size;
ctx->hfunc = ETH_RSS_HASH_NO_CHANGE;
ctx->input_xfrm = RXH_XFRM_NO_CHANGE;
return ctx;
}
static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
void __user *useraddr)
{
@@ -1406,20 +1440,12 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev,
ret = -EINVAL;
goto out;
}
ctx = kzalloc(ethtool_rxfh_context_size(dev_indir_size,
dev_key_size,
ops->rxfh_priv_size),
GFP_KERNEL_ACCOUNT);
ctx = ethtool_rxfh_ctx_alloc(ops, dev_indir_size, dev_key_size);
if (!ctx) {
ret = -ENOMEM;
goto out;
}
ctx->indir_size = dev_indir_size;
ctx->key_size = dev_key_size;
ctx->priv_size = ops->rxfh_priv_size;
/* Initialise to an empty context */
ctx->hfunc = ETH_RSS_HASH_NO_CHANGE;
ctx->input_xfrm = RXH_XFRM_NO_CHANGE;
if (ops->create_rxfh_context) {
u32 limit = ops->rxfh_max_context_id ?: U32_MAX;
u32 ctx_id;
......