Commit 46115b27 authored by Jakub Kicinski

Merge tag 'mlx5-updates-2022-11-29' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2022-11-29

Misc updates for the mlx5 driver

1) Various trivial cleanups

2) From Maor Dickman, add support for trap offload with additional actions

3) From Tariq, UMR (device memory registration) cleanups;
   a UMR WQE must be aligned to 64B per the device spec (not a bug fix).
   See the alignment sketch below.
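
   For context, the UMR series replaces the byte-based MLX5_UMR_MTT_ALIGNMENT
   with a single 64-byte "flex" alignment from which per-entry-type alignments
   are derived. The definitions below are taken from the alignment hunk near
   the end of the diff, with illustrative comments added; the entry counts
   assume the usual 8-byte struct mlx5_mtt and 16-byte struct mlx5_klm.

   #define MLX5_UMR_FLEX_ALIGNMENT 0x40 /* UMR xlt data is copied in 64B units */
   #define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT \
           (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt)) /* 64 / 8  = 8 MTTs */
   #define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT \
           (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm)) /* 64 / 16 = 4 KLMs */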

* tag 'mlx5-updates-2022-11-29' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Support devlink reload of IPsec core
  net/mlx5e: TC, Add offload support for trap with additional actions
  net/mlx5e: Do early return when setup vports dests for slow path flow
  net/mlx5: Remove redundant check
  net/mlx5e: Delete always true DMA check
  net/mlx5e: Don't access directly DMA device pointer
  net/mlx5e: Don't use termination table when redundant
  net/mlx5: Fix orthography errors in documentation
  net/mlx5: Use generic definition for UMR KLM alignment
  net/mlx5: Generalize name of UMR alignment definition
  net/mlx5: Remove unused UMR MTT definitions
  net/mlx5e: Add padding when needed in UMR WQEs
  net/mlx5: Remove unused ctx variables
  net/mlx5e: Replace zero-length arrays with DECLARE_FLEX_ARRAY() helper
  net/mlx5e: Remove unneeded io-mapping.h #include
====================

Link: https://lore.kernel.org/r/20221130051152.479480-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents bc66fa87 953d7715
......@@ -230,8 +230,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
struct ib_umem_odp *umem_odp =
container_of(mni, struct ib_umem_odp, notifier);
struct mlx5_ib_mr *mr;
const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
sizeof(struct mlx5_mtt)) - 1;
const u64 umr_block_mask = MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1;
u64 idx = 0, blk_start_idx = 0;
u64 invalidations = 0;
unsigned long start;
......
......@@ -418,7 +418,7 @@ int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
}
#define MLX5_MAX_UMR_CHUNK \
((1 << (MLX5_MAX_UMR_SHIFT + 4)) - MLX5_UMR_MTT_ALIGNMENT)
((1 << (MLX5_MAX_UMR_SHIFT + 4)) - MLX5_UMR_FLEX_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000
/*
......@@ -428,11 +428,11 @@ int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
*/
static void *mlx5r_umr_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
{
const size_t xlt_chunk_align = MLX5_UMR_MTT_ALIGNMENT / ent_size;
const size_t xlt_chunk_align = MLX5_UMR_FLEX_ALIGNMENT / ent_size;
size_t size;
void *res = NULL;
static_assert(PAGE_SIZE % MLX5_UMR_MTT_ALIGNMENT == 0);
static_assert(PAGE_SIZE % MLX5_UMR_FLEX_ALIGNMENT == 0);
/*
* MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context just that the
......@@ -666,7 +666,7 @@ int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
}
final_size = (void *)cur_mtt - (void *)mtt;
sg.length = ALIGN(final_size, MLX5_UMR_MTT_ALIGNMENT);
sg.length = ALIGN(final_size, MLX5_UMR_FLEX_ALIGNMENT);
memset(cur_mtt, 0, sg.length - final_size);
mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
......@@ -690,7 +690,7 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
? sizeof(struct mlx5_klm)
: sizeof(struct mlx5_mtt);
const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
const int page_align = MLX5_UMR_FLEX_ALIGNMENT / desc_size;
struct mlx5_ib_dev *dev = mr_to_mdev(mr);
struct device *ddev = &dev->mdev->pdev->dev;
const int page_mask = page_align - 1;
......@@ -711,7 +711,7 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
if (WARN_ON(!mr->umem->is_odp))
return -EINVAL;
/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
/* UMR copies MTTs in units of MLX5_UMR_FLEX_ALIGNMENT bytes,
* so we need to align the offset and length accordingly
*/
if (idx & page_mask) {
......@@ -748,7 +748,7 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
dma_sync_single_for_device(ddev, sg.addr, sg.length,
DMA_TO_DEVICE);
sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
sg.length = ALIGN(size_to_map, MLX5_UMR_FLEX_ALIGNMENT);
if (pages_mapped + pages_iter >= pages_to_map)
mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
......
......@@ -37,7 +37,6 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>
......
......@@ -103,11 +103,11 @@ struct page_pool;
* size actually used at runtime, but it's not a problem when calculating static
* array sizes.
*/
#define MLX5_UMR_MAX_MTT_SPACE \
#define MLX5_UMR_MAX_FLEX_SPACE \
(ALIGN_DOWN(MLX5_SEND_WQE_MAX_SIZE - sizeof(struct mlx5e_umr_wqe), \
MLX5_UMR_MTT_ALIGNMENT))
MLX5_UMR_FLEX_ALIGNMENT))
#define MLX5_MPWRQ_MAX_PAGES_PER_WQE \
rounddown_pow_of_two(MLX5_UMR_MAX_MTT_SPACE / sizeof(struct mlx5_mtt))
rounddown_pow_of_two(MLX5_UMR_MAX_FLEX_SPACE / sizeof(struct mlx5_mtt))
#define MLX5E_MAX_RQ_NUM_MTTS \
(ALIGN_DOWN(U16_MAX, 4) * 2) /* Fits into u16 and aligned by WQEBB. */
......@@ -160,7 +160,7 @@ struct page_pool;
(((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))
#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)
#define MLX5E_MAX_KLM_PER_WQE(mdev) \
MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
......
......@@ -107,7 +107,7 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
MLX5_UMR_MTT_ALIGNMENT) / umr_entry_size;
MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
......@@ -146,7 +146,7 @@ u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
u16 umr_wqe_sz;
umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);
WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);
......
......@@ -3,6 +3,7 @@
#include "act.h"
#include "en/tc_priv.h"
#include "eswitch.h"
static bool
tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
......@@ -10,13 +11,6 @@ tc_act_can_offload_trap(struct mlx5e_tc_act_parse_state *parse_state,
int act_index,
struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
if (parse_state->flow_action->num_entries != 1) {
NL_SET_ERR_MSG_MOD(extack, "action trap is supported as a sole action only");
return false;
}
return true;
}
......@@ -27,7 +21,7 @@ tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
attr->dest_ft = mlx5_eswitch_get_slow_fdb(priv->mdev->priv.eswitch);
return 0;
}
......
......@@ -345,29 +345,27 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
kfree(sa_entry);
}
int mlx5e_ipsec_init(struct mlx5e_priv *priv)
void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
struct mlx5e_ipsec *ipsec;
int ret;
int ret = -ENOMEM;
if (!mlx5_ipsec_device_caps(priv->mdev)) {
netdev_dbg(priv->netdev, "Not an IPSec offload device\n");
return 0;
return;
}
ipsec = kzalloc(sizeof(*ipsec), GFP_KERNEL);
if (!ipsec)
return -ENOMEM;
return;
hash_init(ipsec->sadb_rx);
spin_lock_init(&ipsec->sadb_rx_lock);
ipsec->mdev = priv->mdev;
ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
priv->netdev->name);
if (!ipsec->wq) {
ret = -ENOMEM;
if (!ipsec->wq)
goto err_wq;
}
ret = mlx5e_accel_ipsec_fs_init(ipsec);
if (ret)
......@@ -375,13 +373,14 @@ int mlx5e_ipsec_init(struct mlx5e_priv *priv)
priv->ipsec = ipsec;
netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
return 0;
return;
err_fs_init:
destroy_workqueue(ipsec->wq);
err_wq:
kfree(ipsec);
return (ret != -EOPNOTSUPP) ? ret : 0;
mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
return;
}
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
......
......@@ -146,7 +146,7 @@ struct mlx5e_ipsec_sa_entry {
struct mlx5e_ipsec_modify_state_work modify_work;
};
int mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_init(struct mlx5e_priv *priv);
void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
......@@ -174,9 +174,8 @@ mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
return sa_entry->ipsec->mdev;
}
#else
static inline int mlx5e_ipsec_init(struct mlx5e_priv *priv)
static inline void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{
return 0;
}
static inline void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
......
......@@ -186,7 +186,7 @@ static int mlx5e_macsec_aso_reg_mr(struct mlx5_core_dev *mdev, struct mlx5e_macs
return err;
}
dma_device = &mdev->pdev->dev;
dma_device = mlx5_core_dma_dev(mdev);
dma_addr = dma_map_single(dma_device, umr->ctx, sizeof(umr->ctx), DMA_BIDIRECTIONAL);
err = dma_mapping_error(dma_device, dma_addr);
if (err) {
......@@ -1299,12 +1299,12 @@ static void macsec_aso_build_wqe_ctrl_seg(struct mlx5e_macsec_aso *macsec_aso,
struct mlx5_wqe_aso_ctrl_seg *aso_ctrl,
struct mlx5_aso_ctrl_param *param)
{
struct mlx5e_macsec_umr *umr = macsec_aso->umr;
memset(aso_ctrl, 0, sizeof(*aso_ctrl));
if (macsec_aso->umr->dma_addr) {
aso_ctrl->va_l = cpu_to_be32(macsec_aso->umr->dma_addr | ASO_CTRL_READ_EN);
aso_ctrl->va_h = cpu_to_be32((u64)macsec_aso->umr->dma_addr >> 32);
aso_ctrl->l_key = cpu_to_be32(macsec_aso->umr->mkey);
}
aso_ctrl->va_l = cpu_to_be32(umr->dma_addr | ASO_CTRL_READ_EN);
aso_ctrl->va_h = cpu_to_be32((u64)umr->dma_addr >> 32);
aso_ctrl->l_key = cpu_to_be32(umr->mkey);
if (!param)
return;
......
......@@ -208,7 +208,7 @@ static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_
u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
u32 sz;
sz = ALIGN(entries * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
sz = ALIGN(entries * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);
return sz / MLX5_OCTWORD;
}
......@@ -5238,10 +5238,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
}
priv->fs = fs;
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
err = mlx5e_ktls_init(priv);
if (err)
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
......@@ -5254,7 +5250,6 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
mlx5e_health_destroy_reporters(priv);
mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
}
......@@ -5383,6 +5378,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
int err;
mlx5e_fs_init_l2_addr(priv->fs, netdev);
mlx5e_ipsec_init(priv);
err = mlx5e_macsec_init(priv);
if (err)
......@@ -5446,6 +5442,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
mlx5e_macsec_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
......
......@@ -751,7 +751,6 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
priv->fs = mlx5e_fs_init(priv->profile, mdev,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state));
......@@ -760,10 +759,6 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
return -ENOMEM;
}
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "Uplink rep IPsec initialization failed, %d\n", err);
mlx5e_vxlan_set_netdev_info(priv);
mlx5e_build_rep_params(netdev);
mlx5e_timestamp_init(priv);
......@@ -773,7 +768,6 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
mlx5e_fs_cleanup(priv->fs);
mlx5e_ipsec_cleanup(priv);
}
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
......@@ -1112,6 +1106,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
u16 max_mtu;
mlx5e_ipsec_init(priv);
netdev->min_mtu = ETH_MIN_MTU;
mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
......@@ -1158,6 +1154,8 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
mlx5e_rep_tc_disable(priv);
mlx5_lag_remove_netdev(mdev, priv->netdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
mlx5e_ipsec_cleanup(priv);
}
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
......
......@@ -593,8 +593,8 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
int headroom, i;
headroom = rq->buff.headroom;
new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
entries = ALIGN(klm_entries, MLX5_UMR_KLM_ALIGNMENT);
new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
......@@ -603,7 +603,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
for (i = 0; i < entries; i++, index++) {
dma_info = &shampo->info[index];
if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
MLX5_UMR_KLM_ALIGNMENT))
MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT))
goto update_klm;
header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
......@@ -668,8 +668,8 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
if (!klm_entries)
return 0;
klm_entries += (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_ALIGNMENT);
klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
entries_before = shampo->hd_per_wq - index;
if (unlikely(entries_before < klm_entries))
......@@ -727,6 +727,17 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
};
}
/* Pad if needed, in case the value set to ucseg->xlt_octowords
* in mlx5e_build_umr_wqe() needed alignment.
*/
if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) {
int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) -
rq->mpwqe.pages_per_wqe;
memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0,
sizeof(*umr_wqe->inline_mtts) * pad);
}
bitmap_zero(wi->xdp_xmit_bitmap, rq->mpwqe.pages_per_wqe);
wi->consumed_strides = 0;
......
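
For context, a small worked example of the padding added just above (a sketch
only; the pages_per_wqe value is made up for illustration):

    /* Suppose rq->mpwqe.pages_per_wqe = 4 and
     * MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT = 8 (64B / 8B per MTT).
     * 4 & (8 - 1) is non-zero, so pad = ALIGN(4, 8) - 4 = 4 and the four
     * trailing inline MTTs are zeroed. That keeps the whole region implied
     * by ucseg->xlt_octowords, which is rounded up to the same 64B flex
     * alignment in the octowords hunk earlier in the diff, initialized.
     */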
......@@ -97,8 +97,8 @@ struct mlx5_flow_attr {
} lag;
/* keep this union last */
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
DECLARE_FLEX_ARRAY(struct mlx5_esw_flow_attr, esw_attr);
DECLARE_FLEX_ARRAY(struct mlx5_nic_flow_attr, nic_attr);
};
};
......
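
For context, the conversion above is needed because C allows a flexible array
member only as the last member of a struct, not bare inside a union; the
kernel's DECLARE_FLEX_ARRAY() helper (linux/stddef.h) wraps the member in an
anonymous struct so the union form is legal. A minimal sketch with
hypothetical names, not taken from the driver:

    #include <linux/stddef.h>
    #include <linux/types.h>

    /* hypothetical container; usage mirrors the mlx5_flow_attr union above */
    union example_payload {
            DECLARE_FLEX_ARRAY(u8, raw);
            DECLARE_FLEX_ARRAY(__le32, words);
    };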
......@@ -744,6 +744,11 @@ static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
return 0;
}
static inline struct mlx5_flow_table *
mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
{
return esw->fdb_table.offloads.slow_fdb;
}
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
......
......@@ -248,7 +248,7 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_ac
if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = esw->fdb_table.offloads.slow_fdb;
dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);
}
static int
......@@ -479,13 +479,15 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
esw_src_port_rewrite_supported(esw))
attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;
if (attr->flags & MLX5_ATTR_FLAG_SAMPLE &&
!(attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)) {
esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
(*i)++;
} else if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
esw_setup_slow_path_dest(dest, flow_act, esw, *i);
(*i)++;
goto out;
}
if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
(*i)++;
} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
esw_setup_accept_dest(dest, flow_act, chains, *i);
(*i)++;
......@@ -506,6 +508,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
}
}
out:
return err;
}
......@@ -1046,7 +1049,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
if (rep->vport == MLX5_VPORT_UPLINK)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
......@@ -1095,7 +1098,7 @@ mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num
mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
dest.vport.num = vport_num;
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
......@@ -1248,7 +1251,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
spec, MLX5_VPORT_PF);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
......@@ -1260,7 +1263,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
if (mlx5_ecpf_vport_exists(esw->dev)) {
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
......@@ -1274,7 +1277,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
peer_dev->priv.eswitch,
spec, vport->vport);
flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow)) {
err = PTR_ERR(flow);
......@@ -1363,7 +1366,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dest.vport.num = esw->manager_vport;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
......@@ -1378,7 +1381,7 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
outer_headers.dmac_47_16);
dmac_v[0] = 0x01;
flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
spec, &flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
......@@ -1927,7 +1930,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
fdb_chains_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
slow_fdb_err:
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
......@@ -1938,7 +1941,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
if (!esw->fdb_table.offloads.slow_fdb)
if (!mlx5_eswitch_get_slow_fdb(esw))
return;
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
......@@ -1954,7 +1957,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
esw_chains_destroy(esw, esw_chains(esw));
mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
......
......@@ -210,6 +210,18 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
return (port_mask & port_value) == MLX5_VPORT_UPLINK;
}
static bool
mlx5_eswitch_is_push_vlan_no_cap(struct mlx5_eswitch *esw,
struct mlx5_flow_act *flow_act)
{
if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
!(mlx5_fs_get_capabilities(esw->dev, MLX5_FLOW_NAMESPACE_FDB) &
MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX))
return true;
return false;
}
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *attr,
......@@ -225,10 +237,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
(!mlx5_eswitch_offload_is_uplink_port(esw, spec) && !esw_attr->int_port))
return false;
/* push vlan on RX */
if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
!(mlx5_fs_get_capabilities(esw->dev, MLX5_FLOW_NAMESPACE_FDB) &
MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX))
if (mlx5_eswitch_is_push_vlan_no_cap(esw, flow_act))
return true;
/* hairpin */
......@@ -252,19 +261,31 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_act term_tbl_act = {};
struct mlx5_flow_handle *rule = NULL;
bool term_table_created = false;
bool is_push_vlan_on_rx;
int num_vport_dests = 0;
int i, curr_dest;
is_push_vlan_on_rx = mlx5_eswitch_is_push_vlan_no_cap(esw, flow_act);
mlx5_eswitch_termtbl_actions_move(flow_act, &term_tbl_act);
term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
for (i = 0; i < num_dest; i++) {
struct mlx5_termtbl_handle *tt;
bool hairpin = false;
/* only vport destinations can be terminated */
if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
continue;
if (attr->dests[num_vport_dests].rep &&
attr->dests[num_vport_dests].rep->vport == MLX5_VPORT_UPLINK)
hairpin = true;
if (!is_push_vlan_on_rx && !hairpin) {
num_vport_dests++;
continue;
}
if (attr->dests[num_vport_dests].flags & MLX5_ESW_DEST_ENCAP) {
term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
term_tbl_act.pkt_reformat = attr->dests[num_vport_dests].pkt_reformat;
......@@ -312,6 +333,9 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
for (curr_dest = 0; curr_dest < num_vport_dests; curr_dest++) {
struct mlx5_termtbl_handle *tt = attr->dests[curr_dest].termtbl;
if (!tt)
continue;
attr->dests[curr_dest].termtbl = NULL;
/* search for the destination associated with the
......
......@@ -334,9 +334,6 @@ struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
void mlx5_aso_destroy(struct mlx5_aso *aso)
{
if (IS_ERR_OR_NULL(aso))
return;
mlx5_aso_destroy_sq(aso);
mlx5_aso_destroy_cq(&aso->cq);
kfree(aso);
......
......@@ -37,7 +37,6 @@
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
......@@ -1604,8 +1603,6 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
int err;
memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
lockdep_register_key(&dev->lock_key);
mutex_init(&dev->intf_state_mutex);
lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
......
......@@ -31,7 +31,6 @@
*/
#include <linux/kernel.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
......
......@@ -290,10 +290,9 @@ enum {
MLX5_UMR_INLINE = (1 << 7),
};
#define MLX5_UMR_KLM_ALIGNMENT 4
#define MLX5_UMR_MTT_ALIGNMENT 0x40
#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
#define MLX5_UMR_FLEX_ALIGNMENT 0x40
#define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
#define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm))
#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
......
......@@ -606,8 +606,6 @@ struct mlx5_priv {
struct list_head pgdir_list;
/* end: alloc staff */
struct list_head ctx_list;
spinlock_t ctx_lock;
struct mlx5_adev **adev;
int adev_idx;
int sw_vhca_id;
......