Commit 4a082260 authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2023-07-26' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2023-07-26

This series provides bug fixes to the mlx5 driver.

* tag 'mlx5-fixes-2023-07-26' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5: Unregister devlink params in case interface is down
  net/mlx5: DR, Fix peer domain namespace setting
  net/mlx5: fs_chains: Fix ft prio if ignore_flow_level is not supported
  net/mlx5e: kTLS, Fix protection domain in use syndrome when devlink reload
  net/mlx5: Bridge, set debugfs access right to root-only
  net/mlx5e: xsk: Fix crash on regular rq reactivation
  net/mlx5e: xsk: Fix invalid buffer access for legacy rq
  net/mlx5e: Move representor neigh cleanup to profile cleanup_tx
  net/mlx5e: Fix crash moving to switchdev mode when ntuple offload is set
  net/mlx5e: Don't hold encap tbl lock if there is no encap action
  net/mlx5: Honor user input for migratable port fn attr
  net/mlx5e: fix return value check in mlx5e_ipsec_remove_trailer()
  net/mlx5: fix potential memory leak in mlx5e_init_rep_rx
  net/mlx5: DR, fix memory leak in mlx5dr_cmd_create_reformat_ctx
  net/mlx5e: fix double free in macsec_fs_tx_create_crypto_table_groups
====================

Link: https://lore.kernel.org/r/20230726213206.47022-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents dadc5b86 53d737df
@@ -1030,9 +1030,6 @@ int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv,
 	int out_index;
 	int err = 0;
 
-	if (!mlx5e_is_eswitch_flow(flow))
-		return 0;
-
 	parse_attr = attr->parse_attr;
 	esw_attr = attr->esw_attr;
 	*vf_tun = false;
......
@@ -323,8 +323,11 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 	net_prefetch(mxbuf->xdp.data);
 
 	prog = rcu_dereference(rq->xdp_prog);
-	if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf)))
+	if (likely(prog && mlx5e_xdp_handle(rq, prog, mxbuf))) {
+		if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)))
+			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
 		return NULL; /* page/packet was consumed by XDP */
+	}
 
 	/* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse
 	 * will be handled by mlx5e_free_rx_wqe.
......
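The added braces carry the fix: when mlx5e_xdp_handle() reports the frame as consumed (XDP_TX or XDP_REDIRECT), the WQE fragment is now marked MLX5E_WQE_FRAG_SKIP_RELEASE so the regular RX release path cannot free a buffer XDP still owns. A minimal userspace C sketch of that test-and-clear ownership handoff (all names are hypothetical stand-ins, not the driver's):

    #include <stdbool.h>
    #include <stdio.h>

    #define FLAG_XDP_XMIT     (1u << 0)  /* set when XDP took ownership of the buffer */
    #define FRAG_SKIP_RELEASE (1u << 0)  /* tells the release path to leave the frag alone */

    struct frag { unsigned flags; };

    /* Test-and-clear: returns true exactly once per set bit. */
    static bool test_and_clear(unsigned *flags, unsigned bit)
    {
        bool was_set = *flags & bit;

        *flags &= ~bit;
        return was_set;
    }

    static void handle_consumed_by_xdp(unsigned *rq_flags, struct frag *wi)
    {
        /* Only the path that observed the XMIT flag may mark the frag;
         * clearing the bit prevents a second caller from doing it again.
         */
        if (test_and_clear(rq_flags, FLAG_XDP_XMIT))
            wi->flags |= FRAG_SKIP_RELEASE;
    }

    static void release_frag(struct frag *wi)
    {
        if (wi->flags & FRAG_SKIP_RELEASE) {
            printf("frag kept alive for XDP completion\n");
            return;
        }
        printf("frag released\n");
    }

    int main(void)
    {
        unsigned rq_flags = FLAG_XDP_XMIT; /* XDP_TX/REDIRECT happened */
        struct frag wi = { 0 };

        handle_consumed_by_xdp(&rq_flags, &wi);
        release_frag(&wi); /* prints "frag kept alive ..." */
        return 0;
    }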
@@ -58,7 +58,9 @@ static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x)
 	trailer_len = alen + plen + 2;
 
-	pskb_trim(skb, skb->len - trailer_len);
+	ret = pskb_trim(skb, skb->len - trailer_len);
+	if (unlikely(ret))
+		return ret;
 	if (skb->protocol == htons(ETH_P_IP)) {
 		ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len);
 		ip_send_check(ipv4hdr);
......
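pskb_trim() can fail when the skb has to be reallocated (for instance when it is cloned), and the old code ignored that and went on to rewrite the IP header for a trim that never happened. A small userspace C sketch of the rule the fix enforces, under hypothetical names: check the fallible trim before touching any state that depends on it.

    #include <errno.h>
    #include <stdio.h>

    struct buf { size_t len; int cloned; };

    /* Hypothetical stand-in for pskb_trim(): fails if the buffer is shared. */
    static int buf_trim(struct buf *b, size_t new_len)
    {
        if (b->cloned)
            return -ENOMEM; /* would have to reallocate; may fail */
        b->len = new_len;
        return 0;
    }

    static int remove_trailer(struct buf *b, size_t trailer_len)
    {
        int ret;

        /* The fix: check the result instead of assuming the trim happened. */
        ret = buf_trim(b, b->len - trailer_len);
        if (ret)
            return ret;

        /* Only now is it safe to update headers that describe b->len. */
        printf("header now describes %zu bytes\n", b->len);
        return 0;
    }

    int main(void)
    {
        struct buf shared = { .len = 100, .cloned = 1 };

        if (remove_trailer(&shared, 10))
            fprintf(stderr, "trim failed, header left untouched\n");
        return 0;
    }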
@@ -188,7 +188,6 @@ static void mlx5e_tls_debugfs_init(struct mlx5e_tls *tls,
 int mlx5e_ktls_init(struct mlx5e_priv *priv)
 {
-	struct mlx5_crypto_dek_pool *dek_pool;
 	struct mlx5e_tls *tls;
 
 	if (!mlx5e_is_ktls_device(priv->mdev))
@@ -199,12 +198,6 @@ int mlx5e_ktls_init(struct mlx5e_priv *priv)
 		return -ENOMEM;
 
 	tls->mdev = priv->mdev;
-	dek_pool = mlx5_crypto_dek_pool_create(priv->mdev, MLX5_ACCEL_OBJ_TLS_KEY);
-	if (IS_ERR(dek_pool)) {
-		kfree(tls);
-		return PTR_ERR(dek_pool);
-	}
-	tls->dek_pool = dek_pool;
 	priv->tls = tls;
 
 	mlx5e_tls_debugfs_init(tls, priv->dfs_root);
@@ -222,7 +215,6 @@ void mlx5e_ktls_cleanup(struct mlx5e_priv *priv)
 	debugfs_remove_recursive(tls->debugfs.dfs);
 	tls->debugfs.dfs = NULL;
 
-	mlx5_crypto_dek_pool_destroy(tls->dek_pool);
 	kfree(priv->tls);
 	priv->tls = NULL;
 }
@@ -908,28 +908,51 @@ static void mlx5e_tls_tx_debugfs_init(struct mlx5e_tls *tls,
 int mlx5e_ktls_init_tx(struct mlx5e_priv *priv)
 {
+	struct mlx5_crypto_dek_pool *dek_pool;
 	struct mlx5e_tls *tls = priv->tls;
+	int err;
+
+	if (!mlx5e_is_ktls_device(priv->mdev))
+		return 0;
+
+	/* DEK pool could be used by either or both of TX and RX. But we have to
+	 * put the creation here to avoid syndrome when doing devlink reload.
+	 */
+	dek_pool = mlx5_crypto_dek_pool_create(priv->mdev, MLX5_ACCEL_OBJ_TLS_KEY);
+	if (IS_ERR(dek_pool))
+		return PTR_ERR(dek_pool);
+	tls->dek_pool = dek_pool;
 
 	if (!mlx5e_is_ktls_tx(priv->mdev))
 		return 0;
 
 	priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats);
-	if (!priv->tls->tx_pool)
-		return -ENOMEM;
+	if (!priv->tls->tx_pool) {
+		err = -ENOMEM;
+		goto err_tx_pool_init;
+	}
 
 	mlx5e_tls_tx_debugfs_init(tls, tls->debugfs.dfs);
 
 	return 0;
+
+err_tx_pool_init:
+	mlx5_crypto_dek_pool_destroy(dek_pool);
+	return err;
 }
 
 void mlx5e_ktls_cleanup_tx(struct mlx5e_priv *priv)
 {
 	if (!mlx5e_is_ktls_tx(priv->mdev))
-		return;
+		goto dek_pool_destroy;
 
 	debugfs_remove_recursive(priv->tls->debugfs.dfs_tx);
 	priv->tls->debugfs.dfs_tx = NULL;
 
 	mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool);
 	priv->tls->tx_pool = NULL;
+
+dek_pool_destroy:
+	if (mlx5e_is_ktls_device(priv->mdev))
+		mlx5_crypto_dek_pool_destroy(priv->tls->dek_pool);
 }
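The patch moves DEK pool create/destroy out of mlx5e_ktls_init()/mlx5e_ktls_cleanup() and into the TX init/cleanup pair, so a devlink reload destroys the pool before the protection domain it references disappears, and the new goto ladder unwinds only what this stage built. A generic C sketch of that lifecycle pairing (hypothetical names, not the driver's structures):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dek_pool { int dummy; };
    struct tls_ctx { struct dek_pool *pool; void *tx_pool; };

    static struct dek_pool *pool_create(void) { return calloc(1, sizeof(struct dek_pool)); }
    static void pool_destroy(struct dek_pool *p) { free(p); }

    static int init_tx(struct tls_ctx *tls)
    {
        int err;

        /* Pool is created here, in the same stage that will destroy it,
         * so a reload unwinds it before stage-global state disappears.
         */
        tls->pool = pool_create();
        if (!tls->pool)
            return -ENOMEM;

        tls->tx_pool = malloc(64);
        if (!tls->tx_pool) {
            err = -ENOMEM;
            goto err_tx_pool; /* unwind only what this function built */
        }
        return 0;

    err_tx_pool:
        pool_destroy(tls->pool);
        tls->pool = NULL;
        return err;
    }

    static void cleanup_tx(struct tls_ctx *tls)
    {
        free(tls->tx_pool);
        tls->tx_pool = NULL;
        pool_destroy(tls->pool); /* mirror of init_tx, reverse order */
        tls->pool = NULL;
    }

    int main(void)
    {
        struct tls_ctx tls = { 0 };

        if (!init_tx(&tls))
            cleanup_tx(&tls);
        return 0;
    }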
@@ -161,6 +161,7 @@ static int macsec_fs_tx_create_crypto_table_groups(struct mlx5e_flow_table *ft)
 	if (!in) {
 		kfree(ft->g);
+		ft->g = NULL;
 		return -ENOMEM;
 	}
......
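The double free happened because this error path freed ft->g but left the pointer set, and the caller's unwind freed it again. The fix clears the pointer and returns immediately. A minimal C sketch of the rule, with hypothetical names: after freeing a field another path can see, null it, so any later free is a no-op.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct table { int *g; };

    static int create_groups(struct table *ft)
    {
        int *in;

        ft->g = calloc(4, sizeof(int));
        if (!ft->g)
            return -ENOMEM;

        in = calloc(1, sizeof(int));
        if (!in) {
            free(ft->g);
            ft->g = NULL;   /* the fix: caller's unwind sees NULL ... */
            return -ENOMEM; /* ... and we stop instead of falling through */
        }

        free(in);
        return 0;
    }

    static void destroy_table(struct table *ft)
    {
        free(ft->g); /* free(NULL) is a no-op, so no double free */
        ft->g = NULL;
    }

    int main(void)
    {
        struct table ft = { 0 };

        if (create_groups(&ft))
            fprintf(stderr, "create failed, safe to unwind\n");
        destroy_table(&ft);
        return 0;
    }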
@@ -135,6 +135,16 @@ static void arfs_del_rules(struct mlx5e_flow_steering *fs);
 int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
 {
+	/* Moving to switchdev mode, fs->arfs is freed by mlx5e_nic_profile
+	 * cleanup_rx callback and it is not recreated when
+	 * mlx5e_uplink_rep_profile is loaded as mlx5e_create_flow_steering()
+	 * is not called by the uplink_rep profile init_rx callback. Thus, if
+	 * ntuple is set, moving to switchdev flow will enter this function
+	 * with fs->arfs nullified.
+	 */
+	if (!mlx5e_fs_get_arfs(fs))
+		return 0;
+
 	arfs_del_rules(fs);
 
 	return arfs_disable(fs);
......
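The guard simply tolerates the state described in the comment above: the uplink-rep profile never recreates the ARFS tables, so "no table" must read as "already disabled". A tiny C sketch of the same defensive shape (hypothetical names):

    #include <stdio.h>

    struct arfs { int rules; };
    struct fs { struct arfs *arfs; };

    static int arfs_disable(struct fs *fs)
    {
        /* Profile transitions can leave fs->arfs freed and not recreated;
         * treat "no table" as already disabled rather than dereferencing NULL.
         */
        if (!fs->arfs)
            return 0;

        fs->arfs->rules = 0;
        return 0;
    }

    int main(void)
    {
        struct fs fs = { .arfs = NULL }; /* state after moving to switchdev */

        printf("disable -> %d\n", arfs_disable(&fs)); /* no crash, returns 0 */
        return 0;
    }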
@@ -1036,7 +1036,23 @@ static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_s
 	return err;
 }
 
-static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
+static void mlx5e_flush_rq_cq(struct mlx5e_rq *rq)
+{
+	struct mlx5_cqwq *cqwq = &rq->cq.wq;
+	struct mlx5_cqe64 *cqe;
+
+	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state)) {
+		while ((cqe = mlx5_cqwq_get_cqe_enahnced_comp(cqwq)))
+			mlx5_cqwq_pop(cqwq);
+	} else {
+		while ((cqe = mlx5_cqwq_get_cqe(cqwq)))
+			mlx5_cqwq_pop(cqwq);
+	}
+
+	mlx5_cqwq_update_db_record(cqwq);
+}
+
+int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
 {
 	struct net_device *dev = rq->netdev;
 	int err;
@@ -1046,6 +1062,10 @@ static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
 		netdev_err(dev, "Failed to move rq 0x%x to reset\n", rq->rqn);
 		return err;
 	}
+
+	mlx5e_free_rx_descs(rq);
+	mlx5e_flush_rq_cq(rq);
+
 	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
 	if (err) {
 		netdev_err(dev, "Failed to move rq 0x%x to ready\n", rq->rqn);
@@ -1055,13 +1075,6 @@ static int mlx5e_rq_to_ready(struct mlx5e_rq *rq, int curr_state)
 	return 0;
 }
 
-int mlx5e_flush_rq(struct mlx5e_rq *rq, int curr_state)
-{
-	mlx5e_free_rx_descs(rq);
-
-	return mlx5e_rq_to_ready(rq, curr_state);
-}
-
 static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
 {
 	struct mlx5_core_dev *mdev = rq->mdev;
......
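The crash on RQ reactivation came from stale CQEs surviving the RST-to-RDY cycle. The new mlx5e_flush_rq_cq() is, conceptually, a drain loop over a ring: pop every leftover completion while the RQ is in reset, then publish the consumer index once via the doorbell record. (The "enahnced" spelling above is the actual helper name in the tree, not a transcription error.) A userspace analogue of the drain, with hypothetical names:

    #include <stdio.h>

    #define RING_SIZE 8

    struct cq {
        int entries[RING_SIZE];
        unsigned head;       /* consumer index */
        unsigned tail;       /* producer index */
        unsigned db_record;  /* what we last told the "hardware" we consumed */
    };

    static int *cq_get_cqe(struct cq *cq)
    {
        return cq->head == cq->tail ? NULL : &cq->entries[cq->head % RING_SIZE];
    }

    static void cq_pop(struct cq *cq) { cq->head++; }

    /* Analogue of mlx5e_flush_rq_cq(): discard every stale completion,
     * then publish the new consumer index once at the end.
     */
    static void cq_flush(struct cq *cq)
    {
        while (cq_get_cqe(cq))
            cq_pop(cq);
        cq->db_record = cq->head;
    }

    int main(void)
    {
        struct cq cq = { .tail = 5 }; /* five stale completions left behind */

        cq_flush(&cq);
        printf("drained, head=%u db=%u\n", cq.head, cq.db_record);
        return 0;
    }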
@@ -1012,7 +1012,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
 	if (err) {
 		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
-		return err;
+		goto err_rx_res_free;
 	}
 
 	err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
@@ -1046,6 +1046,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 	mlx5e_rx_res_destroy(priv->rx_res);
 err_close_drop_rq:
 	mlx5e_close_drop_rq(&priv->drop_rq);
+err_rx_res_free:
 	mlx5e_rx_res_free(priv->rx_res);
 	priv->rx_res = NULL;
 err_free_fs:
@@ -1159,6 +1160,10 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
 		return err;
 	}
 
+	err = mlx5e_rep_neigh_init(rpriv);
+	if (err)
+		goto err_neigh_init;
+
 	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
 		err = mlx5e_init_uplink_rep_tx(rpriv);
 		if (err)
@@ -1175,6 +1180,8 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
 	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
 		mlx5e_cleanup_uplink_rep_tx(rpriv);
 err_init_tx:
+	mlx5e_rep_neigh_cleanup(rpriv);
+err_neigh_init:
 	mlx5e_destroy_tises(priv);
 	return err;
 }
@@ -1188,22 +1195,17 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
 	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
 		mlx5e_cleanup_uplink_rep_tx(rpriv);
 
+	mlx5e_rep_neigh_cleanup(rpriv);
 	mlx5e_destroy_tises(priv);
 }
 
 static void mlx5e_rep_enable(struct mlx5e_priv *priv)
 {
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
-
 	mlx5e_set_netdev_mtu_boundaries(priv);
-	mlx5e_rep_neigh_init(rpriv);
 }
 
 static void mlx5e_rep_disable(struct mlx5e_priv *priv)
 {
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
-
-	mlx5e_rep_neigh_cleanup(rpriv);
 }
 
 static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
@@ -1253,7 +1255,6 @@ static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event
 static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
 {
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct net_device *netdev = priv->netdev;
 	struct mlx5_core_dev *mdev = priv->mdev;
 	u16 max_mtu;
@@ -1275,7 +1276,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
 	mlx5_notifier_register(mdev, &priv->events_nb);
 	mlx5e_dcbnl_initialize(priv);
 	mlx5e_dcbnl_init_app(priv);
-	mlx5e_rep_neigh_init(rpriv);
 	mlx5e_rep_bridge_init(priv);
 
 	netdev->wanted_features |= NETIF_F_HW_TC;
@@ -1290,7 +1290,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
 static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
 {
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_core_dev *mdev = priv->mdev;
 
 	rtnl_lock();
@@ -1300,7 +1299,6 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
 	rtnl_unlock();
 
 	mlx5e_rep_bridge_cleanup(priv);
-	mlx5e_rep_neigh_cleanup(rpriv);
 	mlx5e_dcbnl_delete_app(priv);
 	mlx5_notifier_unregister(mdev, &priv->events_nb);
 	mlx5e_rep_tc_disable(priv);
......
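The representor change moves mlx5e_rep_neigh_init()/cleanup() from the enable/disable callbacks into init_tx/cleanup_tx, so neigh state lives and dies with the TX objects it serves, and the new err_neigh_init label keeps the error unwind in exact reverse order of initialization. A compact C sketch of such a goto ladder (hypothetical step names):

    #include <errno.h>
    #include <stdio.h>

    static int init_a(void) { return 0; }
    static void cleanup_a(void) { puts("cleanup a"); }
    static int init_b(void) { return 0; }
    static void cleanup_b(void) { puts("cleanup b"); }
    static int init_c(void) { return -EIO; } /* pretend the last step fails */

    static int init_tx(void)
    {
        int err;

        err = init_a();
        if (err)
            return err;

        err = init_b();
        if (err)
            goto err_b; /* undo a only */

        err = init_c();
        if (err)
            goto err_c; /* undo b, then a: exact reverse of init order */

        return 0;

    err_c:
        cleanup_b();
    err_b:
        cleanup_a();
        return err;
    }

    int main(void)
    {
        printf("init_tx -> %d\n", init_tx());
        return 0;
    }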
@@ -1725,6 +1725,19 @@ verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
 	return 0;
 }
 
+static bool
+has_encap_dests(struct mlx5_flow_attr *attr)
+{
+	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+	int out_index;
+
+	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
+		if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
+			return true;
+
+	return false;
+}
+
 static int
 post_process_attr(struct mlx5e_tc_flow *flow,
 		  struct mlx5_flow_attr *attr,
@@ -1737,9 +1750,11 @@ post_process_attr(struct mlx5e_tc_flow *flow,
 	if (err)
 		goto err_out;
 
-	err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun);
-	if (err)
-		goto err_out;
+	if (mlx5e_is_eswitch_flow(flow) && has_encap_dests(attr)) {
+		err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun);
+		if (err)
+			goto err_out;
+	}
 
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
 		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
......
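post_process_attr() now calls mlx5e_tc_tun_encap_dests_set() only when the rule is an eswitch rule and at least one forward destination actually carries the ENCAP flag, so the encap table lock is never taken for unrelated flows. A userspace C sketch of the predicate scan (hypothetical names mirroring the new helper):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_FWD_DESTS 2
    #define DEST_ENCAP    (1u << 0)

    struct dest { unsigned flags; };
    struct attr { struct dest dests[MAX_FWD_DESTS]; };

    /* True if any forward destination needs encap handling. */
    static bool has_encap_dests(const struct attr *attr)
    {
        for (int i = 0; i < MAX_FWD_DESTS; i++)
            if (attr->dests[i].flags & DEST_ENCAP)
                return true;
        return false;
    }

    int main(void)
    {
        struct attr plain = { 0 };
        struct attr encap = { .dests = { { DEST_ENCAP }, { 0 } } };

        /* Only the encap rule would take the encap-table lock. */
        printf("plain: %d, encap: %d\n",
               has_encap_dests(&plain), has_encap_dests(&encap));
        return 0;
    }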
@@ -64,7 +64,7 @@ void mlx5_esw_bridge_debugfs_init(struct net_device *br_netdev, struct mlx5_esw_
 	bridge->debugfs_dir = debugfs_create_dir(br_netdev->name,
 						 bridge->br_offloads->debugfs_root);
-	debugfs_create_file("fdb", 0444, bridge->debugfs_dir, bridge,
+	debugfs_create_file("fdb", 0400, bridge->debugfs_dir, bridge,
 			    &mlx5_esw_bridge_debugfs_fops);
 }
......
@@ -1436,7 +1436,6 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
 	esw_init_chains_offload_flags(esw, &attr.flags);
 	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
-	attr.fs_base_prio = FDB_TC_OFFLOAD;
 	attr.max_grp_num = esw->params.large_group_num;
 	attr.default_ft = miss_fdb;
 	attr.mapping = esw->offloads.reg_c0_obj_pool;
@@ -2779,9 +2778,9 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
 					 struct mlx5_eswitch *peer_esw,
 					 bool pair)
 {
-	u8 peer_idx = mlx5_get_dev_index(peer_esw->dev);
+	u16 peer_vhca_id = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
+	u16 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
 	struct mlx5_flow_root_namespace *peer_ns;
-	u8 idx = mlx5_get_dev_index(esw->dev);
 	struct mlx5_flow_root_namespace *ns;
 	int err;
@@ -2789,18 +2788,18 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
 	ns = esw->dev->priv.steering->fdb_root_ns;
 
 	if (pair) {
-		err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_idx);
+		err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_vhca_id);
 		if (err)
 			return err;
 
-		err = mlx5_flow_namespace_set_peer(peer_ns, ns, idx);
+		err = mlx5_flow_namespace_set_peer(peer_ns, ns, vhca_id);
 		if (err) {
-			mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
+			mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
 			return err;
 		}
 	} else {
-		mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
-		mlx5_flow_namespace_set_peer(peer_ns, NULL, idx);
+		mlx5_flow_namespace_set_peer(ns, NULL, peer_vhca_id);
+		mlx5_flow_namespace_set_peer(peer_ns, NULL, vhca_id);
 	}
 
 	return 0;
@@ -4196,7 +4195,7 @@ int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
 	}
 
 	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
-	MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1);
+	MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, enable);
 
 	err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
 					    MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
......
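The migratable hunk is the "honor user input" fix: the setter used to hardcode the capability to 1, so `devlink port function set ... migratable disable` silently re-enabled it. The fix writes the caller's boolean through. A trivially small C sketch contrasting the two shapes (hypothetical names):

    #include <stdbool.h>
    #include <stdio.h>

    struct hca_caps { bool migratable; };

    /* Buggy shape: ignores the request and always enables. */
    static void set_migratable_hardcoded(struct hca_caps *caps, bool enable)
    {
        (void)enable;
        caps->migratable = true;
    }

    /* Fixed shape: honor what the user asked for. */
    static void set_migratable(struct hca_caps *caps, bool enable)
    {
        caps->migratable = enable;
    }

    int main(void)
    {
        struct hca_caps caps = { 0 };

        set_migratable_hardcoded(&caps, false);
        printf("hardcoded: migratable=%d (disable ignored)\n", caps.migratable);

        set_migratable(&caps, false);
        printf("fixed:     migratable=%d\n", caps.migratable);
        return 0;
    }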
@@ -140,7 +140,7 @@ static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace
 static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
 				  struct mlx5_flow_root_namespace *peer_ns,
-				  u8 peer_idx)
+				  u16 peer_vhca_id)
 {
 	return 0;
 }
......
@@ -94,7 +94,7 @@ struct mlx5_flow_cmds {
 	int (*set_peer)(struct mlx5_flow_root_namespace *ns,
 			struct mlx5_flow_root_namespace *peer_ns,
-			u8 peer_idx);
+			u16 peer_vhca_id);
 
 	int (*create_ns)(struct mlx5_flow_root_namespace *ns);
 	int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
......
@@ -3621,7 +3621,7 @@ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
 				 struct mlx5_flow_root_namespace *peer_ns,
-				 u8 peer_idx)
+				 u16 peer_vhca_id)
 {
 	if (peer_ns && ns->mode != peer_ns->mode) {
 		mlx5_core_err(ns->dev,
@@ -3629,7 +3629,7 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
 		return -EINVAL;
 	}
 
-	return ns->cmds->set_peer(ns, peer_ns, peer_idx);
+	return ns->cmds->set_peer(ns, peer_ns, peer_vhca_id);
 }
 
 /* This function should be called only at init stage of the namespace.
......
@@ -303,7 +303,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
 int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
 				 struct mlx5_flow_root_namespace *peer_ns,
-				 u8 peer_idx);
+				 u16 peer_vhca_id);
 int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
 				 enum mlx5_flow_steering_mode mode);
......
@@ -178,7 +178,7 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
 	if (!mlx5_chains_ignore_flow_level_supported(chains) ||
 	    (chain == 0 && prio == 1 && level == 0)) {
 		ft_attr.level = chains->fs_base_level;
-		ft_attr.prio = chains->fs_base_prio;
+		ft_attr.prio = chains->fs_base_prio + prio - 1;
 		ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
 			mlx5_get_fdb_sub_ns(chains->dev, chain) :
 			mlx5_get_flow_namespace(chains->dev, chains->ns);
......
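Without ignore_flow_level support, every chain/prio used to land on the same base priority and the second table creation with identical (level, prio) failed. The fix spreads the 1-based tc prio across the namespace range. Worked example with a hypothetical fs_base_prio of 16: tc prios 1, 2, 3 now map to 16, 17, 18 instead of all colliding at 16.

    #include <stdio.h>

    /* Map a 1-based tc prio onto the namespace prio range starting at base.
     * Before the fix every prio mapped to base, so distinct prios collided.
     */
    static unsigned ft_prio(unsigned fs_base_prio, unsigned prio)
    {
        return fs_base_prio + prio - 1;
    }

    int main(void)
    {
        unsigned base = 16; /* hypothetical fs_base_prio */

        for (unsigned prio = 1; prio <= 3; prio++)
            printf("tc prio %u -> ft prio %u\n", prio, ft_prio(base, prio));
        return 0;
    }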
@@ -1506,6 +1506,7 @@ void mlx5_uninit_one(struct mlx5_core_dev *dev)
 	if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
 		mlx5_core_warn(dev, "%s: interface is down, NOP\n",
 			       __func__);
+		mlx5_devlink_params_unregister(priv_to_devlink(dev));
 		mlx5_cleanup_once(dev);
 		goto out;
 	}
......
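mlx5_uninit_one() now unregisters the devlink params on the interface-down early exit too, so teardown mirrors registration regardless of which path runs; otherwise a later register would hit an already-registered set. A small C sketch of the symmetric-teardown shape (hypothetical names):

    #include <stdbool.h>
    #include <stdio.h>

    struct dev { bool up; bool params_registered; };

    static void params_unregister(struct dev *d)
    {
        if (d->params_registered) {
            d->params_registered = false;
            puts("params unregistered");
        }
    }

    static void uninit_one(struct dev *d)
    {
        if (!d->up) {
            /* The fix: the early-exit path must still undo registration,
             * otherwise a later register hits a duplicate.
             */
            params_unregister(d);
            puts("interface down, NOP for the rest");
            return;
        }
        params_unregister(d);
        puts("full teardown");
    }

    int main(void)
    {
        struct dev d = { .up = false, .params_registered = true };

        uninit_one(&d); /* takes the early path, still unregisters */
        return 0;
    }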
@@ -2079,7 +2079,7 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
 	peer_vport = vhca_id_valid && mlx5_core_is_pf(dmn->mdev) &&
 		     (vhca_id != dmn->info.caps.gvmi);
-	vport_dmn = peer_vport ? dmn->peer_dmn[vhca_id] : dmn;
+	vport_dmn = peer_vport ? xa_load(&dmn->peer_dmn_xa, vhca_id) : dmn;
 	if (!vport_dmn) {
 		mlx5dr_dbg(dmn, "No peer vport domain for given vhca_id\n");
 		return NULL;
......
@@ -564,11 +564,12 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
 	if (err)
-		return err;
+		goto err_free_in;
 
 	*reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
-	kvfree(in);
 
+err_free_in:
+	kvfree(in);
 	return err;
 }
......
@@ -475,6 +475,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
 	mutex_init(&dmn->info.rx.mutex);
 	mutex_init(&dmn->info.tx.mutex);
 	xa_init(&dmn->definers_xa);
+	xa_init(&dmn->peer_dmn_xa);
 
 	if (dr_domain_caps_init(mdev, dmn)) {
 		mlx5dr_err(dmn, "Failed init domain, no caps\n");
@@ -507,6 +508,7 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
 uninit_caps:
 	dr_domain_caps_uninit(dmn);
 def_xa_destroy:
+	xa_destroy(&dmn->peer_dmn_xa);
 	xa_destroy(&dmn->definers_xa);
 	kfree(dmn);
 	return NULL;
@@ -547,6 +549,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
 	dr_domain_uninit_csum_recalc_fts(dmn);
 	dr_domain_uninit_resources(dmn);
 	dr_domain_caps_uninit(dmn);
+	xa_destroy(&dmn->peer_dmn_xa);
 	xa_destroy(&dmn->definers_xa);
 	mutex_destroy(&dmn->info.tx.mutex);
 	mutex_destroy(&dmn->info.rx.mutex);
@@ -556,17 +559,21 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
 void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
 			    struct mlx5dr_domain *peer_dmn,
-			    u8 peer_idx)
+			    u16 peer_vhca_id)
 {
+	struct mlx5dr_domain *peer;
+
 	mlx5dr_domain_lock(dmn);
 
-	if (dmn->peer_dmn[peer_idx])
-		refcount_dec(&dmn->peer_dmn[peer_idx]->refcount);
+	peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id);
+	if (peer)
+		refcount_dec(&peer->refcount);
 
-	dmn->peer_dmn[peer_idx] = peer_dmn;
+	WARN_ON(xa_err(xa_store(&dmn->peer_dmn_xa, peer_vhca_id, peer_dmn, GFP_KERNEL)));
 
-	if (dmn->peer_dmn[peer_idx])
-		refcount_inc(&dmn->peer_dmn[peer_idx]->refcount);
+	peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id);
+	if (peer)
+		refcount_inc(&peer->refcount);
 
 	mlx5dr_domain_unlock(dmn);
 }
@@ -1652,17 +1652,18 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
 	struct mlx5dr_domain *dmn = sb->dmn;
 	struct mlx5dr_domain *vport_dmn;
 	u8 *bit_mask = sb->bit_mask;
+	struct mlx5dr_domain *peer;
 	bool source_gvmi_set;
 
 	DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
 
 	if (sb->vhca_id_valid) {
+		peer = xa_load(&dmn->peer_dmn_xa, id);
 		/* Find port GVMI based on the eswitch_owner_vhca_id */
 		if (id == dmn->info.caps.gvmi)
 			vport_dmn = dmn;
-		else if (id < MLX5_MAX_PORTS && dmn->peer_dmn[id] &&
-			 (id == dmn->peer_dmn[id]->info.caps.gvmi))
-			vport_dmn = dmn->peer_dmn[id];
+		else if (peer && (id == peer->info.caps.gvmi))
+			vport_dmn = peer;
 		else
 			return -EINVAL;
......
@@ -1984,16 +1984,17 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
 	struct mlx5dr_domain *dmn = sb->dmn;
 	struct mlx5dr_domain *vport_dmn;
 	u8 *bit_mask = sb->bit_mask;
+	struct mlx5dr_domain *peer;
 
 	DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);
 
 	if (sb->vhca_id_valid) {
+		peer = xa_load(&dmn->peer_dmn_xa, id);
 		/* Find port GVMI based on the eswitch_owner_vhca_id */
 		if (id == dmn->info.caps.gvmi)
 			vport_dmn = dmn;
-		else if (id < MLX5_MAX_PORTS && dmn->peer_dmn[id] &&
-			 (id == dmn->peer_dmn[id]->info.caps.gvmi))
-			vport_dmn = dmn->peer_dmn[id];
+		else if (peer && (id == peer->info.caps.gvmi))
+			vport_dmn = peer;
 		else
 			return -EINVAL;
......
@@ -935,7 +935,6 @@ struct mlx5dr_domain_info {
 };
 
 struct mlx5dr_domain {
-	struct mlx5dr_domain *peer_dmn[MLX5_MAX_PORTS];
 	struct mlx5_core_dev *mdev;
 	u32 pdn;
 	struct mlx5_uars_page *uar;
@@ -956,6 +955,7 @@ struct mlx5dr_domain {
 	struct list_head dbg_tbl_list;
 	struct mlx5dr_dbg_dump_info dump_info;
 	struct xarray definers_xa;
+	struct xarray peer_dmn_xa;
 	/* memory management statistics */
 	u32 num_buddies[DR_ICM_TYPE_MAX];
 };
......
@@ -781,14 +781,14 @@ static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
 static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
 				struct mlx5_flow_root_namespace *peer_ns,
-				u8 peer_idx)
+				u16 peer_vhca_id)
 {
 	struct mlx5dr_domain *peer_domain = NULL;
 
 	if (peer_ns)
 		peer_domain = peer_ns->fs_dr_domain.dr_domain;
 	mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
-			       peer_domain, peer_idx);
+			       peer_domain, peer_vhca_id);
 	return 0;
 }
......
@@ -49,7 +49,7 @@ int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);
 void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
 			    struct mlx5dr_domain *peer_dmn,
-			    u8 peer_idx);
+			    u16 peer_vhca_id);
 
 struct mlx5dr_table *
 mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags,
......
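The thread running through the DR hunks: peer domains were kept in a fixed array indexed by port index, but the key callers actually hold is the 16-bit vhca_id, which need not be below MLX5_MAX_PORTS; indexing the array with it read (or pointed) out of bounds. The series therefore keys an xarray by vhca_id end to end, which is why every u8 peer_idx parameter becomes u16 peer_vhca_id. A userspace C sketch of the difference, using a toy sparse map as a stand-in for the xarray (all names hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_PORTS 4   /* stand-in for MLX5_MAX_PORTS */
    #define MAP_SLOTS 16

    struct peer { uint16_t vhca_id; };

    /* Tiny stand-in for an xarray: sparse map from u16 key to pointer. */
    struct peer_map {
        uint16_t keys[MAP_SLOTS];
        struct peer *vals[MAP_SLOTS];
        int n;
    };

    static void map_store(struct peer_map *m, uint16_t key, struct peer *p)
    {
        for (int i = 0; i < m->n; i++)
            if (m->keys[i] == key) { m->vals[i] = p; return; }
        if (m->n == MAP_SLOTS)
            return; /* sketch only: a real xarray grows instead */
        m->keys[m->n] = key;
        m->vals[m->n++] = p;
    }

    static struct peer *map_load(struct peer_map *m, uint16_t key)
    {
        for (int i = 0; i < m->n; i++)
            if (m->keys[i] == key)
                return m->vals[i];
        return NULL;
    }

    int main(void)
    {
        struct peer_map map = { 0 };
        struct peer p = { .vhca_id = 300 };

        /* vhca_id 300 would index far past a peer_dmn[MAX_PORTS] array in
         * the old scheme; a sparse map keyed by the real id has no such limit.
         */
        map_store(&map, p.vhca_id, &p);
        printf("lookup(300) -> %s\n", map_load(&map, 300) ? "found" : "missing");
        printf("lookup(1)   -> %s\n", map_load(&map, 1) ? "found" : "missing");
        return 0;
    }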