Commit be338bda authored by David S. Miller

Merge tag 'mlx5-fixes-2021-05-18' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-05-18

This series introduces some fixes to the mlx5 driver.

Please pull and let me know if there is any problem.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c71b9964 e63052a5
......@@ -743,10 +743,10 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
MLX5_IB_UMR_OCTOWORD;
ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
- if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
+ if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
!dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
mlx5_ib_can_load_pas_with_umr(dev, 0))
- ent->limit = dev->mdev->profile->mr_cache[i].limit;
+ ent->limit = dev->mdev->profile.mr_cache[i].limit;
else
ent->limit = 0;
spin_lock_irq(&ent->lock);
......
......@@ -223,6 +223,8 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
rpriv = priv->ppriv;
fwd_vport_num = rpriv->rep->vport;
lag_dev = netdev_master_upper_dev_get(netdev);
+ if (!lag_dev)
+ return;
netdev_dbg(netdev, "lag_dev(%s)'s slave vport(%d) is txable(%d)\n",
lag_dev->name, fwd_vport_num, net_lag_port_dev_txable(netdev));
......
......@@ -1505,7 +1505,7 @@ mlx5e_init_fib_work_ipv4(struct mlx5e_priv *priv,
fen_info = container_of(info, struct fib_entry_notifier_info, info);
fib_dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
- if (fib_dev->netdev_ops != &mlx5e_netdev_ops ||
+ if (!fib_dev || fib_dev->netdev_ops != &mlx5e_netdev_ops ||
fen_info->dst_len != 32)
return NULL;
......
......@@ -35,6 +35,7 @@
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
+ #include <linux/mlx5/mpfs.h>
#include "en.h"
#include "en_rep.h"
#include "lib/mpfs.h"
......
......@@ -889,10 +889,13 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
- if (rq->icosq)
+ if (rq->icosq) {
mlx5e_trigger_irq(rq->icosq);
- else
+ } else {
+ local_bh_disable();
napi_schedule(rq->cq.napi);
+ local_bh_enable();
+ }
}
void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
......@@ -2697,7 +2700,7 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
int err;
old_num_txqs = netdev->real_num_tx_queues;
- old_ntc = netdev->num_tc;
+ old_ntc = netdev->num_tc ? : 1;
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
......@@ -5229,6 +5232,11 @@ static void mlx5e_update_features(struct net_device *netdev)
rtnl_unlock();
}
+ static void mlx5e_reset_channels(struct net_device *netdev)
+ {
+ netdev_reset_tc(netdev);
+ }
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
const bool take_rtnl = priv->netdev->reg_state == NETREG_REGISTERED;
......@@ -5283,6 +5291,7 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
profile->cleanup_tx(priv);
out:
+ mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
cancel_work_sync(&priv->update_stats_work);
return err;
......@@ -5300,6 +5309,7 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
profile->cleanup_rx(priv);
profile->cleanup_tx(priv);
+ mlx5e_reset_channels(priv->netdev);
cancel_work_sync(&priv->update_stats_work);
}
......
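For context on the mlx5e_activate_rq change above: napi_schedule() only marks NET_RX_SOFTIRQ as pending, so when it is called from process context the poll routine is not guaranteed to run promptly unless bottom halves are toggled; local_bh_enable() executes the pending softirq. Below is a sketch of the resulting function with an illustrative comment added (the comment is not part of the patch):

void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	if (rq->icosq) {
		mlx5e_trigger_irq(rq->icosq);
	} else {
		/* napi_schedule() from process context only raises the softirq;
		 * re-enabling bottom halves runs the pending NET_RX_SOFTIRQ so
		 * the RQ is actually polled.
		 */
		local_bh_disable();
		napi_schedule(rq->cq.napi);
		local_bh_enable();
	}
}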
......@@ -1322,10 +1322,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct net_device *out_dev, *encap_dev = NULL;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
bool vf_tun = false, encap_valid = true;
+ struct net_device *encap_dev = NULL;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
......@@ -1371,16 +1371,22 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr = attr->esw_attr;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+ struct net_device *out_dev;
int mirred_ifindex;
if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
continue;
mirred_ifindex = parse_attr->mirred_ifindex[out_index];
- out_dev = __dev_get_by_index(dev_net(priv->netdev),
- mirred_ifindex);
+ out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
+ if (!out_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
+ err = -ENODEV;
+ goto err_out;
+ }
err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
extack, &encap_dev, &encap_valid);
+ dev_put(out_dev);
if (err)
goto err_out;
......@@ -1393,6 +1399,12 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
esw_attr->dests[out_index].mdev = out_priv->mdev;
}
+ if (vf_tun && esw_attr->out_count > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
err = mlx5_eswitch_add_vlan_action(esw, attr);
if (err)
goto err_out;
......@@ -3526,8 +3538,12 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
if (err)
return err;
- *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
- dev_get_iflink(vlan_dev));
+ rcu_read_lock();
+ *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev), dev_get_iflink(vlan_dev));
+ rcu_read_unlock();
+ if (!*out_dev)
+ return -ENODEV;
if (is_vlan_dev(*out_dev))
err = add_vlan_push_action(priv, attr, out_dev, action);
......
......@@ -35,6 +35,7 @@
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
+ #include <linux/mlx5/mpfs.h>
#include "esw/acl/lgcy.h"
#include "esw/legacy.h"
#include "mlx5_core.h"
......
......@@ -65,7 +65,7 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
{
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *root_ns;
- int err;
+ int err, err2;
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
......@@ -76,33 +76,34 @@ mlx5_eswitch_termtbl_create(struct mlx5_core_dev *dev,
/* As this is the terminating action then the termination table is the
* same prio as the slow path
*/
- ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION |
+ ft_attr.flags = MLX5_FLOW_TABLE_TERMINATION | MLX5_FLOW_TABLE_UNMANAGED |
MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
- ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.prio = FDB_TC_OFFLOAD;
ft_attr.max_fte = 1;
+ ft_attr.level = 1;
ft_attr.autogroup.max_num_groups = 1;
tt->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
if (IS_ERR(tt->termtbl)) {
esw_warn(dev, "Failed to create termination table (error %d)\n",
IS_ERR(tt->termtbl));
return -EOPNOTSUPP;
err = PTR_ERR(tt->termtbl);
esw_warn(dev, "Failed to create termination table, err %pe\n", tt->termtbl);
return err;
}
tt->rule = mlx5_add_flow_rules(tt->termtbl, NULL, flow_act,
&tt->dest, 1);
if (IS_ERR(tt->rule)) {
esw_warn(dev, "Failed to create termination table rule (error %d)\n",
IS_ERR(tt->rule));
err = PTR_ERR(tt->rule);
esw_warn(dev, "Failed to create termination table rule, err %pe\n", tt->rule);
goto add_flow_err;
}
return 0;
add_flow_err:
- err = mlx5_destroy_flow_table(tt->termtbl);
- if (err)
- esw_warn(dev, "Failed to destroy termination table\n");
+ err2 = mlx5_destroy_flow_table(tt->termtbl);
+ if (err2)
+ esw_warn(dev, "Failed to destroy termination table, err %d\n", err2);
- return -EOPNOTSUPP;
+ return err;
}
static struct mlx5_termtbl_handle *
......@@ -172,19 +173,6 @@ mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
}
}
- static bool mlx5_eswitch_termtbl_is_encap_reformat(struct mlx5_pkt_reformat *rt)
- {
- switch (rt->reformat_type) {
- case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
- case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
- case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
- case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
- return true;
- default:
- return false;
- }
- }
static void
mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
struct mlx5_flow_act *dst)
......@@ -202,14 +190,6 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
}
}
- if (src->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
- mlx5_eswitch_termtbl_is_encap_reformat(src->pkt_reformat)) {
- src->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- dst->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- dst->pkt_reformat = src->pkt_reformat;
- src->pkt_reformat = NULL;
- }
}
static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
......@@ -238,6 +218,7 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
int i;
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
!mlx5_eswitch_offload_is_uplink_port(esw, spec))
return false;
......@@ -279,12 +260,19 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
if (dest[i].type != MLX5_FLOW_DESTINATION_TYPE_VPORT)
continue;
+ if (attr->dests[num_vport_dests].flags & MLX5_ESW_DEST_ENCAP) {
+ term_tbl_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ term_tbl_act.pkt_reformat = attr->dests[num_vport_dests].pkt_reformat;
+ } else {
+ term_tbl_act.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ term_tbl_act.pkt_reformat = NULL;
+ }
/* get the terminating table for the action list */
tt = mlx5_eswitch_termtbl_get_create(esw, &term_tbl_act,
&dest[i], attr);
if (IS_ERR(tt)) {
esw_warn(esw->dev, "Failed to get termination table (error %d)\n",
IS_ERR(tt));
esw_warn(esw->dev, "Failed to get termination table, err %pe\n", tt);
goto revert_changes;
}
attr->dests[num_vport_dests].termtbl = tt;
......@@ -301,6 +289,9 @@ mlx5_eswitch_add_termtbl_rule(struct mlx5_eswitch *esw,
goto revert_changes;
/* create the FTE */
+ flow_act->action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = NULL;
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, num_dest);
if (IS_ERR(rule))
goto revert_changes;
......
......@@ -307,6 +307,11 @@ int mlx5_lag_mp_init(struct mlx5_lag *ldev)
struct lag_mp *mp = &ldev->lag_mp;
int err;
+ /* always clear mfi, as it might become stale when a route delete event
+ * has been missed
+ */
+ mp->mfi = NULL;
if (mp->fib_nb.notifier_call)
return 0;
......@@ -335,4 +340,5 @@ void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev)
unregister_fib_notifier(&init_net, &mp->fib_nb);
destroy_workqueue(mp->wq);
mp->fib_nb.notifier_call = NULL;
+ mp->mfi = NULL;
}
......@@ -33,6 +33,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
+ #include <linux/mlx5/mpfs.h>
#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
#include "lib/mpfs.h"
......@@ -175,6 +176,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
mutex_unlock(&mpfs->lock);
return err;
}
+ EXPORT_SYMBOL(mlx5_mpfs_add_mac);
int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
{
......@@ -206,3 +208,4 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
mutex_unlock(&mpfs->lock);
return err;
}
+ EXPORT_SYMBOL(mlx5_mpfs_del_mac);
......@@ -84,12 +84,9 @@ struct l2addr_node {
#ifdef CONFIG_MLX5_MPFS
int mlx5_mpfs_init(struct mlx5_core_dev *dev);
void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev);
- int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
- int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
#else /* #ifndef CONFIG_MLX5_MPFS */
static inline int mlx5_mpfs_init(struct mlx5_core_dev *dev) { return 0; }
static inline void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) {}
- static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
- static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
#endif
#endif
......@@ -503,7 +503,7 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
{
- struct mlx5_profile *prof = dev->profile;
+ struct mlx5_profile *prof = &dev->profile;
void *set_hca_cap;
int err;
......@@ -524,11 +524,11 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
to_fw_pkey_sz(dev, 128));
/* Check log_max_qp from HCA caps to set in current profile */
- if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+ if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
- profile[prof_sel].log_max_qp,
+ prof->log_max_qp,
MLX5_CAP_GEN_MAX(dev, log_max_qp));
- profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+ prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
}
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
......@@ -1381,8 +1381,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
struct mlx5_priv *priv = &dev->priv;
int err;
- dev->profile = &profile[profile_idx];
+ memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
mutex_init(&dev->intf_state_mutex);
......
......@@ -95,9 +95,10 @@ int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs)
int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
int msix_vec_count)
{
- int sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap = NULL, *query_cap = NULL, *cap;
int num_vf_msix, min_msix, max_msix;
- void *hca_cap, *cap;
int ret;
num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
......@@ -116,11 +117,20 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
if (msix_vec_count > max_msix)
return -EOVERFLOW;
- hca_cap = kzalloc(sz, GFP_KERNEL);
- if (!hca_cap)
- return -ENOMEM;
+ query_cap = kzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = mlx5_vport_get_other_func_cap(dev, function_id, query_cap);
+ if (ret)
+ goto out;
cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
MLX5_SET(cmd_hca_cap, cap, dynamic_msix_table_size, msix_vec_count);
MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
......@@ -130,7 +140,9 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+ out:
kfree(hca_cap);
+ kfree(query_cap);
return ret;
}
......
......@@ -136,10 +136,10 @@ static enum devlink_port_fn_state mlx5_sf_to_devlink_state(u8 hw_state)
switch (hw_state) {
case MLX5_VHCA_STATE_ACTIVE:
case MLX5_VHCA_STATE_IN_USE:
- case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
return DEVLINK_PORT_FN_STATE_ACTIVE;
case MLX5_VHCA_STATE_INVALID:
case MLX5_VHCA_STATE_ALLOCATED:
+ case MLX5_VHCA_STATE_TEARDOWN_REQUEST:
default:
return DEVLINK_PORT_FN_STATE_INACTIVE;
}
......@@ -192,14 +192,17 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_po
return err;
}
- static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
+ static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
+ struct netlink_ext_ack *extack)
{
int err;
if (mlx5_sf_is_active(sf))
return 0;
- if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED)
- return -EINVAL;
+ if (sf->hw_state != MLX5_VHCA_STATE_ALLOCATED) {
+ NL_SET_ERR_MSG_MOD(extack, "SF is inactivated but it is still attached");
+ return -EBUSY;
+ }
err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
if (err)
......@@ -226,7 +229,8 @@ static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
struct mlx5_sf *sf,
- enum devlink_port_fn_state state)
+ enum devlink_port_fn_state state,
+ struct netlink_ext_ack *extack)
{
int err = 0;
......@@ -234,7 +238,7 @@ static int mlx5_sf_state_set(struct mlx5_core_dev *dev, struct mlx5_sf_table *ta
if (state == mlx5_sf_to_devlink_state(sf->hw_state))
goto out;
if (state == DEVLINK_PORT_FN_STATE_ACTIVE)
- err = mlx5_sf_activate(dev, sf);
+ err = mlx5_sf_activate(dev, sf, extack);
else if (state == DEVLINK_PORT_FN_STATE_INACTIVE)
err = mlx5_sf_deactivate(dev, sf);
else
......@@ -265,7 +269,7 @@ int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_po
goto out;
}
- err = mlx5_sf_state_set(dev, table, sf, state);
+ err = mlx5_sf_state_set(dev, table, sf, state, extack);
out:
mlx5_sf_table_put(table);
return err;
......
......@@ -15,6 +15,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
+ #include <linux/mlx5/mpfs.h>
#include "mlx5_vdpa.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
......@@ -1859,11 +1860,16 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb
static void mlx5_vdpa_free(struct vdpa_device *vdev)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_core_dev *pfmdev;
struct mlx5_vdpa_net *ndev;
ndev = to_mlx5_vdpa_ndev(mvdev);
free_resources(ndev);
+ if (!is_zero_ether_addr(ndev->config.mac)) {
+ pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
+ mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
+ }
mlx5_vdpa_free_resources(&ndev->mvdev);
mutex_destroy(&ndev->reslock);
}
......@@ -1990,6 +1996,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
{
struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
struct virtio_net_config *config;
+ struct mlx5_core_dev *pfmdev;
struct mlx5_vdpa_dev *mvdev;
struct mlx5_vdpa_net *ndev;
struct mlx5_core_dev *mdev;
......@@ -2023,10 +2030,17 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
if (err)
goto err_mtu;
+ if (!is_zero_ether_addr(config->mac)) {
+ pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
+ err = mlx5_mpfs_add_mac(pfmdev, config->mac);
+ if (err)
+ goto err_mtu;
+ }
mvdev->vdev.dma_dev = mdev->device;
err = mlx5_vdpa_alloc_resources(&ndev->mvdev);
if (err)
- goto err_mtu;
+ goto err_mpfs;
err = alloc_resources(ndev);
if (err)
......@@ -2044,6 +2058,9 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
free_resources(ndev);
err_res:
mlx5_vdpa_free_resources(&ndev->mvdev);
+ err_mpfs:
+ if (!is_zero_ether_addr(config->mac))
+ mlx5_mpfs_del_mac(pfmdev, config->mac);
err_mtu:
mutex_destroy(&ndev->reslock);
put_device(&mvdev->vdev.dev);
......
......@@ -703,6 +703,27 @@ struct mlx5_hv_vhca;
#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
+ enum {
+ MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
+ MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
+ };
+ enum {
+ MR_CACHE_LAST_STD_ENTRY = 20,
+ MLX5_IMR_MTT_CACHE_ENTRY,
+ MLX5_IMR_KSM_CACHE_ENTRY,
+ MAX_MR_CACHE_ENTRIES
+ };
+ struct mlx5_profile {
+ u64 mask;
+ u8 log_max_qp;
+ struct {
+ int size;
+ int limit;
+ } mr_cache[MAX_MR_CACHE_ENTRIES];
+ };
struct mlx5_core_dev {
struct device *device;
enum mlx5_coredev_type coredev_type;
......@@ -731,7 +752,7 @@ struct mlx5_core_dev {
struct mutex intf_state_mutex;
unsigned long intf_state;
struct mlx5_priv priv;
- struct mlx5_profile *profile;
+ struct mlx5_profile profile;
u32 issi;
struct mlx5e_resources mlx5e_res;
struct mlx5_dm *dm;
......@@ -1083,18 +1104,6 @@ static inline u8 mlx5_mkey_variant(u32 mkey)
return mkey & 0xff;
}
- enum {
- MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
- MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
- };
- enum {
- MR_CACHE_LAST_STD_ENTRY = 20,
- MLX5_IMR_MTT_CACHE_ENTRY,
- MLX5_IMR_KSM_CACHE_ENTRY,
- MAX_MR_CACHE_ENTRIES
- };
/* Async-atomic event notifier used by mlx5 core to forward FW
* evetns recived from event queue to mlx5 consumers.
* Optimise event queue dipatching.
......@@ -1148,15 +1157,6 @@ int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
struct ib_device *device,
struct rdma_netdev_alloc_params *params);
- struct mlx5_profile {
- u64 mask;
- u8 log_max_qp;
- struct {
- int size;
- int limit;
- } mr_cache[MAX_MR_CACHE_ENTRIES];
- };
enum {
MLX5_PCI_DEV_IS_VF = 1 << 0,
};
......
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
* Copyright (c) 2021 Mellanox Technologies Ltd.
*/
#ifndef _MLX5_MPFS_
#define _MLX5_MPFS_
struct mlx5_core_dev;
#ifdef CONFIG_MLX5_MPFS
int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac);
int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac);
#else /* #ifndef CONFIG_MLX5_MPFS */
static inline int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
static inline int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) { return 0; }
#endif
#endif
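For illustration only (not part of this series): with mlx5_mpfs_add_mac()/mlx5_mpfs_del_mac() now exported and declared in the new <linux/mlx5/mpfs.h>, a leaf driver such as mlx5_vdpa programs the physical function's MPFS L2 table roughly as the mlx5_vnet.c hunks above do. A condensed sketch, using a hypothetical helper name:

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mpfs.h>

/* Hypothetical helper illustrating the usage pattern from the vdpa hunks. */
static int example_mpfs_add_unicast(struct mlx5_core_dev *mdev, u8 *mac)
{
	struct mlx5_core_dev *pfmdev;

	if (is_zero_ether_addr(mac))
		return 0;

	/* The MPFS table is owned by the physical function; resolve its
	 * mlx5_core_dev through the parent PCI device.
	 */
	pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));

	/* Teardown paths must mirror a successful add with mlx5_mpfs_del_mac(). */
	return mlx5_mpfs_add_mac(pfmdev, mac);
}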