Commit ee661a4a authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2020-11-03' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2020-11-03

v1->v2:
 - Fix the Fixes: tag in patch #1
 - Toss ktls refcount leak fix, Maxim will look further into the root
   cause.
 - Toss eswitch chain 0 prio patch, until we determine if it is needed
   for -rc and net.

* tag 'mlx5-fixes-2020-11-03' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Fix incorrect access of RCU-protected xdp_prog
  net/mlx5e: Fix VXLAN synchronization after function reload
  net/mlx5: E-switch, Avoid extack error log for disabled vport
  net/mlx5: Fix deletion of duplicate rules
  net/mlx5e: Use spin_lock_bh for async_icosq_lock
  net/mlx5e: Protect encap route dev from concurrent release
  net/mlx5e: Fix modify header actions memory leak
====================

Link: https://lore.kernel.org/r/20201105202129.23644-1-saeedm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 847f0a2b 1a50cf9a
@@ -107,12 +107,16 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
 	mlx5e_tc_encap_flows_del(priv, e, &flow_list);

 	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
+		struct net_device *route_dev;
+
 		ether_addr_copy(e->h_dest, ha);
 		ether_addr_copy(eth->h_dest, ha);

 		/* Update the encap source mac, in case that we delete
 		 * the flows when encap source mac changed.
 		 */
-		ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
+		route_dev = __dev_get_by_index(dev_net(priv->netdev), e->route_dev_ifindex);
+		if (route_dev)
+			ether_addr_copy(eth->h_source, route_dev->dev_addr);

 		mlx5e_tc_encap_flows_add(priv, e, &flow_list);
 	}
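The fix above stops caching a struct net_device pointer in the encap entry and instead re-resolves the route device from its ifindex at each use, so a device freed by a concurrent unregister can no longer be dereferenced. A minimal sketch of the pattern, assuming the caller holds RTNL (the helper name here is hypothetical, not part of the patch):

/* Hypothetical helper: resolve the encap route device at the point
 * of use. __dev_get_by_index() takes no reference on the device, so
 * the result is only valid while RTNL (or another pinning mechanism)
 * keeps the device from being unregistered.
 */
static struct net_device *encap_route_dev(struct mlx5e_priv *priv,
					  int route_dev_ifindex)
{
	ASSERT_RTNL();
	return __dev_get_by_index(dev_net(priv->netdev), route_dev_ifindex);
}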
@@ -77,13 +77,13 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
 	return 0;
 }

-static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
-				   struct net_device *mirred_dev,
-				   struct net_device **out_dev,
-				   struct net_device **route_dev,
-				   struct flowi4 *fl4,
-				   struct neighbour **out_n,
-				   u8 *out_ttl)
+static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
+				       struct net_device *mirred_dev,
+				       struct net_device **out_dev,
+				       struct net_device **route_dev,
+				       struct flowi4 *fl4,
+				       struct neighbour **out_n,
+				       u8 *out_ttl)
 {
 	struct neighbour *n;
 	struct rtable *rt;
@@ -117,18 +117,28 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 		ip_rt_put(rt);
 		return ret;
 	}

+	dev_hold(*route_dev);
+
 	if (!(*out_ttl))
 		*out_ttl = ip4_dst_hoplimit(&rt->dst);
 	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
 	ip_rt_put(rt);
-	if (!n)
+	if (!n) {
+		dev_put(*route_dev);
 		return -ENOMEM;
+	}

 	*out_n = n;
 	return 0;
 }

+static void mlx5e_route_lookup_ipv4_put(struct net_device *route_dev,
+					struct neighbour *n)
+{
+	neigh_release(n);
+	dev_put(route_dev);
+}
+
 static const char *mlx5e_netdev_kind(struct net_device *dev)
 {
 	if (dev->rtnl_link_ops)
@@ -193,8 +203,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 	fl4.saddr = tun_key->u.ipv4.src;
 	ttl = tun_key->ttl;

-	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev,
-				      &fl4, &n, &ttl);
+	err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev, &route_dev,
+					  &fl4, &n, &ttl);
 	if (err)
 		return err;
@@ -223,7 +233,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 	e->m_neigh.family = n->ops->family;
 	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
 	e->out_dev = out_dev;
-	e->route_dev = route_dev;
+	e->route_dev_ifindex = route_dev->ifindex;

 	/* It's important to add the neigh to the hash table before checking
 	 * the neigh validity state. So if we'll get a notification, in case the
@@ -278,7 +288,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
-	neigh_release(n);
+	mlx5e_route_lookup_ipv4_put(route_dev, n);
 	return err;

 destroy_neigh_entry:
@@ -286,18 +296,18 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 free_encap:
 	kfree(encap_header);
 release_neigh:
-	neigh_release(n);
+	mlx5e_route_lookup_ipv4_put(route_dev, n);
 	return err;
 }

 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
-static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
-				   struct net_device *mirred_dev,
-				   struct net_device **out_dev,
-				   struct net_device **route_dev,
-				   struct flowi6 *fl6,
-				   struct neighbour **out_n,
-				   u8 *out_ttl)
+static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
+				       struct net_device *mirred_dev,
+				       struct net_device **out_dev,
+				       struct net_device **route_dev,
+				       struct flowi6 *fl6,
+				       struct neighbour **out_n,
+				       u8 *out_ttl)
 {
 	struct dst_entry *dst;
 	struct neighbour *n;
@@ -318,15 +328,25 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 		return ret;
 	}

+	dev_hold(*route_dev);
 	n = dst_neigh_lookup(dst, &fl6->daddr);
 	dst_release(dst);
-	if (!n)
+	if (!n) {
+		dev_put(*route_dev);
 		return -ENOMEM;
+	}

 	*out_n = n;
 	return 0;
 }

+static void mlx5e_route_lookup_ipv6_put(struct net_device *route_dev,
+					struct neighbour *n)
+{
+	neigh_release(n);
+	dev_put(route_dev);
+}
+
 int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 				    struct net_device *mirred_dev,
 				    struct mlx5e_encap_entry *e)
@@ -348,8 +368,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 	fl6.daddr = tun_key->u.ipv6.dst;
 	fl6.saddr = tun_key->u.ipv6.src;

-	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev,
-				      &fl6, &n, &ttl);
+	err = mlx5e_route_lookup_ipv6_get(priv, mirred_dev, &out_dev, &route_dev,
+					  &fl6, &n, &ttl);
 	if (err)
 		return err;
@@ -378,7 +398,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 	e->m_neigh.family = n->ops->family;
 	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
 	e->out_dev = out_dev;
-	e->route_dev = route_dev;
+	e->route_dev_ifindex = route_dev->ifindex;

 	/* It's importent to add the neigh to the hash table before checking
 	 * the neigh validity state. So if we'll get a notification, in case the
@@ -433,7 +453,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
 	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
-	neigh_release(n);
+	mlx5e_route_lookup_ipv6_put(route_dev, n);
 	return err;

 destroy_neigh_entry:
@@ -441,7 +461,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 free_encap:
 	kfree(encap_header);
 release_neigh:
-	neigh_release(n);
+	mlx5e_route_lookup_ipv6_put(route_dev, n);
 	return err;
 }
 #endif
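Renaming the route lookups to _get and pairing them with _put helpers makes the reference ownership explicit: on success the caller holds one reference on the route device (from dev_hold()) and one on the neighbour (from dst_neigh_lookup()), and every exit path releases both together. A simplified sketch of the resulting calling convention for the IPv4 path (not the complete driver code):

	err = mlx5e_route_lookup_ipv4_get(priv, mirred_dev, &out_dev, &route_dev,
					  &fl4, &n, &ttl);
	if (err)
		return err;

	/* route_dev and n are pinned here; safe to use. */

	mlx5e_route_lookup_ipv4_put(route_dev, n);	/* neigh_release() + dev_put() */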
@@ -122,9 +122,9 @@ void mlx5e_activate_xsk(struct mlx5e_channel *c)
 	set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state);
 	/* TX queue is created active. */

-	spin_lock(&c->async_icosq_lock);
+	spin_lock_bh(&c->async_icosq_lock);
 	mlx5e_trigger_irq(&c->async_icosq);
-	spin_unlock(&c->async_icosq_lock);
+	spin_unlock_bh(&c->async_icosq_lock);
 }

 void mlx5e_deactivate_xsk(struct mlx5e_channel *c)
@@ -36,9 +36,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
 		if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state))
 			return 0;

-		spin_lock(&c->async_icosq_lock);
+		spin_lock_bh(&c->async_icosq_lock);
 		mlx5e_trigger_irq(&c->async_icosq);
-		spin_unlock(&c->async_icosq_lock);
+		spin_unlock_bh(&c->async_icosq_lock);
 	}

 	return 0;
@@ -188,7 +188,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
 	err = 0;
 	sq = &c->async_icosq;
-	spin_lock(&c->async_icosq_lock);
+	spin_lock_bh(&c->async_icosq_lock);

 	cseg = post_static_params(sq, priv_rx);
 	if (IS_ERR(cseg))
@@ -199,7 +199,7 @@ static int post_rx_param_wqes(struct mlx5e_channel *c,
 	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);

 unlock:
-	spin_unlock(&c->async_icosq_lock);
+	spin_unlock_bh(&c->async_icosq_lock);

 	return err;
@@ -265,10 +265,10 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,

 	BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1);

-	spin_lock(&sq->channel->async_icosq_lock);
+	spin_lock_bh(&sq->channel->async_icosq_lock);

 	if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
-		spin_unlock(&sq->channel->async_icosq_lock);
+		spin_unlock_bh(&sq->channel->async_icosq_lock);
 		err = -ENOSPC;
 		goto err_dma_unmap;
 	}
@@ -299,7 +299,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
 	icosq_fill_wi(sq, pi, &wi);
 	sq->pc++;
 	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
-	spin_unlock(&sq->channel->async_icosq_lock);
+	spin_unlock_bh(&sq->channel->async_icosq_lock);

 	return 0;
@@ -360,7 +360,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
 	err = 0;
 	sq = &c->async_icosq;
-	spin_lock(&c->async_icosq_lock);
+	spin_lock_bh(&c->async_icosq_lock);

 	cseg = post_static_params(sq, priv_rx);
 	if (IS_ERR(cseg)) {
@@ -372,7 +372,7 @@ static int resync_handle_seq_match(struct mlx5e_ktls_offload_context_rx *priv_rx
 	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
 	priv_rx->stats->tls_resync_res_ok++;
 unlock:
-	spin_unlock(&c->async_icosq_lock);
+	spin_unlock_bh(&c->async_icosq_lock);

 	return err;
 }
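async_icosq_lock is taken from process context in these paths (kTLS RX resync posting, XSK wakeup), while the same lock also guards state touched from NAPI, i.e. softirq, context. A plain spin_lock() in process context can be interrupted on the same CPU by a softirq that then spins forever on the already-held lock; the _bh variants disable bottom halves for the critical section and close that deadlock window. The rule, as a generic sketch (not driver code):

/* A lock shared between process context and softirq context must be
 * taken with the _bh variants from process context.
 */
spin_lock_bh(&c->async_icosq_lock);	/* softirqs disabled on this CPU */
mlx5e_trigger_irq(&c->async_icosq);	/* state shared with NAPI */
spin_unlock_bh(&c->async_icosq_lock);	/* softirqs re-enabled */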
@@ -5253,6 +5253,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)

 	mlx5e_disable_async_events(priv);
 	mlx5_lag_remove(mdev);
+	mlx5_vxlan_reset_to_default(mdev->vxlan);
 }

 int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
@@ -186,7 +186,7 @@ struct mlx5e_encap_entry {
 	unsigned char h_dest[ETH_ALEN];	/* destination eth addr */

 	struct net_device *out_dev;
-	struct net_device *route_dev;
+	int route_dev_ifindex;
 	struct mlx5e_tc_tunnel *tunnel;
 	int reformat_type;
 	u8 flags;
@@ -1584,7 +1584,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));

 out:
-	if (rq->xdp_prog)
+	if (rcu_access_pointer(rq->xdp_prog))
 		mlx5e_xdp_rx_poll_complete(rq);

 	mlx5_cqwq_update_db_record(cqwq);
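rq->xdp_prog is an __rcu-annotated pointer, so a plain load is both a sparse warning and racy in principle. rcu_access_pointer() is the correct accessor when the value is only compared against NULL and never dereferenced, since unlike rcu_dereference() it does not require an RCU read-side critical section. A generic sketch of the distinction (the dereferencing half is illustrative, not this driver's code):

/* NULL-test only: no rcu_read_lock() required. */
if (rcu_access_pointer(rq->xdp_prog))
	mlx5e_xdp_rx_poll_complete(rq);

/* Dereferencing the pointed-to object needs the read-side lock. */
rcu_read_lock();
prog = rcu_dereference(rq->xdp_prog);
if (prog)
	act = bpf_prog_run_xdp(prog, &xdp);	/* illustrative use */
rcu_read_unlock();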
@@ -4658,6 +4658,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 	return flow;

 err_free:
+	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
 	mlx5e_flow_put(priv, flow);
 out:
 	return ERR_PTR(err);
@@ -4802,6 +4803,7 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
 	return 0;

 err_free:
+	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
 	mlx5e_flow_put(priv, flow);
 out:
 	return err;
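The leak fixed here follows the usual goto-unwind discipline: the mod_hdr_acts buffer is grown while the tc actions are parsed, and mlx5e_flow_put() on the error path does not release it, so each err_free label must free it explicitly. The general shape, as a simplified sketch (the parse function name is hypothetical):

	err = parse_tc_actions(priv, flow, parse_attr);	/* may grow mod_hdr_acts */
	if (err)
		goto err_free;
	return 0;

err_free:
	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
	mlx5e_flow_put(priv, flow);
	return err;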
@@ -1902,8 +1902,6 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
 		ether_addr_copy(hw_addr, vport->info.mac);
 		*hw_addr_len = ETH_ALEN;
 		err = 0;
-	} else {
-		NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
 	}
 	mutex_unlock(&esw->state_lock);
 	return err;
@@ -2010,10 +2010,11 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
 	down_write_ref_node(&fte->node, false);
 	for (i = handle->num_rules - 1; i >= 0; i--)
 		tree_remove_node(&handle->rule[i]->node, true);
-	if (fte->modify_mask && fte->dests_size) {
-		modify_fte(fte);
+	if (fte->dests_size) {
+		if (fte->modify_mask)
+			modify_fte(fte);
 		up_write_ref_node(&fte->node, false);
-	} else {
+	} else if (list_empty(&fte->node.children)) {
 		del_hw_fte(&fte->node);
 		/* Avoid double call to del_hw_fte */
 		fte->node.del_hw_func = NULL;
@@ -167,6 +167,17 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
 }

 void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
+{
+	if (!mlx5_vxlan_allowed(vxlan))
+		return;
+
+	mlx5_vxlan_del_port(vxlan, IANA_VXLAN_UDP_PORT);
+	WARN_ON(!hash_empty(vxlan->htable));
+
+	kfree(vxlan);
+}
+
+void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan)
 {
 	struct mlx5_vxlan_port *vxlanp;
 	struct hlist_node *tmp;
@@ -175,12 +186,12 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
 	if (!mlx5_vxlan_allowed(vxlan))
 		return;

-	/* Lockless since we are the only hash table consumers*/
 	hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {
-		hash_del(&vxlanp->hlist);
-		mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);
-		kfree(vxlanp);
+		/* Don't delete default UDP port added by the HW.
+		 * Remove only user configured ports
+		 */
+		if (vxlanp->udp_port == IANA_VXLAN_UDP_PORT)
+			continue;
+		mlx5_vxlan_del_port(vxlan, vxlanp->udp_port);
 	}
-
-	kfree(vxlan);
 }
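This split leaves two teardown entry points: mlx5_vxlan_destroy() for driver removal, which also deletes the default port and frees the table, and mlx5_vxlan_reset_to_default() for function reload, which flushes only user-configured ports so the hardware's default VXLAN entry (IANA UDP port 4789) survives and offloads keep working when the netdev comes back. A sketch of the intended call sites (simplified, assuming the usual mlx5 teardown order):

/* Function reload / nic profile disable: keep the default port. */
mlx5_vxlan_reset_to_default(mdev->vxlan);

/* Full driver removal: delete the default port and free the table. */
mlx5_vxlan_destroy(mdev->vxlan);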
@@ -56,6 +56,7 @@ void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
 int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
 int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
 bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
+void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan);
 #else
 static inline struct mlx5_vxlan*
 mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); }
@@ -63,6 +64,7 @@ static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
 static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
 static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
 static inline bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return false; }
+static inline void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan) { return; }
 #endif

 #endif /* __MLX5_VXLAN_H__ */