Commit b9f1bcb2 authored by David S. Miller

Merge tag 'mlx5-fixes-2018-10-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2018-10-01

This pull request includes some fixes to the mlx5 driver.
Please pull and let me know if there's any problem.

For -stable v4.11:
"6e0a4a23c59a ('net/mlx5: E-Switch, Fix out of bound access when setting vport rate')"

For -stable v4.18:
"98d6627c372a ('net/mlx5e: Set vlan masks for all offloaded TC rules')"
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 31c73cb5 cee26487
@@ -54,6 +54,7 @@
 #include "en_stats.h"
 #include "en/fs.h"
 
+extern const struct net_device_ops mlx5e_netdev_ops;
 struct page_pool;
 
 #define MLX5E_METADATA_ETHER_TYPE (0x8CE4)
...
@@ -16,6 +16,8 @@ struct mlx5e_tc_table {
 	DECLARE_HASHTABLE(mod_hdr_tbl, 8);
 	DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+	struct notifier_block netdevice_nb;
 };
 
 struct mlx5e_flow_table {
...
@@ -4315,7 +4315,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	}
 }
 
-static const struct net_device_ops mlx5e_netdev_ops = {
+const struct net_device_ops mlx5e_netdev_ops = {
 	.ndo_open                = mlx5e_open,
 	.ndo_stop                = mlx5e_close,
 	.ndo_start_xmit          = mlx5e_xmit,
...
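Note: this hunk drops the "static" qualifier so the ops table can be referenced through the extern declaration added in the first hunk; the netdev notifier further down identifies mlx5e devices by comparing a net_device's ops pointer against it. A minimal standalone sketch of that identification pattern, using simplified stand-in types rather than the kernel's:

/* ops_ident.c - recognize a driver's devices by ops-pointer identity. */
#include <stdbool.h>
#include <stdio.h>

struct net_device_ops { int placeholder; };
struct net_device { const struct net_device_ops *netdev_ops; };

/* One shared ops table per driver: its address identifies the driver. */
static const struct net_device_ops my_netdev_ops = { 0 };

static bool is_my_netdev(const struct net_device *dev)
{
	return dev->netdev_ops == &my_netdev_ops;
}

int main(void)
{
	struct net_device_ops other_ops = { 0 };
	struct net_device mine = { &my_netdev_ops };
	struct net_device theirs = { &other_ops };

	printf("mine: %d, theirs: %d\n", is_my_netdev(&mine), is_my_netdev(&theirs));
	return 0;
}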
@@ -1368,6 +1368,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 			*match_level = MLX5_MATCH_L2;
 		}
+	} else {
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
 	}
 
 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
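For context: mlx5 flow-table matching is mask/value based, and headers_c is the mask ("criteria") side. Before this fix, a TC rule with no vlan key left the svlan_tag/cvlan_tag mask bits clear, so vlan-tagged packets could also hit the rule; setting the mask bits while the value bits stay zero restricts it to untagged traffic. A toy illustration of the mask/value idea, not driver code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_TAG_BIT 0x1u

struct match { uint32_t mask, value; };

/* Only bits set in the mask participate; masked-out bits match anything. */
static bool packet_matches(struct match m, uint32_t pkt_bits)
{
	return (pkt_bits & m.mask) == m.value;
}

int main(void)
{
	struct match no_mask  = { .mask = 0,            .value = 0 };
	struct match untagged = { .mask = VLAN_TAG_BIT, .value = 0 };

	/* A tagged packet slips through while the mask bit is clear... */
	printf("no mask, tagged pkt: %d\n", packet_matches(no_mask, VLAN_TAG_BIT));
	/* ...but is rejected once the mask bit is set with value 0. */
	printf("masked,  tagged pkt: %d\n", packet_matches(untagged, VLAN_TAG_BIT));
	printf("masked,  untagged:   %d\n", packet_matches(untagged, 0));
	return 0;
}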
@@ -2946,14 +2949,71 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 	return 0;
 }
 
+static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
+					      struct mlx5e_priv *peer_priv)
+{
+	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
+	struct mlx5e_hairpin_entry *hpe;
+	u16 peer_vhca_id;
+	int bkt;
+
+	if (!same_hw_devs(priv, peer_priv))
+		return;
+
+	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+
+	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
+		if (hpe->peer_vhca_id == peer_vhca_id)
+			hpe->hp->pair->peer_gone = true;
+	}
+}
+
+static int mlx5e_tc_netdev_event(struct notifier_block *this,
+				 unsigned long event, void *ptr)
+{
+	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+	struct mlx5e_flow_steering *fs;
+	struct mlx5e_priv *peer_priv;
+	struct mlx5e_tc_table *tc;
+	struct mlx5e_priv *priv;
+
+	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
+	    event != NETDEV_UNREGISTER ||
+	    ndev->reg_state == NETREG_REGISTERED)
+		return NOTIFY_DONE;
+
+	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
+	fs = container_of(tc, struct mlx5e_flow_steering, tc);
+	priv = container_of(fs, struct mlx5e_priv, fs);
+	peer_priv = netdev_priv(ndev);
+	if (priv == peer_priv ||
+	    !(priv->netdev->features & NETIF_F_HW_TC))
+		return NOTIFY_DONE;
+
+	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
+
+	return NOTIFY_DONE;
+}
+
 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 {
 	struct mlx5e_tc_table *tc = &priv->fs.tc;
+	int err;
 
 	hash_init(tc->mod_hdr_tbl);
 	hash_init(tc->hairpin_tbl);
 
-	return rhashtable_init(&tc->ht, &tc_ht_params);
+	err = rhashtable_init(&tc->ht, &tc_ht_params);
+	if (err)
+		return err;
+
+	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
+	if (register_netdevice_notifier(&tc->netdevice_nb)) {
+		tc->netdevice_nb.notifier_call = NULL;
+		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+	}
+
+	return err;
 }
 
 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
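The notifier callback above recovers the owning mlx5e_priv from the embedded notifier_block by applying container_of three times, one embedding per step. A self-contained sketch of that pattern, with simplified stand-in types instead of the real mlx5 structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block { int dummy; };
struct tc_table { struct notifier_block nb; };
struct steering { struct tc_table tc; };
struct priv     { struct steering fs; };

int main(void)
{
	struct priv p;
	struct notifier_block *nb = &p.fs.tc.nb;

	/* Walk outward one embedding at a time, as the driver does. */
	struct tc_table *tc = container_of(nb, struct tc_table, nb);
	struct steering *fs = container_of(tc, struct steering, tc);
	struct priv *priv   = container_of(fs, struct priv, fs);

	printf("recovered priv: %p (expected %p)\n", (void *)priv, (void *)&p);
	return 0;
}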
@@ -2969,6 +3029,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
 {
 	struct mlx5e_tc_table *tc = &priv->fs.tc;
 
+	if (tc->netdevice_nb.notifier_call)
+		unregister_netdevice_notifier(&tc->netdevice_nb);
+
 	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
 
 	if (!IS_ERR_OR_NULL(tc->t)) {
...
@@ -2000,7 +2000,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
 	u32 max_guarantee = 0;
 	int i;
 
-	for (i = 0; i <= esw->total_vports; i++) {
+	for (i = 0; i < esw->total_vports; i++) {
 		evport = &esw->vports[i];
 		if (!evport->enabled || evport->info.min_rate < max_guarantee)
 			continue;
@@ -2020,7 +2020,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
 	int err;
 	int i;
 
-	for (i = 0; i <= esw->total_vports; i++) {
+	for (i = 0; i < esw->total_vports; i++) {
 		evport = &esw->vports[i];
 		if (!evport->enabled)
 			continue;
...
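This is the out-of-bound access fix tagged for -stable v4.11: the vports array is indexed 0 .. total_vports-1, so the "<=" bound read one entry past the end on the last iteration. A minimal illustration:

#include <stdio.h>

#define TOTAL_VPORTS 4

int main(void)
{
	int min_rate[TOTAL_VPORTS] = { 10, 20, 30, 40 };
	int i, max = 0;

	/* Correct bound: indices run 0 .. TOTAL_VPORTS - 1. */
	for (i = 0; i < TOTAL_VPORTS; i++)
		if (min_rate[i] > max)
			max = min_rate[i];

	/* With "i <= TOTAL_VPORTS" the final iteration would read
	 * min_rate[TOTAL_VPORTS], one element past the end -- the
	 * out-of-bounds access the hunks above remove. */
	printf("max guarantee: %d\n", max);
	return 0;
}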
@@ -475,7 +475,8 @@ static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
 
 	for (i = 0; i < hp->num_channels; i++) {
 		mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
-		mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+		if (!hp->peer_gone)
+			mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
 	}
 }
 
@@ -567,6 +568,8 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
 				     MLX5_RQC_STATE_RST, 0, 0);
 
 	/* unset peer SQs */
+	if (hp->peer_gone)
+		return;
 	for (i = 0; i < hp->num_channels; i++)
 		mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
 				       MLX5_SQC_STATE_RST, 0, 0);
...
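The peer_gone flag set by the notifier path tells hairpin teardown to skip the SQ objects owned by a peer device that has already been unregistered; issuing destroy or modify commands against them would target objects the peer's own teardown has released. A small sketch of this guard-flag shape, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct hairpin {
	int num_channels;
	bool peer_gone;	/* set when the peer function went away */
};

static void destroy_peer_sq(int i) { printf("destroy peer SQ %d\n", i); }

static void hairpin_destroy_queues(struct hairpin *hp)
{
	int i;

	for (i = 0; i < hp->num_channels; i++) {
		printf("destroy local RQ %d\n", i);
		/* Peer resources died with the peer device; skip them. */
		if (!hp->peer_gone)
			destroy_peer_sq(i);
	}
}

int main(void)
{
	struct hairpin hp = { .num_channels = 2, .peer_gone = true };

	hairpin_destroy_queues(&hp);
	return 0;
}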
@@ -90,6 +90,8 @@ struct mlx5_hairpin {
 	u32 *rqn;
 	u32 *sqn;
 
+	bool peer_gone;
+
 };
 
 struct mlx5_hairpin *
...