Commit 540061ac authored by Eli Cohen, committed by Michael S. Tsirkin

vdpa/mlx5: Forward only packets with allowed MAC address

Add rules to forward packets to the net device's TIR only if the
destination MAC is equal to the configured MAC. This is required to
prevent the netdevice from receiving traffic that is not destined for its
configured MAC.
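
For intuition, here is a minimal userspace C sketch of the classification the
two hardware rules in the diff below implement; note the patch also installs a
second rule that passes multicast frames, matched via the I/G bit in the first
DMAC byte. The helper name is hypothetical and not part of the patch:

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

/* Sketch only: mirrors the two flow rules installed by add_fwd_to_tir().
 * Rule 1 matches the configured unicast MAC exactly (mask ff:ff:ff:ff:ff:ff);
 * rule 2 matches any multicast destination (mask 01:00:00:00:00:00, i.e. the
 * I/G bit). A frame that hits neither rule is dropped by the hardware.
 */
static bool rx_rule_accepts(const unsigned char *dmac, const unsigned char *cfg_mac)
{
	if (!memcmp(dmac, cfg_mac, ETH_ALEN))
		return true;	/* unicast rule: forward to TIR and count */
	if (dmac[0] & 0x01)
		return true;	/* multicast rule: forward to TIR */
	return false;
}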
Signed-off-by: Eli Cohen <elic@nvidia.com>
Reviewed-by: Parav Pandit <parav@nvidia.com>
Link: https://lore.kernel.org/r/20211026175519.87795-9-parav@nvidia.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
parent a007d940
@@ -158,7 +158,8 @@ struct mlx5_vdpa_net {
 	struct mutex reslock;
 	struct mlx5_flow_table *rxft;
 	struct mlx5_fc *rx_counter;
-	struct mlx5_flow_handle *rx_rule;
+	struct mlx5_flow_handle *rx_rule_ucast;
+	struct mlx5_flow_handle *rx_rule_mcast;
 	bool setup;
 	u32 cur_num_vqs;
 	struct notifier_block nb;
@@ -1383,21 +1384,33 @@ static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
 	struct mlx5_flow_table_attr ft_attr = {};
 	struct mlx5_flow_act flow_act = {};
 	struct mlx5_flow_namespace *ns;
+	struct mlx5_flow_spec *spec;
+	void *headers_c;
+	void *headers_v;
+	u8 *dmac_c;
+	u8 *dmac_v;
 	int err;
 
-	/* for now, one entry, match all, forward to tir */
-	ft_attr.max_fte = 1;
-	ft_attr.autogroup.max_num_groups = 1;
+	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+	if (!spec)
+		return -ENOMEM;
+
+	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	ft_attr.max_fte = 2;
+	ft_attr.autogroup.max_num_groups = 2;
 	ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
 	if (!ns) {
-		mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n");
-		return -EOPNOTSUPP;
+		mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
+		err = -EOPNOTSUPP;
+		goto err_ns;
 	}
 
 	ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
-	if (IS_ERR(ndev->rxft))
-		return PTR_ERR(ndev->rxft);
+	if (IS_ERR(ndev->rxft)) {
+		err = PTR_ERR(ndev->rxft);
+		goto err_ns;
+	}
 
 	ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false);
 	if (IS_ERR(ndev->rx_counter)) {
@@ -1405,37 +1418,64 @@ static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
 		goto err_fc;
 	}
 
+	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
+	memset(dmac_c, 0xff, ETH_ALEN);
+	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
+	ether_addr_copy(dmac_v, ndev->config.mac);
+
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT;
 	dest[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
 	dest[0].tir_num = ndev->res.tirn;
 	dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
 	dest[1].counter_id = mlx5_fc_id(ndev->rx_counter);
-	ndev->rx_rule = mlx5_add_flow_rules(ndev->rxft, NULL, &flow_act, dest, 2);
-	if (IS_ERR(ndev->rx_rule)) {
-		err = PTR_ERR(ndev->rx_rule);
-		ndev->rx_rule = NULL;
-		goto err_rule;
+	ndev->rx_rule_ucast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dest, 2);
+
+	if (IS_ERR(ndev->rx_rule_ucast)) {
+		err = PTR_ERR(ndev->rx_rule_ucast);
+		ndev->rx_rule_ucast = NULL;
+		goto err_rule_ucast;
+	}
+
+	memset(dmac_c, 0, ETH_ALEN);
+	memset(dmac_v, 0, ETH_ALEN);
+	dmac_c[0] = 1;
+	dmac_v[0] = 1;
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	ndev->rx_rule_mcast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dest, 1);
+	if (IS_ERR(ndev->rx_rule_mcast)) {
+		err = PTR_ERR(ndev->rx_rule_mcast);
+		ndev->rx_rule_mcast = NULL;
+		goto err_rule_mcast;
 	}
 
+	kvfree(spec);
 	return 0;
 
-err_rule:
+err_rule_mcast:
+	mlx5_del_flow_rules(ndev->rx_rule_ucast);
+	ndev->rx_rule_ucast = NULL;
+err_rule_ucast:
 	mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
 err_fc:
 	mlx5_destroy_flow_table(ndev->rxft);
+err_ns:
+	kvfree(spec);
 	return err;
 }
 
 static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
 {
-	if (!ndev->rx_rule)
+	if (!ndev->rx_rule_ucast)
 		return;
 
-	mlx5_del_flow_rules(ndev->rx_rule);
+	mlx5_del_flow_rules(ndev->rx_rule_mcast);
+	ndev->rx_rule_mcast = NULL;
+	mlx5_del_flow_rules(ndev->rx_rule_ucast);
+	ndev->rx_rule_ucast = NULL;
 	mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
 	mlx5_destroy_flow_table(ndev->rxft);
-	ndev->rx_rule = NULL;
 }
 
 static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
...
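
The mask/value pairing above follows the usual mlx5 steering convention:
spec->match_criteria holds the bitmask and spec->match_value the bits compared
under that mask, which is why the unicast rule sets an all-ones mask
(memset(dmac_c, 0xff, ETH_ALEN)) while the multicast rule masks only the I/G
bit (dmac_c[0] = 1). A stand-alone sketch of that convention, with a
hypothetical helper for illustration only:

/* Sketch of mask/value matching as used by mlx5 flow steering:
 * a field matches when every bit selected by the mask agrees with
 * the programmed value. Hypothetical helper, not driver code.
 */
static bool masked_match(const unsigned char *field, const unsigned char *mask,
			 const unsigned char *value, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if ((field[i] ^ value[i]) & mask[i])
			return false;
	return true;
}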