Commit 74491de9 authored by Mark Bloch, committed by Leon Romanovsky

net/mlx5: Add multi dest support

Currently when calling mlx5_add_flow_rule we accept only a single
flow destination; this commit allows callers to pass multiple
destinations.

This change requires a more flexible return structure. We introduce
a flow handle (struct mlx5_flow_handle) that internally holds the
number of rules created and an array in which each cell points to a
flow rule.

From the consumer's (of mlx5_add_flow_rule) point of view this
change is purely cosmetic: callers only need to change the type of
the returned value they store.

From the core's point of view, we now need to use a loop when
allocating and deleting rules (e.g. when given a flow handle).
Signed-off-by: Mark Bloch <markb@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent a6224985
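
For orientation, a minimal caller-side sketch of the API change this
patch makes; the flow table, spec and destination values below are
placeholders rather than a call site from this patch, and existing
single-destination callers simply keep passing their one destination
with dest_num = 1:

	/* Old API: a single destination, returning struct mlx5_flow_rule *. */
	struct mlx5_flow_rule *rule;

	rule = mlx5_add_flow_rule(ft, spec,
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				  MLX5_FS_DEFAULT_FLOW_TAG, &dest);
	if (IS_ERR(rule))
		return PTR_ERR(rule);
	mlx5_del_flow_rule(rule);

	/* New API: an array of destinations plus its size, returning
	 * struct mlx5_flow_handle *; error handling stays IS_ERR/PTR_ERR.
	 */
	struct mlx5_flow_handle *handle;
	struct mlx5_flow_destination dests[2];

	dests[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dests[0].ft = next_ft;		/* placeholder flow table */
	dests[1].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dests[1].tir_num = tirn;	/* placeholder TIR number */

	handle = mlx5_add_flow_rules(ft, spec,
				     MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				     MLX5_FS_DEFAULT_FLOW_TAG, dests, 2);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	mlx5_del_flow_rules(handle);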
...@@ -1771,13 +1771,13 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id) ...@@ -1771,13 +1771,13 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
mutex_lock(&dev->flow_db.lock); mutex_lock(&dev->flow_db.lock);
list_for_each_entry_safe(iter, tmp, &handler->list, list) { list_for_each_entry_safe(iter, tmp, &handler->list, list) {
mlx5_del_flow_rule(iter->rule); mlx5_del_flow_rules(iter->rule);
put_flow_table(dev, iter->prio, true); put_flow_table(dev, iter->prio, true);
list_del(&iter->list); list_del(&iter->list);
kfree(iter); kfree(iter);
} }
mlx5_del_flow_rule(handler->rule); mlx5_del_flow_rules(handler->rule);
put_flow_table(dev, handler->prio, true); put_flow_table(dev, handler->prio, true);
mutex_unlock(&dev->flow_db.lock); mutex_unlock(&dev->flow_db.lock);
...@@ -1907,10 +1907,10 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, ...@@ -1907,10 +1907,10 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST :
MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO; MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO;
handler->rule = mlx5_add_flow_rule(ft, spec, handler->rule = mlx5_add_flow_rules(ft, spec,
action, action,
MLX5_FS_DEFAULT_FLOW_TAG, MLX5_FS_DEFAULT_FLOW_TAG,
dst); dst, 1);
if (IS_ERR(handler->rule)) { if (IS_ERR(handler->rule)) {
err = PTR_ERR(handler->rule); err = PTR_ERR(handler->rule);
...@@ -1941,7 +1941,7 @@ static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *de ...@@ -1941,7 +1941,7 @@ static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *de
handler_dst = create_flow_rule(dev, ft_prio, handler_dst = create_flow_rule(dev, ft_prio,
flow_attr, dst); flow_attr, dst);
if (IS_ERR(handler_dst)) { if (IS_ERR(handler_dst)) {
mlx5_del_flow_rule(handler->rule); mlx5_del_flow_rules(handler->rule);
ft_prio->refcount--; ft_prio->refcount--;
kfree(handler); kfree(handler);
handler = handler_dst; handler = handler_dst;
...@@ -2004,7 +2004,7 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de ...@@ -2004,7 +2004,7 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
&leftovers_specs[LEFTOVERS_UC].flow_attr, &leftovers_specs[LEFTOVERS_UC].flow_attr,
dst); dst);
if (IS_ERR(handler_ucast)) { if (IS_ERR(handler_ucast)) {
mlx5_del_flow_rule(handler->rule); mlx5_del_flow_rules(handler->rule);
ft_prio->refcount--; ft_prio->refcount--;
kfree(handler); kfree(handler);
handler = handler_ucast; handler = handler_ucast;
...@@ -2046,7 +2046,7 @@ static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev, ...@@ -2046,7 +2046,7 @@ static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
return handler_rx; return handler_rx;
err_tx: err_tx:
mlx5_del_flow_rule(handler_rx->rule); mlx5_del_flow_rules(handler_rx->rule);
ft_rx->refcount--; ft_rx->refcount--;
kfree(handler_rx); kfree(handler_rx);
err: err:
......
...@@ -153,7 +153,7 @@ struct mlx5_ib_flow_handler { ...@@ -153,7 +153,7 @@ struct mlx5_ib_flow_handler {
struct list_head list; struct list_head list;
struct ib_flow ibflow; struct ib_flow ibflow;
struct mlx5_ib_flow_prio *prio; struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_rule *rule; struct mlx5_flow_handle *rule;
}; };
struct mlx5_ib_flow_db { struct mlx5_ib_flow_db {
......
...@@ -520,7 +520,7 @@ struct mlx5e_vxlan_db { ...@@ -520,7 +520,7 @@ struct mlx5e_vxlan_db {
struct mlx5e_l2_rule { struct mlx5e_l2_rule {
u8 addr[ETH_ALEN + 2]; u8 addr[ETH_ALEN + 2];
struct mlx5_flow_rule *rule; struct mlx5_flow_handle *rule;
}; };
struct mlx5e_flow_table { struct mlx5e_flow_table {
...@@ -541,10 +541,10 @@ struct mlx5e_tc_table { ...@@ -541,10 +541,10 @@ struct mlx5e_tc_table {
struct mlx5e_vlan_table { struct mlx5e_vlan_table {
struct mlx5e_flow_table ft; struct mlx5e_flow_table ft;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID]; struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID];
struct mlx5_flow_rule *untagged_rule; struct mlx5_flow_handle *untagged_rule;
struct mlx5_flow_rule *any_vlan_rule; struct mlx5_flow_handle *any_vlan_rule;
bool filter_disabled; bool filter_disabled;
}; };
struct mlx5e_l2_table { struct mlx5e_l2_table {
...@@ -562,14 +562,14 @@ struct mlx5e_l2_table { ...@@ -562,14 +562,14 @@ struct mlx5e_l2_table {
/* L3/L4 traffic type classifier */ /* L3/L4 traffic type classifier */
struct mlx5e_ttc_table { struct mlx5e_ttc_table {
struct mlx5e_flow_table ft; struct mlx5e_flow_table ft;
struct mlx5_flow_rule *rules[MLX5E_NUM_TT]; struct mlx5_flow_handle *rules[MLX5E_NUM_TT];
}; };
#define ARFS_HASH_SHIFT BITS_PER_BYTE #define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE) #define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
struct arfs_table { struct arfs_table {
struct mlx5e_flow_table ft; struct mlx5e_flow_table ft;
struct mlx5_flow_rule *default_rule; struct mlx5_flow_handle *default_rule;
struct hlist_head rules_hash[ARFS_HASH_SIZE]; struct hlist_head rules_hash[ARFS_HASH_SIZE];
}; };
......
...@@ -56,7 +56,7 @@ struct arfs_tuple { ...@@ -56,7 +56,7 @@ struct arfs_tuple {
struct arfs_rule { struct arfs_rule {
struct mlx5e_priv *priv; struct mlx5e_priv *priv;
struct work_struct arfs_work; struct work_struct arfs_work;
struct mlx5_flow_rule *rule; struct mlx5_flow_handle *rule;
struct hlist_node hlist; struct hlist_node hlist;
int rxq; int rxq;
/* Flow ID passed to ndo_rx_flow_steer */ /* Flow ID passed to ndo_rx_flow_steer */
...@@ -104,7 +104,7 @@ static int arfs_disable(struct mlx5e_priv *priv) ...@@ -104,7 +104,7 @@ static int arfs_disable(struct mlx5e_priv *priv)
tt = arfs_get_tt(i); tt = arfs_get_tt(i);
/* Modify ttc rules destination to bypass the aRFS tables*/ /* Modify ttc rules destination to bypass the aRFS tables*/
err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt], err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
&dest); &dest, NULL);
if (err) { if (err) {
netdev_err(priv->netdev, netdev_err(priv->netdev,
"%s: modify ttc destination failed\n", "%s: modify ttc destination failed\n",
...@@ -137,7 +137,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv) ...@@ -137,7 +137,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
tt = arfs_get_tt(i); tt = arfs_get_tt(i);
/* Modify ttc rules destination to point on the aRFS FTs */ /* Modify ttc rules destination to point on the aRFS FTs */
err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt], err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
&dest); &dest, NULL);
if (err) { if (err) {
netdev_err(priv->netdev, netdev_err(priv->netdev,
"%s: modify ttc destination failed err=%d\n", "%s: modify ttc destination failed err=%d\n",
...@@ -151,7 +151,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv) ...@@ -151,7 +151,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
static void arfs_destroy_table(struct arfs_table *arfs_t) static void arfs_destroy_table(struct arfs_table *arfs_t)
{ {
mlx5_del_flow_rule(arfs_t->default_rule); mlx5_del_flow_rules(arfs_t->default_rule);
mlx5e_destroy_flow_table(&arfs_t->ft); mlx5e_destroy_flow_table(&arfs_t->ft);
} }
...@@ -205,10 +205,10 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, ...@@ -205,10 +205,10 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
goto out; goto out;
} }
arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, spec, arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, MLX5_FS_DEFAULT_FLOW_TAG,
&dest); &dest, 1);
if (IS_ERR(arfs_t->default_rule)) { if (IS_ERR(arfs_t->default_rule)) {
err = PTR_ERR(arfs_t->default_rule); err = PTR_ERR(arfs_t->default_rule);
arfs_t->default_rule = NULL; arfs_t->default_rule = NULL;
...@@ -396,7 +396,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv) ...@@ -396,7 +396,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
spin_unlock_bh(&priv->fs.arfs.arfs_lock); spin_unlock_bh(&priv->fs.arfs.arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) { hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule) if (arfs_rule->rule)
mlx5_del_flow_rule(arfs_rule->rule); mlx5_del_flow_rules(arfs_rule->rule);
hlist_del(&arfs_rule->hlist); hlist_del(&arfs_rule->hlist);
kfree(arfs_rule); kfree(arfs_rule);
} }
...@@ -420,7 +420,7 @@ static void arfs_del_rules(struct mlx5e_priv *priv) ...@@ -420,7 +420,7 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) { hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work); cancel_work_sync(&rule->arfs_work);
if (rule->rule) if (rule->rule)
mlx5_del_flow_rule(rule->rule); mlx5_del_flow_rules(rule->rule);
hlist_del(&rule->hlist); hlist_del(&rule->hlist);
kfree(rule); kfree(rule);
} }
...@@ -462,12 +462,12 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, ...@@ -462,12 +462,12 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
return NULL; return NULL;
} }
static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv, static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule) struct arfs_rule *arfs_rule)
{ {
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs; struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple; struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_rule *rule = NULL; struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest; struct mlx5_flow_destination dest;
struct arfs_table *arfs_table; struct arfs_table *arfs_table;
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
...@@ -544,9 +544,9 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv, ...@@ -544,9 +544,9 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
} }
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn; dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, rule = mlx5_add_flow_rules(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, MLX5_FS_DEFAULT_FLOW_TAG,
&dest); &dest, 1);
if (IS_ERR(rule)) { if (IS_ERR(rule)) {
err = PTR_ERR(rule); err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n", netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
...@@ -559,14 +559,14 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv, ...@@ -559,14 +559,14 @@ static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
} }
static void arfs_modify_rule_rq(struct mlx5e_priv *priv, static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
struct mlx5_flow_rule *rule, u16 rxq) struct mlx5_flow_handle *rule, u16 rxq)
{ {
struct mlx5_flow_destination dst; struct mlx5_flow_destination dst;
int err = 0; int err = 0;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR; dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dst.tir_num = priv->direct_tir[rxq].tirn; dst.tir_num = priv->direct_tir[rxq].tirn;
err = mlx5_modify_rule_destination(rule, &dst); err = mlx5_modify_rule_destination(rule, &dst, NULL);
if (err) if (err)
netdev_warn(priv->netdev, netdev_warn(priv->netdev,
"Failed to modfiy aRFS rule destination to rq=%d\n", rxq); "Failed to modfiy aRFS rule destination to rq=%d\n", rxq);
...@@ -578,7 +578,7 @@ static void arfs_handle_work(struct work_struct *work) ...@@ -578,7 +578,7 @@ static void arfs_handle_work(struct work_struct *work)
struct arfs_rule, struct arfs_rule,
arfs_work); arfs_work);
struct mlx5e_priv *priv = arfs_rule->priv; struct mlx5e_priv *priv = arfs_rule->priv;
struct mlx5_flow_rule *rule; struct mlx5_flow_handle *rule;
mutex_lock(&priv->state_lock); mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
......
...@@ -160,7 +160,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, ...@@ -160,7 +160,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
{ {
struct mlx5_flow_table *ft = priv->fs.vlan.ft.t; struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_destination dest; struct mlx5_flow_destination dest;
struct mlx5_flow_rule **rule_p; struct mlx5_flow_handle **rule_p;
int err = 0; int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
...@@ -187,10 +187,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, ...@@ -187,10 +187,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
break; break;
} }
*rule_p = mlx5_add_flow_rule(ft, spec, *rule_p = mlx5_add_flow_rules(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, MLX5_FS_DEFAULT_FLOW_TAG,
&dest); &dest, 1);
if (IS_ERR(*rule_p)) { if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p); err = PTR_ERR(*rule_p);
...@@ -229,20 +229,20 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, ...@@ -229,20 +229,20 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
switch (rule_type) { switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED: case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
if (priv->fs.vlan.untagged_rule) { if (priv->fs.vlan.untagged_rule) {
mlx5_del_flow_rule(priv->fs.vlan.untagged_rule); mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
priv->fs.vlan.untagged_rule = NULL; priv->fs.vlan.untagged_rule = NULL;
} }
break; break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID: case MLX5E_VLAN_RULE_TYPE_ANY_VID:
if (priv->fs.vlan.any_vlan_rule) { if (priv->fs.vlan.any_vlan_rule) {
mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule); mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule);
priv->fs.vlan.any_vlan_rule = NULL; priv->fs.vlan.any_vlan_rule = NULL;
} }
break; break;
case MLX5E_VLAN_RULE_TYPE_MATCH_VID: case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
mlx5e_vport_context_update_vlans(priv); mlx5e_vport_context_update_vlans(priv);
if (priv->fs.vlan.active_vlans_rule[vid]) { if (priv->fs.vlan.active_vlans_rule[vid]) {
mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]); mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
priv->fs.vlan.active_vlans_rule[vid] = NULL; priv->fs.vlan.active_vlans_rule[vid] = NULL;
} }
mlx5e_vport_context_update_vlans(priv); mlx5e_vport_context_update_vlans(priv);
...@@ -560,7 +560,7 @@ static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc) ...@@ -560,7 +560,7 @@ static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
for (i = 0; i < MLX5E_NUM_TT; i++) { for (i = 0; i < MLX5E_NUM_TT; i++) {
if (!IS_ERR_OR_NULL(ttc->rules[i])) { if (!IS_ERR_OR_NULL(ttc->rules[i])) {
mlx5_del_flow_rule(ttc->rules[i]); mlx5_del_flow_rules(ttc->rules[i]);
ttc->rules[i] = NULL; ttc->rules[i] = NULL;
} }
} }
...@@ -616,13 +616,14 @@ static struct { ...@@ -616,13 +616,14 @@ static struct {
}, },
}; };
-static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
-						       struct mlx5_flow_table *ft,
-						       struct mlx5_flow_destination *dest,
-						       u16 etype,
-						       u8 proto)
+static struct mlx5_flow_handle *
+mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
+			struct mlx5_flow_table *ft,
+			struct mlx5_flow_destination *dest,
+			u16 etype,
+			u8 proto)
{ {
struct mlx5_flow_rule *rule; struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
int err = 0; int err = 0;
...@@ -643,10 +644,10 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, ...@@ -643,10 +644,10 @@ static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype); MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
} }
rule = mlx5_add_flow_rule(ft, spec, rule = mlx5_add_flow_rules(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, MLX5_FS_DEFAULT_FLOW_TAG,
dest); dest, 1);
if (IS_ERR(rule)) { if (IS_ERR(rule)) {
err = PTR_ERR(rule); err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: add rule failed\n", __func__); netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
...@@ -660,7 +661,7 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv) ...@@ -660,7 +661,7 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{ {
struct mlx5_flow_destination dest; struct mlx5_flow_destination dest;
struct mlx5e_ttc_table *ttc; struct mlx5e_ttc_table *ttc;
struct mlx5_flow_rule **rules; struct mlx5_flow_handle **rules;
struct mlx5_flow_table *ft; struct mlx5_flow_table *ft;
int tt; int tt;
int err; int err;
...@@ -801,7 +802,7 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, ...@@ -801,7 +802,7 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai) struct mlx5e_l2_rule *ai)
{ {
if (!IS_ERR_OR_NULL(ai->rule)) { if (!IS_ERR_OR_NULL(ai->rule)) {
mlx5_del_flow_rule(ai->rule); mlx5_del_flow_rules(ai->rule);
ai->rule = NULL; ai->rule = NULL;
} }
} }
...@@ -847,9 +848,9 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, ...@@ -847,9 +848,9 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
break; break;
} }
ai->rule = mlx5_add_flow_rule(ft, spec, ai->rule = mlx5_add_flow_rules(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG, &dest); MLX5_FS_DEFAULT_FLOW_TAG, &dest, 1);
if (IS_ERR(ai->rule)) { if (IS_ERR(ai->rule)) {
netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n", netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
__func__, mv_dmac); __func__, mv_dmac);
......
...@@ -36,7 +36,7 @@ ...@@ -36,7 +36,7 @@
struct mlx5e_ethtool_rule { struct mlx5e_ethtool_rule {
struct list_head list; struct list_head list;
struct ethtool_rx_flow_spec flow_spec; struct ethtool_rx_flow_spec flow_spec;
struct mlx5_flow_rule *rule; struct mlx5_flow_handle *rule;
struct mlx5e_ethtool_table *eth_ft; struct mlx5e_ethtool_table *eth_ft;
}; };
...@@ -284,13 +284,14 @@ static bool outer_header_zero(u32 *match_criteria) ...@@ -284,13 +284,14 @@ static bool outer_header_zero(u32 *match_criteria)
size - 1); size - 1);
} }
-static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
-						     struct mlx5_flow_table *ft,
-						     struct ethtool_rx_flow_spec *fs)
+static struct mlx5_flow_handle *
+add_ethtool_flow_rule(struct mlx5e_priv *priv,
+		      struct mlx5_flow_table *ft,
+		      struct ethtool_rx_flow_spec *fs)
{ {
struct mlx5_flow_destination *dst = NULL; struct mlx5_flow_destination *dst = NULL;
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
struct mlx5_flow_rule *rule; struct mlx5_flow_handle *rule;
int err = 0; int err = 0;
u32 action; u32 action;
...@@ -317,8 +318,8 @@ static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv, ...@@ -317,8 +318,8 @@ static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
} }
spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria)); spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
rule = mlx5_add_flow_rule(ft, spec, action, rule = mlx5_add_flow_rules(ft, spec, action,
MLX5_FS_DEFAULT_FLOW_TAG, dst); MLX5_FS_DEFAULT_FLOW_TAG, dst, 1);
if (IS_ERR(rule)) { if (IS_ERR(rule)) {
err = PTR_ERR(rule); err = PTR_ERR(rule);
netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n", netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
...@@ -335,7 +336,7 @@ static void del_ethtool_rule(struct mlx5e_priv *priv, ...@@ -335,7 +336,7 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *eth_rule) struct mlx5e_ethtool_rule *eth_rule)
{ {
if (eth_rule->rule) if (eth_rule->rule)
mlx5_del_flow_rule(eth_rule->rule); mlx5_del_flow_rules(eth_rule->rule);
list_del(&eth_rule->list); list_del(&eth_rule->list);
priv->fs.ethtool.tot_num_rules--; priv->fs.ethtool.tot_num_rules--;
put_flow_table(eth_rule->eth_ft); put_flow_table(eth_rule->eth_ft);
...@@ -475,7 +476,7 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, ...@@ -475,7 +476,7 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
{ {
struct mlx5e_ethtool_table *eth_ft; struct mlx5e_ethtool_table *eth_ft;
struct mlx5e_ethtool_rule *eth_rule; struct mlx5e_ethtool_rule *eth_rule;
struct mlx5_flow_rule *rule; struct mlx5_flow_handle *rule;
int num_tuples; int num_tuples;
int err; int err;
......
...@@ -328,7 +328,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) ...@@ -328,7 +328,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_eswitch_rep *rep = priv->ppriv; struct mlx5_eswitch_rep *rep = priv->ppriv;
struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_flow_rule *flow_rule; struct mlx5_flow_handle *flow_rule;
int err; int err;
int i; int i;
...@@ -360,7 +360,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) ...@@ -360,7 +360,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
return 0; return 0;
err_del_flow_rule: err_del_flow_rule:
mlx5_del_flow_rule(rep->vport_rx_rule); mlx5_del_flow_rules(rep->vport_rx_rule);
err_destroy_direct_tirs: err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv); mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts: err_destroy_direct_rqts:
...@@ -375,7 +375,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) ...@@ -375,7 +375,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
int i; int i;
mlx5e_tc_cleanup(priv); mlx5e_tc_cleanup(priv);
mlx5_del_flow_rule(rep->vport_rx_rule); mlx5_del_flow_rules(rep->vport_rx_rule);
mlx5e_destroy_direct_tirs(priv); mlx5e_destroy_direct_tirs(priv);
for (i = 0; i < priv->params.num_channels; i++) for (i = 0; i < priv->params.num_channels; i++)
mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt); mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
......
...@@ -47,21 +47,22 @@ ...@@ -47,21 +47,22 @@
struct mlx5e_tc_flow { struct mlx5e_tc_flow {
struct rhash_head node; struct rhash_head node;
u64 cookie; u64 cookie;
struct mlx5_flow_rule *rule; struct mlx5_flow_handle *rule;
struct mlx5_esw_flow_attr *attr; struct mlx5_esw_flow_attr *attr;
}; };
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024 #define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4 #define MLX5E_TC_TABLE_NUM_GROUPS 4
-static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
-						     struct mlx5_flow_spec *spec,
-						     u32 action, u32 flow_tag)
+static struct mlx5_flow_handle *
+mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
+		      struct mlx5_flow_spec *spec,
+		      u32 action, u32 flow_tag)
{ {
struct mlx5_core_dev *dev = priv->mdev; struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_flow_destination dest = { 0 }; struct mlx5_flow_destination dest = { 0 };
struct mlx5_fc *counter = NULL; struct mlx5_fc *counter = NULL;
struct mlx5_flow_rule *rule; struct mlx5_flow_handle *rule;
bool table_created = false; bool table_created = false;
if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
...@@ -94,9 +95,9 @@ static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, ...@@ -94,9 +95,9 @@ static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
} }
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
rule = mlx5_add_flow_rule(priv->fs.tc.t, spec, rule = mlx5_add_flow_rules(priv->fs.tc.t, spec,
action, flow_tag, action, flow_tag,
&dest); &dest, 1);
if (IS_ERR(rule)) if (IS_ERR(rule))
goto err_add_rule; goto err_add_rule;
...@@ -114,9 +115,10 @@ static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, ...@@ -114,9 +115,10 @@ static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
return rule; return rule;
} }
-static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
-						     struct mlx5_flow_spec *spec,
-						     struct mlx5_esw_flow_attr *attr)
+static struct mlx5_flow_handle *
+mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+		      struct mlx5_flow_spec *spec,
+		      struct mlx5_esw_flow_attr *attr)
{ {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
int err; int err;
...@@ -129,7 +131,7 @@ static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, ...@@ -129,7 +131,7 @@ static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
} }
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5_flow_rule *rule, struct mlx5_flow_handle *rule,
struct mlx5_esw_flow_attr *attr) struct mlx5_esw_flow_attr *attr)
{ {
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
...@@ -140,7 +142,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, ...@@ -140,7 +142,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
if (esw && esw->mode == SRIOV_OFFLOADS) if (esw && esw->mode == SRIOV_OFFLOADS)
mlx5_eswitch_del_vlan_action(esw, attr); mlx5_eswitch_del_vlan_action(esw, attr);
mlx5_del_flow_rule(rule); mlx5_del_flow_rules(rule);
mlx5_fc_destroy(priv->mdev, counter); mlx5_fc_destroy(priv->mdev, counter);
...@@ -450,7 +452,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, ...@@ -450,7 +452,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
u32 flow_tag, action; u32 flow_tag, action;
struct mlx5e_tc_flow *flow; struct mlx5e_tc_flow *flow;
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
struct mlx5_flow_rule *old = NULL; struct mlx5_flow_handle *old = NULL;
struct mlx5_esw_flow_attr *old_attr = NULL; struct mlx5_esw_flow_attr *old_attr = NULL;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
...@@ -511,7 +513,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, ...@@ -511,7 +513,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
goto out; goto out;
err_del_rule: err_del_rule:
mlx5_del_flow_rule(flow->rule); mlx5_del_flow_rules(flow->rule);
err_free: err_free:
if (!old) if (!old)
......
...@@ -56,7 +56,7 @@ struct esw_uc_addr { ...@@ -56,7 +56,7 @@ struct esw_uc_addr {
/* E-Switch MC FDB table hash node */ /* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */ struct esw_mc_addr { /* SRIOV only */
struct l2addr_node node; struct l2addr_node node;
struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */ struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
u32 refcnt; u32 refcnt;
}; };
...@@ -65,7 +65,7 @@ struct vport_addr { ...@@ -65,7 +65,7 @@ struct vport_addr {
struct l2addr_node node; struct l2addr_node node;
u8 action; u8 action;
u32 vport; u32 vport;
struct mlx5_flow_rule *flow_rule; /* SRIOV only */ struct mlx5_flow_handle *flow_rule; /* SRIOV only */
/* A flag indicating that mac was added due to mc promiscuous vport */ /* A flag indicating that mac was added due to mc promiscuous vport */
bool mc_promisc; bool mc_promisc;
}; };
...@@ -237,13 +237,13 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index) ...@@ -237,13 +237,13 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
} }
/* E-Switch FDB */ /* E-Switch FDB */
static struct mlx5_flow_rule * static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN]) u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{ {
int match_header = (is_zero_ether_addr(mac_c) ? 0 : int match_header = (is_zero_ether_addr(mac_c) ? 0 :
MLX5_MATCH_OUTER_HEADERS); MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_rule *flow_rule = NULL; struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_destination dest; struct mlx5_flow_destination dest;
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
void *mv_misc = NULL; void *mv_misc = NULL;
...@@ -286,9 +286,9 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, ...@@ -286,9 +286,9 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
dmac_v, dmac_c, vport); dmac_v, dmac_c, vport);
spec->match_criteria_enable = match_header; spec->match_criteria_enable = match_header;
flow_rule = flow_rule =
mlx5_add_flow_rule(esw->fdb_table.fdb, spec, mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest); 0, &dest, 1);
if (IS_ERR(flow_rule)) { if (IS_ERR(flow_rule)) {
esw_warn(esw->dev, esw_warn(esw->dev,
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
...@@ -300,7 +300,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, ...@@ -300,7 +300,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
return flow_rule; return flow_rule;
} }
static struct mlx5_flow_rule * static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{ {
u8 mac_c[ETH_ALEN]; u8 mac_c[ETH_ALEN];
...@@ -309,7 +309,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport) ...@@ -309,7 +309,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac); return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
} }
static struct mlx5_flow_rule * static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport) esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
{ {
u8 mac_c[ETH_ALEN]; u8 mac_c[ETH_ALEN];
...@@ -322,7 +322,7 @@ esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport) ...@@ -322,7 +322,7 @@ esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v); return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
} }
static struct mlx5_flow_rule * static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport) esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
{ {
u8 mac_c[ETH_ALEN]; u8 mac_c[ETH_ALEN];
...@@ -515,7 +515,7 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) ...@@ -515,7 +515,7 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
del_l2_table_entry(esw->dev, esw_uc->table_index); del_l2_table_entry(esw->dev, esw_uc->table_index);
if (vaddr->flow_rule) if (vaddr->flow_rule)
mlx5_del_flow_rule(vaddr->flow_rule); mlx5_del_flow_rules(vaddr->flow_rule);
vaddr->flow_rule = NULL; vaddr->flow_rule = NULL;
l2addr_hash_del(esw_uc); l2addr_hash_del(esw_uc);
...@@ -562,7 +562,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw, ...@@ -562,7 +562,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
case MLX5_ACTION_DEL: case MLX5_ACTION_DEL:
if (!iter_vaddr) if (!iter_vaddr)
continue; continue;
mlx5_del_flow_rule(iter_vaddr->flow_rule); mlx5_del_flow_rules(iter_vaddr->flow_rule);
l2addr_hash_del(iter_vaddr); l2addr_hash_del(iter_vaddr);
break; break;
} }
...@@ -632,7 +632,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) ...@@ -632,7 +632,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
esw_mc->uplink_rule); esw_mc->uplink_rule);
if (vaddr->flow_rule) if (vaddr->flow_rule)
mlx5_del_flow_rule(vaddr->flow_rule); mlx5_del_flow_rules(vaddr->flow_rule);
vaddr->flow_rule = NULL; vaddr->flow_rule = NULL;
/* If the multicast mac is added as a result of mc promiscuous vport, /* If the multicast mac is added as a result of mc promiscuous vport,
...@@ -645,7 +645,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr) ...@@ -645,7 +645,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
update_allmulti_vports(esw, vaddr, esw_mc); update_allmulti_vports(esw, vaddr, esw_mc);
if (esw_mc->uplink_rule) if (esw_mc->uplink_rule)
mlx5_del_flow_rule(esw_mc->uplink_rule); mlx5_del_flow_rules(esw_mc->uplink_rule);
l2addr_hash_del(esw_mc); l2addr_hash_del(esw_mc);
return 0; return 0;
...@@ -828,14 +828,14 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num, ...@@ -828,14 +828,14 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
UPLINK_VPORT); UPLINK_VPORT);
allmulti_addr->refcnt++; allmulti_addr->refcnt++;
} else if (vport->allmulti_rule) { } else if (vport->allmulti_rule) {
mlx5_del_flow_rule(vport->allmulti_rule); mlx5_del_flow_rules(vport->allmulti_rule);
vport->allmulti_rule = NULL; vport->allmulti_rule = NULL;
if (--allmulti_addr->refcnt > 0) if (--allmulti_addr->refcnt > 0)
goto promisc; goto promisc;
if (allmulti_addr->uplink_rule) if (allmulti_addr->uplink_rule)
mlx5_del_flow_rule(allmulti_addr->uplink_rule); mlx5_del_flow_rules(allmulti_addr->uplink_rule);
allmulti_addr->uplink_rule = NULL; allmulti_addr->uplink_rule = NULL;
} }
...@@ -847,7 +847,7 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num, ...@@ -847,7 +847,7 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw, vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
vport_num); vport_num);
} else if (vport->promisc_rule) { } else if (vport->promisc_rule) {
mlx5_del_flow_rule(vport->promisc_rule); mlx5_del_flow_rules(vport->promisc_rule);
vport->promisc_rule = NULL; vport->promisc_rule = NULL;
} }
} }
...@@ -1015,10 +1015,10 @@ static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw, ...@@ -1015,10 +1015,10 @@ static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport) struct mlx5_vport *vport)
{ {
if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
mlx5_del_flow_rule(vport->egress.allowed_vlan); mlx5_del_flow_rules(vport->egress.allowed_vlan);
if (!IS_ERR_OR_NULL(vport->egress.drop_rule)) if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
mlx5_del_flow_rule(vport->egress.drop_rule); mlx5_del_flow_rules(vport->egress.drop_rule);
vport->egress.allowed_vlan = NULL; vport->egress.allowed_vlan = NULL;
vport->egress.drop_rule = NULL; vport->egress.drop_rule = NULL;
...@@ -1173,10 +1173,10 @@ static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, ...@@ -1173,10 +1173,10 @@ static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport) struct mlx5_vport *vport)
{ {
if (!IS_ERR_OR_NULL(vport->ingress.drop_rule)) if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
mlx5_del_flow_rule(vport->ingress.drop_rule); mlx5_del_flow_rules(vport->ingress.drop_rule);
if (!IS_ERR_OR_NULL(vport->ingress.allow_rule)) if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
mlx5_del_flow_rule(vport->ingress.allow_rule); mlx5_del_flow_rules(vport->ingress.allow_rule);
vport->ingress.drop_rule = NULL; vport->ingress.drop_rule = NULL;
vport->ingress.allow_rule = NULL; vport->ingress.allow_rule = NULL;
...@@ -1253,9 +1253,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, ...@@ -1253,9 +1253,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
vport->ingress.allow_rule = vport->ingress.allow_rule =
mlx5_add_flow_rule(vport->ingress.acl, spec, mlx5_add_flow_rules(vport->ingress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_ALLOW, MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL); 0, NULL, 0);
if (IS_ERR(vport->ingress.allow_rule)) { if (IS_ERR(vport->ingress.allow_rule)) {
err = PTR_ERR(vport->ingress.allow_rule); err = PTR_ERR(vport->ingress.allow_rule);
esw_warn(esw->dev, esw_warn(esw->dev,
...@@ -1267,9 +1267,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, ...@@ -1267,9 +1267,9 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
memset(spec, 0, sizeof(*spec)); memset(spec, 0, sizeof(*spec));
vport->ingress.drop_rule = vport->ingress.drop_rule =
mlx5_add_flow_rule(vport->ingress.acl, spec, mlx5_add_flow_rules(vport->ingress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_DROP, MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL); 0, NULL, 0);
if (IS_ERR(vport->ingress.drop_rule)) { if (IS_ERR(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule); err = PTR_ERR(vport->ingress.drop_rule);
esw_warn(esw->dev, esw_warn(esw->dev,
...@@ -1321,9 +1321,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, ...@@ -1321,9 +1321,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
vport->egress.allowed_vlan = vport->egress.allowed_vlan =
mlx5_add_flow_rule(vport->egress.acl, spec, mlx5_add_flow_rules(vport->egress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_ALLOW, MLX5_FLOW_CONTEXT_ACTION_ALLOW,
0, NULL); 0, NULL, 0);
if (IS_ERR(vport->egress.allowed_vlan)) { if (IS_ERR(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan); err = PTR_ERR(vport->egress.allowed_vlan);
esw_warn(esw->dev, esw_warn(esw->dev,
...@@ -1336,9 +1336,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, ...@@ -1336,9 +1336,9 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
/* Drop others rule (star rule) */ /* Drop others rule (star rule) */
memset(spec, 0, sizeof(*spec)); memset(spec, 0, sizeof(*spec));
vport->egress.drop_rule = vport->egress.drop_rule =
mlx5_add_flow_rule(vport->egress.acl, spec, mlx5_add_flow_rules(vport->egress.acl, spec,
MLX5_FLOW_CONTEXT_ACTION_DROP, MLX5_FLOW_CONTEXT_ACTION_DROP,
0, NULL); 0, NULL, 0);
if (IS_ERR(vport->egress.drop_rule)) { if (IS_ERR(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule); err = PTR_ERR(vport->egress.drop_rule);
esw_warn(esw->dev, esw_warn(esw->dev,
...@@ -1667,7 +1667,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) ...@@ -1667,7 +1667,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
esw_disable_vport(esw, i); esw_disable_vport(esw, i);
if (mc_promisc && mc_promisc->uplink_rule) if (mc_promisc && mc_promisc->uplink_rule)
mlx5_del_flow_rule(mc_promisc->uplink_rule); mlx5_del_flow_rules(mc_promisc->uplink_rule);
esw_destroy_tsar(esw); esw_destroy_tsar(esw);
......
...@@ -97,16 +97,16 @@ struct vport_ingress { ...@@ -97,16 +97,16 @@ struct vport_ingress {
struct mlx5_flow_group *allow_spoofchk_only_grp; struct mlx5_flow_group *allow_spoofchk_only_grp;
struct mlx5_flow_group *allow_untagged_only_grp; struct mlx5_flow_group *allow_untagged_only_grp;
struct mlx5_flow_group *drop_grp; struct mlx5_flow_group *drop_grp;
struct mlx5_flow_rule *allow_rule; struct mlx5_flow_handle *allow_rule;
struct mlx5_flow_rule *drop_rule; struct mlx5_flow_handle *drop_rule;
}; };
struct vport_egress { struct vport_egress {
struct mlx5_flow_table *acl; struct mlx5_flow_table *acl;
struct mlx5_flow_group *allowed_vlans_grp; struct mlx5_flow_group *allowed_vlans_grp;
struct mlx5_flow_group *drop_grp; struct mlx5_flow_group *drop_grp;
struct mlx5_flow_rule *allowed_vlan; struct mlx5_flow_handle *allowed_vlan;
struct mlx5_flow_rule *drop_rule; struct mlx5_flow_handle *drop_rule;
}; };
struct mlx5_vport_info { struct mlx5_vport_info {
...@@ -125,8 +125,8 @@ struct mlx5_vport { ...@@ -125,8 +125,8 @@ struct mlx5_vport {
int vport; int vport;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE]; struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE]; struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
struct mlx5_flow_rule *promisc_rule; struct mlx5_flow_handle *promisc_rule;
struct mlx5_flow_rule *allmulti_rule; struct mlx5_flow_handle *allmulti_rule;
struct work_struct vport_change_handler; struct work_struct vport_change_handler;
struct vport_ingress ingress; struct vport_ingress ingress;
...@@ -162,7 +162,7 @@ struct mlx5_eswitch_fdb { ...@@ -162,7 +162,7 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_table *fdb; struct mlx5_flow_table *fdb;
struct mlx5_flow_group *send_to_vport_grp; struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *miss_grp; struct mlx5_flow_group *miss_grp;
struct mlx5_flow_rule *miss_rule; struct mlx5_flow_handle *miss_rule;
int vlan_push_pop_refcount; int vlan_push_pop_refcount;
} offloads; } offloads;
}; };
...@@ -175,7 +175,7 @@ enum { ...@@ -175,7 +175,7 @@ enum {
}; };
struct mlx5_esw_sq { struct mlx5_esw_sq {
struct mlx5_flow_rule *send_to_vport_rule; struct mlx5_flow_handle *send_to_vport_rule;
struct list_head list; struct list_head list;
}; };
...@@ -188,7 +188,7 @@ struct mlx5_eswitch_rep { ...@@ -188,7 +188,7 @@ struct mlx5_eswitch_rep {
u8 hw_id[ETH_ALEN]; u8 hw_id[ETH_ALEN];
void *priv_data; void *priv_data;
struct mlx5_flow_rule *vport_rx_rule; struct mlx5_flow_handle *vport_rx_rule;
struct list_head vport_sqs_list; struct list_head vport_sqs_list;
u16 vlan; u16 vlan;
u32 vlan_refcount; u32 vlan_refcount;
...@@ -257,11 +257,11 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, ...@@ -257,11 +257,11 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
struct mlx5_flow_spec; struct mlx5_flow_spec;
struct mlx5_esw_flow_attr; struct mlx5_esw_flow_attr;
struct mlx5_flow_rule * struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec, struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr); struct mlx5_esw_flow_attr *attr);
struct mlx5_flow_rule * struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn); mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
enum { enum {
......
...@@ -43,14 +43,14 @@ enum { ...@@ -43,14 +43,14 @@ enum {
FDB_SLOW_PATH FDB_SLOW_PATH
}; };
struct mlx5_flow_rule * struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec, struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr) struct mlx5_esw_flow_attr *attr)
{ {
struct mlx5_flow_destination dest = { 0 }; struct mlx5_flow_destination dest = { 0 };
struct mlx5_fc *counter = NULL; struct mlx5_fc *counter = NULL;
struct mlx5_flow_rule *rule; struct mlx5_flow_handle *rule;
void *misc; void *misc;
int action; int action;
...@@ -80,8 +80,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, ...@@ -80,8 +80,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
MLX5_MATCH_MISC_PARAMETERS; MLX5_MATCH_MISC_PARAMETERS;
rule = mlx5_add_flow_rule((struct mlx5_flow_table *)esw->fdb_table.fdb, rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
spec, action, 0, &dest); spec, action, 0, &dest, 1);
if (IS_ERR(rule)) if (IS_ERR(rule))
mlx5_fc_destroy(esw->dev, counter); mlx5_fc_destroy(esw->dev, counter);
...@@ -269,11 +269,11 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, ...@@ -269,11 +269,11 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
return err; return err;
} }
static struct mlx5_flow_rule * static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn) mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{ {
struct mlx5_flow_destination dest; struct mlx5_flow_destination dest;
struct mlx5_flow_rule *flow_rule; struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
void *misc; void *misc;
...@@ -296,9 +296,9 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn ...@@ -296,9 +296,9 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = vport; dest.vport_num = vport;
flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec, flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest); 0, &dest, 1);
if (IS_ERR(flow_rule)) if (IS_ERR(flow_rule))
esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule)); esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out: out:
...@@ -315,7 +315,7 @@ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw, ...@@ -315,7 +315,7 @@ void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
return; return;
list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) { list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
mlx5_del_flow_rule(esw_sq->send_to_vport_rule); mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
list_del(&esw_sq->list); list_del(&esw_sq->list);
kfree(esw_sq); kfree(esw_sq);
} }
...@@ -325,7 +325,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw, ...@@ -325,7 +325,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, struct mlx5_eswitch_rep *rep,
u16 *sqns_array, int sqns_num) u16 *sqns_array, int sqns_num)
{ {
struct mlx5_flow_rule *flow_rule; struct mlx5_flow_handle *flow_rule;
struct mlx5_esw_sq *esw_sq; struct mlx5_esw_sq *esw_sq;
int err; int err;
int i; int i;
...@@ -362,7 +362,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw, ...@@ -362,7 +362,7 @@ int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{ {
struct mlx5_flow_destination dest; struct mlx5_flow_destination dest;
struct mlx5_flow_rule *flow_rule = NULL; struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
int err = 0; int err = 0;
...@@ -376,9 +376,9 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) ...@@ -376,9 +376,9 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = 0; dest.vport_num = 0;
flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec, flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest); 0, &dest, 1);
if (IS_ERR(flow_rule)) { if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule); err = PTR_ERR(flow_rule);
esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err); esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
...@@ -501,7 +501,7 @@ static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw) ...@@ -501,7 +501,7 @@ static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
return; return;
esw_debug(esw->dev, "Destroy offloads FDB Table\n"); esw_debug(esw->dev, "Destroy offloads FDB Table\n");
mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule); mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
...@@ -585,11 +585,11 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw) ...@@ -585,11 +585,11 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->offloads.vport_rx_group); mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
} }
struct mlx5_flow_rule * struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{ {
struct mlx5_flow_destination dest; struct mlx5_flow_destination dest;
struct mlx5_flow_rule *flow_rule; struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec; struct mlx5_flow_spec *spec;
void *misc; void *misc;
...@@ -610,9 +610,9 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn) ...@@ -610,9 +610,9 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = tirn; dest.tir_num = tirn;
flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec, flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest); 0, &dest, 1);
if (IS_ERR(flow_rule)) { if (IS_ERR(flow_rule)) {
esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule)); esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
goto out; goto out;
......
...@@ -94,6 +94,11 @@ struct mlx5_flow_rule { ...@@ -94,6 +94,11 @@ struct mlx5_flow_rule {
u32 sw_action; u32 sw_action;
}; };
struct mlx5_flow_handle {
int num_rules;
struct mlx5_flow_rule *rule[];
};
/* Type of children is mlx5_flow_group */ /* Type of children is mlx5_flow_group */
struct mlx5_flow_table { struct mlx5_flow_table {
struct fs_node node; struct fs_node node;
......
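
The struct mlx5_flow_handle added above is what the commit message's
"loop when allocating and deleting rules" refers to. A hedged sketch of
the deletion side, assuming the handle and its rule[] array were
allocated together when the rules were created; __mlx5_del_flow_rule()
is a placeholder name for the core's per-rule teardown, not an API
shown in these hunks:

	void mlx5_del_flow_rules(struct mlx5_flow_handle *handle)
	{
		int i;

		/* Tear down every underlying rule tracked by the handle. */
		for (i = handle->num_rules - 1; i >= 0; i--)
			__mlx5_del_flow_rule(handle->rule[i]);
		/* Free the handle itself (assumes a single kzalloc covering
		 * the flexible rule[] array).
		 */
		kfree(handle);
	}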
...@@ -69,8 +69,8 @@ enum mlx5_flow_namespace_type { ...@@ -69,8 +69,8 @@ enum mlx5_flow_namespace_type {
struct mlx5_flow_table; struct mlx5_flow_table;
struct mlx5_flow_group; struct mlx5_flow_group;
struct mlx5_flow_rule;
struct mlx5_flow_namespace; struct mlx5_flow_namespace;
struct mlx5_flow_handle;
struct mlx5_flow_spec { struct mlx5_flow_spec {
u8 match_criteria_enable; u8 match_criteria_enable;
...@@ -127,18 +127,20 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg); ...@@ -127,18 +127,20 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
/* Single destination per rule. /* Single destination per rule.
* Group ID is implied by the match criteria. * Group ID is implied by the match criteria.
*/ */
-struct mlx5_flow_rule *
-mlx5_add_flow_rule(struct mlx5_flow_table *ft,
-		   struct mlx5_flow_spec *spec,
-		   u32 action,
-		   u32 flow_tag,
-		   struct mlx5_flow_destination *dest);
-void mlx5_del_flow_rule(struct mlx5_flow_rule *fr);
-
-int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
-				 struct mlx5_flow_destination *dest);
-
-struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule);
+struct mlx5_flow_handle *
+mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+		    struct mlx5_flow_spec *spec,
+		    u32 action,
+		    u32 flow_tag,
+		    struct mlx5_flow_destination *dest,
+		    int dest_num);
+void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
+
+int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
+				 struct mlx5_flow_destination *new_dest,
+				 struct mlx5_flow_destination *old_dest);
+
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter, void mlx5_fc_query_cached(struct mlx5_fc *counter,
......
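
One caller-facing note on the reworked mlx5_modify_rule_destination()
prototype above: the single-destination call sites touched by this
patch (the en_arfs.c hunks) pass NULL for the new old_dest argument;
presumably old_dest only matters when a handle carries several
destinations and the core needs to know which one to replace. A hedged
sketch of both forms:

	/* Single-destination handle: nothing to disambiguate, so
	 * old_dest may be NULL (as in the aRFS hunks above).
	 */
	err = mlx5_modify_rule_destination(handle, &new_dest, NULL);

	/* Multi-destination handle: name the destination being replaced
	 * (illustrative only; no call site in this patch uses this form).
	 */
	err = mlx5_modify_rule_destination(handle, &new_dest, &old_dest);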