Commit 1cabe6b0 authored by Maor Gottlieb, committed by David S. Miller

net/mlx5e: Create aRFS flow tables

Create the following four flow tables for aRFS usage:
1. IPv4 TCP - filters on the 4-tuple of IPv4 TCP packets.
2. IPv6 TCP - filters on the 4-tuple of IPv6 TCP packets.
3. IPv4 UDP - filters on the 4-tuple of IPv4 UDP packets.
4. IPv6 UDP - filters on the 4-tuple of IPv6 UDP packets.

Each flow table has two flow groups: one for the 4-tuple
filtering (full match), and one containing a single wildcard (*)
entry that serves as the miss rule.

A full-match hit means the packet was steered by aRFS and is
forwarded to the dedicated RQ/core; packets that hit the miss rule
are forwarded to the default RSS hashing.
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5a7b27eb
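
Before the diff, a condensed sketch of the layout this patch creates. This is an
illustrative summary only; all identifiers and sizes are taken from the patch below.

/*
 * One flow table is created per enum arfs_type (IPv4/IPv6 x TCP/UDP).
 * Each table contains two flow groups:
 *
 *   group 1: MLX5E_ARFS_GROUP1_SIZE = BIT(12) = 4096 entries, matching
 *            ethertype plus the full 4-tuple (src/dst IP, src/dst L4
 *            port). A hit here is an aRFS hit and the packet is
 *            forwarded to the dedicated RQ/core.
 *
 *   group 2: MLX5E_ARFS_GROUP2_SIZE = BIT(0) = 1 entry with empty match
 *            criteria - the default "miss" rule, forwarding to the
 *            protocol's indirect RSS TIR (e.g. tirn[MLX5E_TT_IPV4_TCP]).
 */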
drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -9,3 +9,4 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
 		en_txrx.o en_clock.o vxlan.o en_tc.o
 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
+mlx5_core-$(CONFIG_RFS_ACCEL) += en_arfs.o
drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -48,6 +48,8 @@
 #include "mlx5_core.h"
 #include "en_stats.h"
 
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+
 #define MLX5E_MAX_NUM_TC	8
 
 #define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
@@ -446,12 +448,38 @@ struct mlx5e_ttc_table {
 	struct mlx5_flow_rule	 *rules[MLX5E_NUM_TT];
 };
 
+struct arfs_table {
+	struct mlx5e_flow_table  ft;
+	struct mlx5_flow_rule    *default_rule;
+};
+
+enum arfs_type {
+	ARFS_IPV4_TCP,
+	ARFS_IPV6_TCP,
+	ARFS_IPV4_UDP,
+	ARFS_IPV6_UDP,
+	ARFS_NUM_TYPES,
+};
+
+struct mlx5e_arfs_tables {
+	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+};
+
+/* NIC prio FTS */
+enum {
+	MLX5E_VLAN_FT_LEVEL = 0,
+	MLX5E_L2_FT_LEVEL,
+	MLX5E_TTC_FT_LEVEL,
+	MLX5E_ARFS_FT_LEVEL
+};
+
 struct mlx5e_flow_steering {
 	struct mlx5_flow_namespace      *ns;
 	struct mlx5e_tc_table           tc;
 	struct mlx5e_vlan_table         vlan;
 	struct mlx5e_l2_table           l2;
 	struct mlx5e_ttc_table          ttc;
+	struct mlx5e_arfs_tables        arfs;
 };
 
 struct mlx5e_direct_tir {
@@ -570,6 +598,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv);
 int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
 void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
 void mlx5e_set_rx_mode_work(struct work_struct *work);
 
 void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
@@ -646,6 +675,18 @@ extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
 #endif
 
+#ifndef CONFIG_RFS_ACCEL
+static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+	return 0;
+}
+
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
+#else
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
+#endif
+
 u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
 
 #endif /* __MLX5_EN_H__ */
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c (new file)

/*
* Copyright (c) 2016, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "en.h"
#include <linux/mlx5/fs.h>
static void arfs_destroy_table(struct arfs_table *arfs_t)
{
mlx5_del_flow_rule(arfs_t->default_rule);
mlx5e_destroy_flow_table(&arfs_t->ft);
}
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
{
int i;
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
}
}
static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
struct mlx5_flow_destination dest;
u8 match_criteria_enable = 0;
u32 *tirn = priv->indir_tirn;
u32 *match_criteria;
u32 *match_value;
int err = 0;
match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
if (!match_value || !match_criteria) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
err = -ENOMEM;
goto out;
}
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
switch (type) {
case ARFS_IPV4_TCP:
dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
break;
case ARFS_IPV4_UDP:
dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
break;
case ARFS_IPV6_TCP:
dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
break;
case ARFS_IPV6_UDP:
dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
break;
default:
err = -EINVAL;
goto out;
}
arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable,
match_criteria, match_value,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest);
if (IS_ERR(arfs_t->default_rule)) {
err = PTR_ERR(arfs_t->default_rule);
arfs_t->default_rule = NULL;
netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
__func__, type);
}
out:
kvfree(match_criteria);
kvfree(match_value);
return err;
}
#define MLX5E_ARFS_NUM_GROUPS 2
#define MLX5E_ARFS_GROUP1_SIZE BIT(12)
#define MLX5E_ARFS_GROUP2_SIZE BIT(0)
#define MLX5E_ARFS_TABLE_SIZE (MLX5E_ARFS_GROUP1_SIZE +\
MLX5E_ARFS_GROUP2_SIZE)
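/*
 * Group 1 holds up to 2^12 full-match (4-tuple) entries; group 2 holds
 * the single wildcard entry used as the default (miss) rule.
 */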
static int arfs_create_groups(struct mlx5e_flow_table *ft,
enum arfs_type type)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
void *outer_headers_c;
int ix = 0;
u32 *in;
int err;
u8 *mc;
ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
sizeof(*ft->g), GFP_KERNEL);
in = mlx5_vzalloc(inlen);
if (!in || !ft->g) {
kvfree(ft->g);
kvfree(in);
return -ENOMEM;
}
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
outer_headers);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
switch (type) {
case ARFS_IPV4_TCP:
case ARFS_IPV6_TCP:
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
break;
case ARFS_IPV4_UDP:
case ARFS_IPV6_UDP:
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
break;
default:
err = -EINVAL;
goto out;
}
switch (type) {
case ARFS_IPV4_TCP:
case ARFS_IPV4_UDP:
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
src_ipv4_src_ipv6.ipv4_layout.ipv4);
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
break;
case ARFS_IPV6_TCP:
case ARFS_IPV6_UDP:
memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
0xff, 16);
memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
0xff, 16);
break;
default:
err = -EINVAL;
goto out;
}
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_ARFS_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err;
ft->num_groups++;
memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_ARFS_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err;
ft->num_groups++;
kvfree(in);
return 0;
err:
err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL;
out:
kvfree(in);
return err;
}
static int arfs_create_table(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
int err;
ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
err = arfs_create_groups(ft, type);
if (err)
goto err;
err = arfs_add_default_rule(priv, type);
if (err)
goto err;
return 0;
err:
mlx5e_destroy_flow_table(ft);
return err;
}
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
int err = 0;
int i;
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return 0;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
err = arfs_create_table(priv, i);
if (err)
goto err;
}
return 0;
err:
mlx5e_arfs_destroy_tables(priv);
return err;
}
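
Note: this patch only builds the aRFS flow tables and their default miss rules;
the full-match rules for individual flows (installed via the aRFS
ndo_rx_flow_steer path) are presumably added by a later patch in the series.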
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -42,15 +42,6 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
 static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
 				   struct mlx5e_l2_rule *ai);
 
-/* NIC prio FTS */
-enum {
-	MLX5E_VLAN_FT_LEVEL = 0,
-	MLX5E_L2_FT_LEVEL,
-	MLX5E_TTC_FT_LEVEL
-};
-
-#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
-
 enum {
 	MLX5E_FULLMATCH = 0,
 	MLX5E_ALLMULTI  = 1,
@@ -530,7 +521,7 @@ void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
 	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
 }
 
-static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
 {
 	mlx5e_destroy_groups(ft);
 	kfree(ft->g);
@@ -1083,11 +1074,18 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 	if (!priv->fs.ns)
 		return -EINVAL;
 
+	err = mlx5e_arfs_create_tables(priv);
+	if (err) {
+		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
+			   err);
+		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
+	}
+
 	err = mlx5e_create_ttc_table(priv);
 	if (err) {
 		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
 			   err);
-		return err;
+		goto err_destroy_arfs_tables;
 	}
 
 	err = mlx5e_create_l2_table(priv);
@@ -1110,6 +1108,8 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 	mlx5e_destroy_l2_table(priv);
 err_destroy_ttc_table:
 	mlx5e_destroy_ttc_table(priv);
+err_destroy_arfs_tables:
+	mlx5e_arfs_destroy_tables(priv);
 
 	return err;
 }
@@ -1120,4 +1120,5 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
 	mlx5e_destroy_vlan_table(priv);
 	mlx5e_destroy_l2_table(priv);
 	mlx5e_destroy_ttc_table(priv);
+	mlx5e_arfs_destroy_tables(priv);
 }
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2803,8 +2803,12 @@ static void mlx5e_build_netdev(struct net_device *netdev)
 	if (FT_CAP(flow_modify_en) &&
 	    FT_CAP(modify_root) &&
 	    FT_CAP(identified_miss_table_mode) &&
-	    FT_CAP(flow_table_modify))
-		priv->netdev->hw_features |= NETIF_F_HW_TC;
+	    FT_CAP(flow_table_modify)) {
+		netdev->hw_features |= NETIF_F_HW_TC;
+#ifdef CONFIG_RFS_ACCEL
+		netdev->hw_features |= NETIF_F_NTUPLE;
+#endif
+	}
 
 	netdev->features         |= NETIF_F_HIGHDMA;
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -74,7 +74,8 @@
 #define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
 			   LEFTOVERS_NUM_PRIOS)
 
-#define KERNEL_NIC_PRIO_NUM_LEVELS 3
+/* Vlan, mac, ttc, aRFS */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 4
 #define KERNEL_NIC_NUM_PRIOS 1
 /* One more level for tc */
 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)