Commit 95302c39 authored by David S. Miller

Merge tag 'mlx5e-updates-2018-12-11' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-12-11

From Eli Britstein,
Patches 1-10 add remote mirroring support.
Patches 1-4 refactor encap-related code as pre-steps for using per
destination encapsulation properties.
Patches 5-7 use the extended destination feature for single/multi
destination scenarios that have a single encap destination.
Patches 8-10 enable multiple encap destinations for a TC flow.
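
For orientation, the per-destination state these patches introduce looks
roughly as follows (abridged from the eswitch header hunk further down in
this diff; the elided fields are unchanged):

	enum {
		MLX5_ESW_DEST_ENCAP       = BIT(0), /* destination needs encap */
		MLX5_ESW_DEST_ENCAP_VALID = BIT(1), /* encap neighbour resolved */
	};

	struct mlx5_esw_flow_attr {
		...
		int split_count;
		int out_count;
		...
		struct {
			u32 flags;
			struct mlx5_eswitch_rep *rep;
			struct mlx5_core_dev *mdev;
			u32 encap_id;	/* per-destination reformat context */
		} dests[MLX5_MAX_FLOW_FWD_VPORTS];
		...
	};

Giving each forwarding destination its own encap ID and flags is what lets
a single TC flow mirror to several tunnel destinations.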

From Daniel Jurgens,
Patch 11 uses CQE padding for Ethernet CQs; on PPC this showed up to a
24% improvement in small packet throughput.
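
The mechanics are visible in the hunks below: when the device can always
produce 128B CQEs and the host cache line is at least 128B, the driver
requests padded CQEs, the CQ work queue switches to a 128B stride, and
readers skip the leading 64B pad (condensed from the en_main.c and wq.h
changes in this diff):

	/* en_main.c: request padded CQEs where they pay off */
	if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);

	/* wq.h: a 128B stride (log_stride == 7) keeps its data in the last 64B */
	cqe += wq->fbc.log_stride == 7;

The likely source of the win: each CQE then occupies a full, aligned cache
line, so a completion write never partially updates a line shared with a
neighbouring CQE.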

From Eyal Davidovich,
Patches 12-14 add FW monitor counter support.
The FW monitor counters feature addresses the delayed reporting of FW
stats in the atomic get_stats64 ndo: since the FW cannot be accessed at
that stage, this feature enables immediate FW stats updates in the
driver via FW events on specific stats updates.

Patch 12 is a cleanup that avoids querying a FW counter when it is not
supported.
Patch 13 adds support for the monitor counters FW commands.
Patch 14 uses monitor counters in the Ethernet netdevice to update the
FW stats reported in the atomic get_stats64 ndo.
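
The resulting driver flow, condensed from the new en/monitor_stats.c added
below (a call-graph sketch, not complete code):

	mlx5e_monitor_counter_init()
		-> mlx5e_monitor_counter_start()  /* register MONITOR_COUNTER notifier */
		-> mlx5e_set_monitor_counter()    /* program watched PPCNT/Q counters */
		-> mlx5e_monitor_counter_arm()    /* ask FW to report the next change */

	/* FW raises an event when a watched counter changes */
	mlx5e_monitor_event_handler()
		-> queue_work(priv->wq, &priv->monitor_counters_work)

	mlx5e_monitor_counters_work()
		-> mlx5e_update_ndo_stats()       /* refresh cached stats under state_lock */
		-> mlx5e_monitor_counter_arm()    /* re-arm for the next event */

get_stats64 can then serve cached values; the deferred update work is only
queued when the feature is absent.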
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 43d4b297 5c7e8bbb
@@ -22,7 +22,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 #
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
 		en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
-		en_selftest.o en/port.o
+		en_selftest.o en/port.o en/monitor_stats.o
 #
 # Netdev extra
...
@@ -685,6 +685,8 @@ struct mlx5e_priv {
 	struct work_struct set_rx_mode_work;
 	struct work_struct tx_timeout_work;
 	struct work_struct update_stats_work;
+	struct work_struct monitor_counters_work;
+	struct mlx5_nb     monitor_counters_nb;
 	struct mlx5_core_dev *mdev;
 	struct net_device *netdev;
@@ -940,6 +942,7 @@ int mlx5e_create_tises(struct mlx5e_priv *priv);
 void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
 int mlx5e_close(struct net_device *netdev);
 int mlx5e_open(struct net_device *netdev);
+void mlx5e_update_ndo_stats(struct mlx5e_priv *priv);
 void mlx5e_queue_update_stats(struct mlx5e_priv *priv);
 int mlx5e_bits_invert(unsigned long a, int size);
...
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Mellanox Technologies. */

#include "en.h"
#include "monitor_stats.h"
#include "lib/eq.h"

/* Driver will set the following watch counters list:
 * Ppcnt.802_3:
 *   a_in_range_length_errors      Type: 0x0, Counter: 0x0, group_id = N/A
 *   a_out_of_range_length_field   Type: 0x0, Counter: 0x1, group_id = N/A
 *   a_frame_too_long_errors       Type: 0x0, Counter: 0x2, group_id = N/A
 *   a_frame_check_sequence_errors Type: 0x0, Counter: 0x3, group_id = N/A
 *   a_alignment_errors            Type: 0x0, Counter: 0x4, group_id = N/A
 *   if_out_discards               Type: 0x0, Counter: 0x5, group_id = N/A
 * Q_Counters:
 *   Q[index].rx_out_of_buffer     Type: 0x1, Counter: 0x4, group_id = counter_ix
 */

#define NUM_REQ_PPCNT_COUNTER_S1 MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1
#define NUM_REQ_Q_COUNTERS_S1    MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1

int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters))
		return false;
	if (MLX5_CAP_PCAM_REG(mdev, ppcnt) &&
	    MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters) <
	    NUM_REQ_PPCNT_COUNTER_S1)
		return false;
	if (MLX5_CAP_GEN(mdev, num_q_monitor_counters) <
	    NUM_REQ_Q_COUNTERS_S1)
		return false;
	return true;
}

void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv)
{
	u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)]   = {};
	u32 out[MLX5_ST_SZ_DW(arm_monitor_counter_out)] = {};

	MLX5_SET(arm_monitor_counter_in, in, opcode,
		 MLX5_CMD_OP_ARM_MONITOR_COUNTER);
	mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
}

static void mlx5e_monitor_counters_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       monitor_counters_work);

	mutex_lock(&priv->state_lock);
	mlx5e_update_ndo_stats(priv);
	mutex_unlock(&priv->state_lock);
	mlx5e_monitor_counter_arm(priv);
}

static int mlx5e_monitor_event_handler(struct notifier_block *nb,
				       unsigned long event, void *eqe)
{
	struct mlx5e_priv *priv = mlx5_nb_cof(nb, struct mlx5e_priv,
					      monitor_counters_nb);
	queue_work(priv->wq, &priv->monitor_counters_work);
	return NOTIFY_OK;
}

void mlx5e_monitor_counter_start(struct mlx5e_priv *priv)
{
	MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler,
		     MONITOR_COUNTER);
	mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb);
}

static void mlx5e_monitor_counter_stop(struct mlx5e_priv *priv)
{
	mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb);
	cancel_work_sync(&priv->monitor_counters_work);
}

static int fill_monitor_counter_ppcnt_set1(int cnt, u32 *in)
{
	enum mlx5_monitor_counter_ppcnt ppcnt_cnt;

	for (ppcnt_cnt = 0;
	     ppcnt_cnt < NUM_REQ_PPCNT_COUNTER_S1;
	     ppcnt_cnt++, cnt++) {
		MLX5_SET(set_monitor_counter_in, in,
			 monitor_counter[cnt].type,
			 MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT);
		MLX5_SET(set_monitor_counter_in, in,
			 monitor_counter[cnt].counter,
			 ppcnt_cnt);
	}
	return ppcnt_cnt;
}

static int fill_monitor_counter_q_counter_set1(int cnt, int q_counter, u32 *in)
{
	MLX5_SET(set_monitor_counter_in, in,
		 monitor_counter[cnt].type,
		 MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER);
	MLX5_SET(set_monitor_counter_in, in,
		 monitor_counter[cnt].counter,
		 MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER);
	MLX5_SET(set_monitor_counter_in, in,
		 monitor_counter[cnt].counter_group_id,
		 q_counter);
	return 1;
}

/* check if mlx5e_monitor_counter_supported before calling this function */
static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters);
	int num_q_counters      = MLX5_CAP_GEN(mdev, num_q_monitor_counters);
	int num_ppcnt_counters  = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 0 :
				  MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters);
	u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)]   = {};
	u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};
	int q_counter = priv->q_counter;
	int cnt = 0;

	if (num_ppcnt_counters >= NUM_REQ_PPCNT_COUNTER_S1 &&
	    max_num_of_counters >= (NUM_REQ_PPCNT_COUNTER_S1 + cnt))
		cnt += fill_monitor_counter_ppcnt_set1(cnt, in);

	if (num_q_counters >= NUM_REQ_Q_COUNTERS_S1 &&
	    max_num_of_counters >= (NUM_REQ_Q_COUNTERS_S1 + cnt) &&
	    q_counter)
		cnt += fill_monitor_counter_q_counter_set1(cnt, q_counter, in);

	MLX5_SET(set_monitor_counter_in, in, num_of_counters, cnt);
	MLX5_SET(set_monitor_counter_in, in, opcode,
		 MLX5_CMD_OP_SET_MONITOR_COUNTER);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

/* check if mlx5e_monitor_counter_supported before calling this function */
void mlx5e_monitor_counter_init(struct mlx5e_priv *priv)
{
	INIT_WORK(&priv->monitor_counters_work, mlx5e_monitor_counters_work);
	mlx5e_monitor_counter_start(priv);
	mlx5e_set_monitor_counter(priv);
	mlx5e_monitor_counter_arm(priv);
	queue_work(priv->wq, &priv->update_stats_work);
}

static void mlx5e_monitor_counter_disable(struct mlx5e_priv *priv)
{
	u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)]   = {};
	u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {};

	MLX5_SET(set_monitor_counter_in, in, num_of_counters, 0);
	MLX5_SET(set_monitor_counter_in, in, opcode,
		 MLX5_CMD_OP_SET_MONITOR_COUNTER);

	mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out));
}

/* check if mlx5e_monitor_counter_supported before calling this function */
void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_monitor_counter_disable(priv);
	mlx5e_monitor_counter_stop(priv);
}
...
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */

#ifndef __MLX5_MONITOR_H__
#define __MLX5_MONITOR_H__

int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv);
void mlx5e_monitor_counter_init(struct mlx5e_priv *priv);
void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv);
void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv);

#endif /* __MLX5_MONITOR_H__ */
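
A condensed view of how the netdevice code consumes these entry points,
mirroring the en_main.c hunks that follow:

	/* mlx5e_nic_enable() */
	if (mlx5e_monitor_counter_supported(priv))
		mlx5e_monitor_counter_init(priv);

	/* mlx5e_nic_disable() */
	if (mlx5e_monitor_counter_supported(priv))
		mlx5e_monitor_counter_cleanup(priv);

	/* mlx5e_get_stats(): only fall back to deferred HW polling when
	 * monitor counters are not supported
	 */
	if (!mlx5e_monitor_counter_supported(priv))
		mlx5e_queue_update_stats(priv);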
@@ -50,6 +50,7 @@
 #include "en/port.h"
 #include "en/xdp.h"
 #include "lib/eq.h"
+#include "en/monitor_stats.h"

 struct mlx5e_rq_param {
 	u32 rqc[MLX5_ST_SZ_DW(rqc)];
@@ -263,7 +264,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
 		mlx5e_stats_grps[i].update_stats(priv);
 }

-static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
+void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
 {
 	int i;
@@ -2224,6 +2225,8 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
 	void *cqc = param->cqc;

 	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
+	if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128)
+		MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
 }

 static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
@@ -3457,8 +3460,10 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

-	/* update HW stats in background for next time */
-	mlx5e_queue_update_stats(priv);
+	if (!mlx5e_monitor_counter_supported(priv)) {
+		/* update HW stats in background for next time */
+		mlx5e_queue_update_stats(priv);
+	}

 	if (mlx5e_is_uplink_rep(priv)) {
 		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
@@ -4899,6 +4904,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 	mlx5_lag_add(mdev, netdev);
 	mlx5e_enable_async_events(priv);
+	if (mlx5e_monitor_counter_supported(priv))
+		mlx5e_monitor_counter_init(priv);

 	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_register_vport_reps(priv);
@@ -4938,6 +4945,9 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_unregister_vport_reps(priv);

+	if (mlx5e_monitor_counter_supported(priv))
+		mlx5e_monitor_counter_cleanup(priv);
+
 	mlx5e_disable_async_events(priv);
 	mlx5_lag_remove(mdev);
 }
...
@@ -483,6 +483,9 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
 	return idx;
 }

+#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
+	(MLX5_CAP_GEN(mdev, pcam_reg) ? MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1)
+
 static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
 {
 	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
@@ -491,6 +494,9 @@ static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv)
 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
 	void *out;

+	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+		return;
+
 	MLX5_SET(ppcnt_reg, in, local_port, 1);
 	out = pstats->IEEE_802_3_counters;
 	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
@@ -603,6 +609,9 @@ static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv)
 	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
 	void *out;

+	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+		return;
+
 	MLX5_SET(ppcnt_reg, in, local_port, 1);
 	out = pstats->RFC_2819_counters;
 	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
@@ -1078,6 +1087,9 @@ static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv)
 	int prio;
 	void *out;

+	if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+		return;
+
 	MLX5_SET(ppcnt_reg, in, local_port, 1);
 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
...
@@ -78,13 +78,39 @@ enum {

 #define MLX5E_TC_MAX_SPLITS 1

+/* Helper struct for accessing a struct containing list_head array.
+ * Containing struct
+ *       |- Helper array
+ *           [0] Helper item 0
+ *               |- list_head item 0
+ *               |- index (0)
+ *           [1] Helper item 1
+ *               |- list_head item 1
+ *               |- index (1)
+ * To access the containing struct from one of the list_head items:
+ * 1. Get the helper item from the list_head item using
+ *    helper item =
+ *            container_of(list_head item, helper struct type, list_head field)
+ * 2. Get the containing struct from the helper item and its index in the array:
+ *    containing struct =
+ *            container_of(helper item, containing struct type, helper field[index])
+ */
+struct encap_flow_item {
+	struct list_head list;
+	int index;
+};
+
 struct mlx5e_tc_flow {
 	struct rhash_head node;
 	struct mlx5e_priv *priv;
 	u64 cookie;
 	u16 flags;
 	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
-	struct list_head encap; /* flows sharing the same encap ID */
+	/* Flow can be associated with multiple encap IDs.
+	 * The number of encaps is bounded by the number of supported
+	 * destinations.
+	 */
+	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
 	struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
 	struct list_head hairpin; /* flows sharing the same hairpin */
 	union {
@@ -94,12 +120,12 @@ struct mlx5e_tc_flow {
 };

 struct mlx5e_tc_flow_parse_attr {
-	struct ip_tunnel_info tun_info;
+	struct ip_tunnel_info tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
 	struct net_device *filter_dev;
 	struct mlx5_flow_spec spec;
 	int num_mod_hdr_actions;
 	void *mod_hdr_actions;
-	int mirred_ifindex;
+	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
 };

 #define MLX5E_TC_TABLE_NUM_GROUPS 4
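
The double container_of lookup described in the comment above can be
exercised in isolation. A minimal, self-contained sketch (hypothetical
types, not driver code; like the kernel, it relies on the compiler
accepting a variable array index inside offsetof, which GCC's builtin
does):

	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct item {			/* plays the role of encap_flow_item */
		struct list_head list;
		int index;		/* position inside owner->items[] */
	};

	struct owner {			/* plays the role of mlx5e_tc_flow */
		struct item items[2];
	};

	static struct owner *owner_of(struct list_head *lh)
	{
		/* 1) list_head -> helper item */
		struct item *it = container_of(lh, struct item, list);
		/* 2) helper item + its stored index -> containing struct */
		return container_of(it, struct owner, items[it->index]);
	}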
@@ -571,7 +597,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
 				  struct mlx5e_tc_flow_parse_attr *parse_attr,
 				  struct netlink_ext_ack *extack)
 {
-	int peer_ifindex = parse_attr->mirred_ifindex;
+	int peer_ifindex = parse_attr->mirred_ifindex[0];
 	struct mlx5_hairpin_params params;
 	struct mlx5_core_dev *peer_mdev;
 	struct mlx5e_hairpin_entry *hpe;
@@ -817,14 +843,15 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
 }

 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
-			       struct mlx5e_tc_flow *flow);
+			       struct mlx5e_tc_flow *flow, int out_index);

 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 			      struct ip_tunnel_info *tun_info,
 			      struct net_device *mirred_dev,
 			      struct net_device **encap_dev,
 			      struct mlx5e_tc_flow *flow,
-			      struct netlink_ext_ack *extack);
+			      struct netlink_ext_ack *extack,
+			      int out_index);

 static struct mlx5_flow_handle *
 mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
@@ -838,7 +865,7 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
 	if (IS_ERR(rule))
 		return rule;

-	if (attr->mirror_count) {
+	if (attr->split_count) {
 		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
 		if (IS_ERR(flow->rule[1])) {
 			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
@@ -857,7 +884,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
 {
 	flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;

-	if (attr->mirror_count)
+	if (attr->split_count)
 		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

 	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
@@ -873,7 +900,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
 	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
 	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-	slow_attr->mirror_count = 0,
+	slow_attr->split_count = 0,
 	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN,

 	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
@@ -908,6 +935,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 	struct mlx5e_rep_priv *rpriv;
 	struct mlx5e_priv *out_priv;
 	int err = 0, encap_err = 0;
+	int out_index;

 	/* if prios are not supported, keep the old behaviour of using same prio
 	 * for all offloaded rules.
@@ -927,20 +955,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		goto err_max_prio_chain;
 	}

-	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+		int mirred_ifindex;
+
+		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+			continue;
+
+		mirred_ifindex = attr->parse_attr->mirred_ifindex[out_index];
 		out_dev = __dev_get_by_index(dev_net(priv->netdev),
-					     attr->parse_attr->mirred_ifindex);
-		encap_err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
-					       out_dev, &encap_dev, flow,
-					       extack);
-		if (encap_err && encap_err != -EAGAIN) {
-			err = encap_err;
+					     mirred_ifindex);
+		err = mlx5e_attach_encap(priv,
+					 &parse_attr->tun_info[out_index],
+					 out_dev, &encap_dev, flow,
+					 extack, out_index);
+		if (err && err != -EAGAIN)
 			goto err_attach_encap;
-		}
+		if (err == -EAGAIN)
+			encap_err = err;
 		out_priv = netdev_priv(encap_dev);
 		rpriv = out_priv->ppriv;
-		attr->out_rep[attr->out_count] = rpriv->rep;
-		attr->out_mdev[attr->out_count++] = out_priv->mdev;
+		attr->dests[out_index].rep = rpriv->rep;
+		attr->dests[out_index].mdev = out_priv->mdev;
 	}

 	err = mlx5_eswitch_add_vlan_action(esw, attr);
@@ -991,8 +1026,9 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 err_mod_hdr:
 	mlx5_eswitch_del_vlan_action(esw, attr);
 err_add_vlan:
-	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
-		mlx5e_detach_encap(priv, flow);
+	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
+		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
+			mlx5e_detach_encap(priv, flow, out_index);
 err_attach_encap:
 err_max_prio_chain:
 	return err;
@@ -1004,6 +1040,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
 	struct mlx5_esw_flow_attr slow_attr;
+	int out_index;

 	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
 		if (flow->flags & MLX5E_TC_FLOW_SLOW)
@@ -1014,10 +1051,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,

 	mlx5_eswitch_del_vlan_action(esw, attr);

-	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
-		mlx5e_detach_encap(priv, flow);
-		kvfree(attr->parse_attr);
-	}
+	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
+		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
+			mlx5e_detach_encap(priv, flow, out_index);
+	kvfree(attr->parse_attr);

 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		mlx5e_detach_mod_hdr(priv, flow);
@@ -1033,6 +1070,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
 	struct mlx5_esw_flow_attr slow_attr, *esw_attr;
 	struct mlx5_flow_handle *rule;
 	struct mlx5_flow_spec *spec;
+	struct encap_flow_item *efi;
 	struct mlx5e_tc_flow *flow;
 	int err;
@@ -1049,11 +1087,31 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
 	e->flags |= MLX5_ENCAP_ENTRY_VALID;
 	mlx5e_rep_queue_neigh_stats_work(priv);

-	list_for_each_entry(flow, &e->flows, encap) {
+	list_for_each_entry(efi, &e->flows, list) {
+		bool all_flow_encaps_valid = true;
+		int i;
+
+		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 		esw_attr = flow->esw_attr;
-		esw_attr->encap_id = e->encap_id;
 		spec = &esw_attr->parse_attr->spec;
+
+		esw_attr->dests[efi->index].encap_id = e->encap_id;
+		esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+		/* Flow can be associated with multiple encap entries.
+		 * Before offloading the flow verify that all of them have
+		 * a valid neighbour.
+		 */
+		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
+			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
+				continue;
+			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
+				all_flow_encaps_valid = false;
+				break;
+			}
+		}
+		/* Do not offload flows with unresolved neighbors */
+		if (!all_flow_encaps_valid)
+			continue;
+
 		/* update from slow path rule to encap rule */
 		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
 		if (IS_ERR(rule)) {
@@ -1076,14 +1134,18 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
 	struct mlx5_esw_flow_attr slow_attr;
 	struct mlx5_flow_handle *rule;
 	struct mlx5_flow_spec *spec;
+	struct encap_flow_item *efi;
 	struct mlx5e_tc_flow *flow;
 	int err;

-	list_for_each_entry(flow, &e->flows, encap) {
+	list_for_each_entry(efi, &e->flows, list) {
+		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 		spec = &flow->esw_attr->parse_attr->spec;

 		/* update from encap rule to slow path rule */
 		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
+		/* mark the flow's encap dest as non-valid */
+		flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

 		if (IS_ERR(rule)) {
 			err = PTR_ERR(rule);
@@ -1132,9 +1194,12 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 		return;

 	list_for_each_entry(e, &nhe->encap_list, encap_list) {
+		struct encap_flow_item *efi;
 		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
 			continue;
-		list_for_each_entry(flow, &e->flows, encap) {
+		list_for_each_entry(efi, &e->flows, list) {
+			flow = container_of(efi, struct mlx5e_tc_flow,
+					    encaps[efi->index]);
 			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
 				counter = mlx5e_tc_get_counter(flow);
 				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
@@ -1164,11 +1229,11 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 }

 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
-			       struct mlx5e_tc_flow *flow)
+			       struct mlx5e_tc_flow *flow, int out_index)
 {
-	struct list_head *next = flow->encap.next;
+	struct list_head *next = flow->encaps[out_index].list.next;

-	list_del(&flow->encap);
+	list_del(&flow->encaps[out_index].list);
 	if (list_empty(next)) {
 		struct mlx5e_encap_entry *e;
@@ -2210,7 +2275,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
 			    same_hw_devs(priv, netdev_priv(peer_dev))) {
-				parse_attr->mirred_ifindex = peer_dev->ifindex;
+				parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
 				flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
 				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
 					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
@@ -2281,7 +2346,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 			      struct net_device *mirred_dev,
 			      struct net_device **encap_dev,
 			      struct mlx5e_tc_flow *flow,
-			      struct netlink_ext_ack *extack)
+			      struct netlink_ext_ack *extack,
+			      int out_index)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	unsigned short family = ip_tunnel_info_af(tun_info);
@@ -2328,12 +2394,15 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

 attach_flow:
-	list_add(&flow->encap, &e->flows);
+	list_add(&flow->encaps[out_index].list, &e->flows);
+	flow->encaps[out_index].index = out_index;
 	*encap_dev = e->out_dev;
-	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
-		attr->encap_id = e->encap_id;
-	else
+	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
+		attr->dests[out_index].encap_id = e->encap_id;
+		attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+	} else {
 		err = -EAGAIN;
+	}

 	return err;
@@ -2427,7 +2496,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				return err;

 			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-			attr->mirror_count = attr->out_count;
+			attr->split_count = attr->out_count;
 			continue;
 		}
@@ -2461,23 +2530,28 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				return -EOPNOTSUPP;
 			}

+			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
 			if (switchdev_port_same_parent_id(priv->netdev,
 							  out_dev) ||
 			    is_merged_eswitch_dev(priv, out_dev)) {
-				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
 				out_priv = netdev_priv(out_dev);
 				rpriv = out_priv->ppriv;
-				attr->out_rep[attr->out_count] = rpriv->rep;
-				attr->out_mdev[attr->out_count++] = out_priv->mdev;
+				attr->dests[attr->out_count].rep = rpriv->rep;
+				attr->dests[attr->out_count].mdev = out_priv->mdev;
+				attr->out_count++;
 			} else if (encap) {
-				parse_attr->mirred_ifindex = out_dev->ifindex;
-				parse_attr->tun_info = *info;
+				parse_attr->mirred_ifindex[attr->out_count] =
+					out_dev->ifindex;
+				parse_attr->tun_info[attr->out_count] = *info;
+				encap = false;
 				attr->parse_attr = parse_attr;
-				action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
-					  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
-				/* attr->out_rep is resolved when we handle encap */
+				attr->dests[attr->out_count].flags |=
+					MLX5_ESW_DEST_ENCAP;
+				attr->out_count++;
+				/* attr->dests[].rep is resolved when we
+				 * handle encap
+				 */
 			} else if (parse_attr->filter_dev != priv->netdev) {
 				/* All mlx5 devices are called to configure
 				 * high level device filters. Therefore, the
@@ -2501,7 +2575,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				encap = true;
 			else
 				return -EOPNOTSUPP;
-			attr->mirror_count = attr->out_count;
 			continue;
 		}
@@ -2511,7 +2584,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 			if (err)
 				return err;

-			attr->mirror_count = attr->out_count;
+			attr->split_count = attr->out_count;
 			continue;
 		}
@@ -2546,7 +2619,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 	if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
 		return -EOPNOTSUPP;

-	if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
 		NL_SET_ERR_MSG_MOD(extack,
 				   "current firmware doesn't support split rule for port mirroring");
 		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
@@ -2654,10 +2727,6 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 	if (err)
 		goto err_free;

-	if (!(flow->esw_attr->action &
-	      MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT))
-		kvfree(parse_attr);
-
 	*__flow = flow;

 	return 0;
...
@@ -281,13 +281,16 @@ enum mlx5_flow_match_level {

 /* current maximum for flow based vport multicasting */
 #define MLX5_MAX_FLOW_FWD_VPORTS 2

+enum {
+	MLX5_ESW_DEST_ENCAP       = BIT(0),
+	MLX5_ESW_DEST_ENCAP_VALID = BIT(1),
+};
+
 struct mlx5_esw_flow_attr {
 	struct mlx5_eswitch_rep *in_rep;
-	struct mlx5_eswitch_rep *out_rep[MLX5_MAX_FLOW_FWD_VPORTS];
-	struct mlx5_core_dev *out_mdev[MLX5_MAX_FLOW_FWD_VPORTS];
 	struct mlx5_core_dev *in_mdev;

-	int mirror_count;
+	int split_count;
 	int out_count;

 	int action;
@@ -296,7 +299,12 @@ struct mlx5_esw_flow_attr {
 	u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
 	u8 total_vlan;
 	bool vlan_handled;
-	u32 encap_id;
+	struct {
+		u32 flags;
+		struct mlx5_eswitch_rep *rep;
+		struct mlx5_core_dev *mdev;
+		u32 encap_id;
+	} dests[MLX5_MAX_FLOW_FWD_VPORTS];
 	u32 mod_hdr_id;
 	u8 match_level;
 	struct mlx5_fc *counter;
...
@@ -81,7 +81,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 {
 	struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
 	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
-	bool mirror = !!(attr->mirror_count);
+	bool split = !!(attr->split_count);
 	struct mlx5_flow_handle *rule;
 	struct mlx5_flow_table *fdb;
 	int j, i = 0;
@@ -120,14 +120,21 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 			dest[i].ft = ft;
 			i++;
 		} else {
-			for (j = attr->mirror_count; j < attr->out_count; j++) {
+			for (j = attr->split_count; j < attr->out_count; j++) {
 				dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-				dest[i].vport.num = attr->out_rep[j]->vport;
+				dest[i].vport.num = attr->dests[j].rep->vport;
 				dest[i].vport.vhca_id =
-					MLX5_CAP_GEN(attr->out_mdev[j], vhca_id);
+					MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
 				if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
 					dest[i].vport.flags |=
 						MLX5_FLOW_DEST_VPORT_VHCA_ID;
+				if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
+					flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+					flow_act.reformat_id = attr->dests[j].encap_id;
+					dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+					dest[i].vport.reformat_id =
+						attr->dests[j].encap_id;
+				}
 				i++;
 			}
 		}
@@ -164,10 +171,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		flow_act.modify_id = attr->mod_hdr_id;

-	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
-		flow_act.reformat_id = attr->encap_id;
-
-	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!mirror);
+	fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split);
 	if (IS_ERR(fdb)) {
 		rule = ERR_CAST(fdb);
 		goto err_esw_get;
@@ -182,7 +186,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
 	return rule;

 err_add_rule:
-	esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror);
+	esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
 err_esw_get:
 	if (attr->dest_chain)
 		esw_put_prio_table(esw, attr->dest_chain, 1, 0);
@@ -216,13 +220,17 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 	}

 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-	for (i = 0; i < attr->mirror_count; i++) {
+	for (i = 0; i < attr->split_count; i++) {
 		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-		dest[i].vport.num = attr->out_rep[i]->vport;
+		dest[i].vport.num = attr->dests[i].rep->vport;
 		dest[i].vport.vhca_id =
-			MLX5_CAP_GEN(attr->out_mdev[i], vhca_id);
+			MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
 		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
 			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+		if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
+			dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+			dest[i].vport.reformat_id = attr->dests[i].encap_id;
+		}
 	}
 	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	dest[i].ft = fwd_fdb,
@@ -270,7 +278,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
 			struct mlx5_esw_flow_attr *attr,
 			bool fwd_rule)
 {
-	bool mirror = (attr->mirror_count > 0);
+	bool split = (attr->split_count > 0);

 	mlx5_del_flow_rules(rule);
 	esw->offloads.num_flows--;
@@ -279,7 +287,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
 		esw_put_prio_table(esw, attr->chain, attr->prio, 1);
 		esw_put_prio_table(esw, attr->chain, attr->prio, 0);
 	} else {
-		esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror);
+		esw_put_prio_table(esw, attr->chain, attr->prio, !!split);
 		if (attr->dest_chain)
 			esw_put_prio_table(esw, attr->dest_chain, 1, 0);
 	}
@@ -327,7 +335,7 @@ esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
 	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

 	in_rep = attr->in_rep;
-	out_rep = attr->out_rep[0];
+	out_rep = attr->dests[0].rep;

 	if (push)
 		vport = in_rep;
@@ -348,7 +356,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
 		goto out_notsupp;

 	in_rep = attr->in_rep;
-	out_rep = attr->out_rep[0];
+	out_rep = attr->dests[0].rep;

 	if (push && in_rep->vport == FDB_UPLINK_VPORT)
 		goto out_notsupp;
@@ -400,7 +408,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
 	if (!push && !pop && fwd) {
 		/* tracks VF --> wire rules without vlan push action */
-		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) {
+		if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) {
 			vport->vlan_refcount++;
 			attr->vlan_handled = true;
 		}
@@ -460,7 +468,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
 	if (!push && !pop && fwd) {
 		/* tracks VF --> wire rules without vlan push action */
-		if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT)
+		if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT)
 			vport->vlan_refcount--;

 		return 0;
...
@@ -1373,7 +1373,10 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
 {
 	if (d1->type == d2->type) {
 		if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT &&
-		     d1->vport.num == d2->vport.num) ||
+		     d1->vport.num == d2->vport.num &&
+		     d1->vport.flags == d2->vport.flags &&
+		     ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
+		      (d1->vport.reformat_id == d2->vport.reformat_id) : true)) ||
 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
 		     d1->ft == d2->ft) ||
 		    (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
...
@@ -155,7 +155,8 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		     void *cqc, struct mlx5_cqwq *wq,
 		     struct mlx5_wq_ctrl *wq_ctrl)
 {
-	u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) + 6;
+	/* CQE_STRIDE_128 and CQE_STRIDE_128_PAD both mean 128B stride */
+	u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 6 : 7;
 	u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size);
 	int err;
...
@@ -179,7 +179,12 @@ static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)

 static inline struct mlx5_cqe64 *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
 {
-	return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
+	struct mlx5_cqe64 *cqe = mlx5_frag_buf_get_wqe(&wq->fbc, ix);
+
+	/* For 128B CQEs the data is in the last 64B */
+	cqe += wq->fbc.log_stride == 7;
+
+	return cqe;
 }

 static inline u32 mlx5_cqwq_get_ctr_wrap_cnt(struct mlx5_cqwq *wq, u32 ctr)
...
@@ -125,9 +125,9 @@ struct mlx5_cq_modify_params {
 };

 enum {
-	CQE_SIZE_64 = 0,
-	CQE_SIZE_128 = 1,
-	CQE_SIZE_128_PAD = 2,
+	CQE_STRIDE_64 = 0,
+	CQE_STRIDE_128 = 1,
+	CQE_STRIDE_128_PAD = 2,
 };

 #define MLX5_MAX_CQ_PERIOD (BIT(__mlx5_bit_sz(cqc, cq_period)) - 1)
@@ -135,8 +135,8 @@ enum {

 static inline int cqe_sz_to_mlx_sz(u8 size, int padding_128_en)
 {
-	return padding_128_en ? CQE_SIZE_128_PAD :
-		size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
+	return padding_128_en ? CQE_STRIDE_128_PAD :
+		size == 64 ? CQE_STRIDE_64 : CQE_STRIDE_128;
 }

 static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq)
...
@@ -8283,7 +8283,9 @@ struct mlx5_ifc_pcam_regs_5000_to_507f_bits {
 	u8 port_access_reg_cap_mask_31_to_13[0x13];
 	u8 pbmc[0x1];
 	u8 pptb[0x1];
-	u8 port_access_reg_cap_mask_10_to_0[0xb];
+	u8 port_access_reg_cap_mask_10_to_09[0x2];
+	u8 ppcnt[0x1];
+	u8 port_access_reg_cap_mask_07_to_00[0x8];
 };

 struct mlx5_ifc_pcam_reg_bits {
...