Commit 402a66ed authored by David S. Miller

Merge tag 'mlx5-updates-2021-03-29' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-03-29

Coexistence of CQE compression and HW PTP time-stamp:

This series from Aya improves the mlx5 netdev driver to allow mlx5 CQE
compression (RX descriptor compression, which saves PCI transactions) and
HW PTP time-stamping to coexist.

Prior to this series the two features were mutually exclusive due to the
nature of CQE compression, which reduces the size of the RX descriptor at
the price of trimming some data, such as the time-stamp.

To allow CQE compression when PTP time-stamping is enabled, we enable it on
the regular, performance-critical RX queues, which service all data path
traffic that is not PTP.

PTP traffic is redirected to dedicated RX queues on which CQE compression is
not enabled, thus keeping the time-stamp intact.
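
As an illustration only (not part of the series), a minimal userspace C
sketch of the classification rule used for that redirect, matching the
criteria spelled out in point 2 of the list below; the helper name and the
sample calls are hypothetical:

/* Hypothetical helper mirroring the steering criteria for PTP event traffic:
 * UDP packets to destination port 319, or L2 frames with ethertype 0x88F7. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTP_EV_UDP_PORT 319     /* PTP event messages over UDP */
#define ETH_P_1588      0x88F7  /* PTP over IEEE 802.3 (L2) */

static bool is_ptp_event(uint16_t ethertype, bool is_udp, uint16_t udp_dport)
{
	if (ethertype == ETH_P_1588)                     /* L2 PTP frame */
		return true;
	return is_udp && udp_dport == PTP_EV_UDP_PORT;   /* UDPv4/v6 PTP */
}

int main(void)
{
	printf("%d\n", is_ptp_event(0x0800, true, 319)); /* 1: IPv4 UDP, dport 319 */
	printf("%d\n", is_ptp_event(0x88F7, false, 0));  /* 1: L2 PTP frame */
	printf("%d\n", is_ptp_event(0x0800, true, 320)); /* 0: not PTP */
	return 0;
}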

Having both features is critical for systems with low PCI bandwidth, e.g.
Multi-Host.

The series adds:
1) Infrastructure to create a dedicated RX queue to service the PTP traffic.
2) Flow steering plumbing to capture PTP traffic: both UDP packets with
   destination port 319 and L2 packets with ethertype 0x88F7.
3) Steering of PTP traffic to the dedicated RX queue.
4) Enabling the feature when PTP is configured via the already existing PTP
   IOCTL while CQE compression is active; otherwise the driver flow is
   unchanged (see the userspace sketch after this message).
====================
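
As a usage illustration (not part of the series), a minimal userspace sketch
of point 4: RX hardware time-stamping is requested through the existing
SIOCSHWTSTAMP ioctl, and with this series the driver keeps CQE compression on
the regular queues while granting HWTSTAMP_FILTER_ALL. The interface name
"eth0" is an assumption; error handling is kept minimal.

/* hwtstamp.c -- request HW time-stamps on an assumed interface "eth0". */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT, /* driver may widen to FILTER_ALL */
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* assumed interface name */
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return 1;
	}
	printf("granted rx_filter: %d\n", cfg.rx_filter); /* kernel writes back the granted filter */
	close(fd);
	return 0;
}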
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d0922bf7 885b8cfb
......@@ -27,7 +27,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
en/qos.o en/trap.o
en/qos.o en/trap.o en/fs_tt_redirect.o
#
# Netdev extra
......
......@@ -269,6 +269,7 @@ struct mlx5e_params {
struct mlx5e_xsk *xsk;
unsigned int sw_mtu;
int hard_mtu;
bool ptp_rx;
};
enum {
......@@ -730,6 +731,7 @@ struct mlx5e_ptp_stats {
struct mlx5e_ch_stats ch;
struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
struct mlx5e_rq_stats rq;
} ____cacheline_aligned_in_smp;
enum {
......@@ -836,6 +838,7 @@ struct mlx5e_priv {
struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
struct mlx5e_tir xsk_tir[MLX5E_MAX_NUM_CHANNELS];
struct mlx5e_tir ptp_tir;
struct mlx5e_rss_params rss_params;
u32 tx_rates[MLX5E_MAX_NUM_SQS];
......@@ -859,6 +862,7 @@ struct mlx5e_priv {
u16 max_nch;
u8 max_opened_tc;
bool tx_ptp_opened;
bool rx_ptp_opened;
struct hwtstamp_config tstamp;
u16 q_counter;
u16 drop_rq_q_counter;
......@@ -914,6 +918,7 @@ struct mlx5e_profile {
const struct mlx5e_rx_handlers *rx_handlers;
int max_tc;
u8 rq_groups;
bool rx_ptp_support;
};
void mlx5e_build_ptys2ethtool_map(void);
......@@ -1018,6 +1023,7 @@ int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);
void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels);
......
......@@ -137,11 +137,13 @@ enum {
MLX5E_L2_FT_LEVEL,
MLX5E_TTC_FT_LEVEL,
MLX5E_INNER_TTC_FT_LEVEL,
MLX5E_FS_TT_UDP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
MLX5E_FS_TT_ANY_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#ifdef CONFIG_MLX5_EN_TLS
MLX5E_ACCEL_FS_TCP_FT_LEVEL,
MLX5E_ACCEL_FS_TCP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_ARFS
MLX5E_ARFS_FT_LEVEL,
MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
......@@ -241,6 +243,10 @@ static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSU
struct mlx5e_accel_fs_tcp;
#endif
struct mlx5e_fs_udp;
struct mlx5e_fs_any;
struct mlx5e_ptp_fs;
struct mlx5e_flow_steering {
struct mlx5_flow_namespace *ns;
struct mlx5_flow_namespace *egress_ns;
......@@ -259,6 +265,9 @@ struct mlx5e_flow_steering {
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_fs_tcp *accel_tcp;
#endif
struct mlx5e_fs_udp *udp;
struct mlx5e_fs_any *any;
struct mlx5e_ptp_fs *ptp_fs;
};
struct ttc_params {
......
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021 Mellanox Technologies. */
#ifndef __MLX5E_FS_TT_REDIRECT_H__
#define __MLX5E_FS_TT_REDIRECT_H__
#include "en.h"
#include "en/fs.h"
void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule);
/* UDP traffic type redirect */
struct mlx5_flow_handle *
mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
enum mlx5e_traffic_types ttc_type,
u32 tir_num, u16 d_port);
void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv);
int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv);
/* ANY traffic type redirect*/
struct mlx5_flow_handle *
mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
u32 tir_num, u16 ether_type);
void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv);
int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv);
#endif
......@@ -16,9 +16,16 @@ struct mlx5e_ptpsq {
struct mlx5e_ptp_cq_stats *cq_stats;
};
enum {
MLX5E_PTP_STATE_TX,
MLX5E_PTP_STATE_RX,
MLX5E_PTP_STATE_NUM_STATES,
};
struct mlx5e_ptp {
/* data path */
struct mlx5e_ptpsq ptpsq[MLX5E_MAX_NUM_TC];
struct mlx5e_rq rq;
struct napi_struct napi;
struct device *pdev;
struct net_device *netdev;
......@@ -33,6 +40,7 @@ struct mlx5e_ptp {
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
struct hwtstamp_config *tstamp;
DECLARE_BITMAP(state, MLX5E_PTP_STATE_NUM_STATES);
};
int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
......@@ -40,6 +48,10 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
void mlx5e_ptp_close(struct mlx5e_ptp *c);
void mlx5e_ptp_activate_channel(struct mlx5e_ptp *c);
void mlx5e_ptp_deactivate_channel(struct mlx5e_ptp *c);
int mlx5e_ptp_get_rqn(struct mlx5e_ptp *c, u32 *rqn);
int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv);
void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv);
int mlx5e_ptp_rx_manage_fs(struct mlx5e_priv *priv, bool set);
enum {
MLX5E_SKB_CB_CQE_HWTSTAMP = BIT(0),
......
......@@ -5,6 +5,7 @@
#include "params.h"
#include "txrx.h"
#include "devlink.h"
#include "ptp.h"
static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
{
......@@ -230,8 +231,9 @@ static int mlx5e_reporter_icosq_diagnose(struct mlx5e_icosq *icosq, u8 hw_state,
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
struct devlink_fmsg *fmsg)
static int
mlx5e_rx_reporter_build_diagnose_output_rq_common(struct mlx5e_rq *rq,
struct devlink_fmsg *fmsg)
{
u16 wqe_counter;
int wqes_sz;
......@@ -247,14 +249,6 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
wq_head = mlx5e_rqwq_get_head(rq);
wqe_counter = mlx5e_rqwq_get_wqe_counter(rq);
err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
if (err)
return err;
err = devlink_fmsg_u32_pair_put(fmsg, "rqn", rq->rqn);
if (err)
return err;
......@@ -300,61 +294,149 @@ static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
return err;
}
err = devlink_fmsg_obj_nest_end(fmsg);
return 0;
}
static int mlx5e_rx_reporter_build_diagnose_output(struct mlx5e_rq *rq,
struct devlink_fmsg *fmsg)
{
int err;
err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
return 0;
err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", rq->ix);
if (err)
return err;
err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
if (err)
return err;
return devlink_fmsg_obj_nest_end(fmsg);
}
static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack)
static int mlx5e_rx_reporter_diagnose_generic_rq(struct mlx5e_rq *rq,
struct devlink_fmsg *fmsg)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_params *params = &priv->channels.params;
struct mlx5e_rq *generic_rq;
struct mlx5e_priv *priv = rq->priv;
struct mlx5e_params *params;
u32 rq_stride, rq_sz;
int i, err = 0;
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
int err;
generic_rq = &priv->channels.c[0]->rq;
rq_sz = mlx5e_rqwq_get_size(generic_rq);
params = &priv->channels.params;
rq_sz = mlx5e_rqwq_get_size(rq);
rq_stride = BIT(mlx5e_mpwqe_get_log_stride_size(priv->mdev, params, NULL));
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config");
if (err)
goto unlock;
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "RQ");
if (err)
goto unlock;
return err;
err = devlink_fmsg_u8_pair_put(fmsg, "type", params->rq_wq_type);
if (err)
goto unlock;
return err;
err = devlink_fmsg_u64_pair_put(fmsg, "stride size", rq_stride);
if (err)
goto unlock;
return err;
err = devlink_fmsg_u32_pair_put(fmsg, "size", rq_sz);
if (err)
goto unlock;
return err;
err = mlx5e_health_cq_common_diag_fmsg(&generic_rq->cq, fmsg);
err = mlx5e_health_cq_common_diag_fmsg(&rq->cq, fmsg);
if (err)
goto unlock;
return err;
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int
mlx5e_rx_reporter_diagnose_common_ptp_config(struct mlx5e_priv *priv, struct mlx5e_ptp *ptp_ch,
struct devlink_fmsg *fmsg)
{
int err;
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
if (err)
return err;
err = devlink_fmsg_u32_pair_put(fmsg, "filter_type", priv->tstamp.rx_filter);
if (err)
return err;
err = mlx5e_rx_reporter_diagnose_generic_rq(&ptp_ch->rq, fmsg);
if (err)
return err;
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int
mlx5e_rx_reporter_diagnose_common_config(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_rq *generic_rq = &priv->channels.c[0]->rq;
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
int err;
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "Common config");
if (err)
return err;
err = mlx5e_rx_reporter_diagnose_generic_rq(generic_rq, fmsg);
if (err)
return err;
if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
err = mlx5e_rx_reporter_diagnose_common_ptp_config(priv, ptp_ch, fmsg);
if (err)
return err;
}
return mlx5e_health_fmsg_named_obj_nest_end(fmsg);
}
static int mlx5e_rx_reporter_build_diagnose_output_ptp_rq(struct mlx5e_rq *rq,
struct devlink_fmsg *fmsg)
{
int err;
err = devlink_fmsg_obj_nest_start(fmsg);
if (err)
return err;
err = devlink_fmsg_string_pair_put(fmsg, "channel", "ptp");
if (err)
return err;
err = mlx5e_rx_reporter_build_diagnose_output_rq_common(rq, fmsg);
if (err)
return err;
err = devlink_fmsg_obj_nest_end(fmsg);
if (err)
return err;
return 0;
}
static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
int i, err = 0;
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
err = mlx5e_health_fmsg_named_obj_nest_end(fmsg);
err = mlx5e_rx_reporter_diagnose_common_config(reporter, fmsg);
if (err)
goto unlock;
......@@ -369,9 +451,12 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
if (err)
goto unlock;
}
if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
err = mlx5e_rx_reporter_build_diagnose_output_ptp_rq(&ptp_ch->rq, fmsg);
if (err)
goto unlock;
}
err = devlink_fmsg_arr_pair_nest_end(fmsg);
if (err)
goto unlock;
unlock:
mutex_unlock(&priv->state_lock);
return err;
......@@ -503,6 +588,7 @@ static int mlx5e_rx_reporter_dump_rq(struct mlx5e_priv *priv, struct devlink_fms
static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
struct devlink_fmsg *fmsg)
{
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5_rsc_key key = {};
int i, err;
......@@ -535,6 +621,12 @@ static int mlx5e_rx_reporter_dump_all_rqs(struct mlx5e_priv *priv,
return err;
}
if (ptp_ch && test_bit(MLX5E_PTP_STATE_RX, ptp_ch->state)) {
err = mlx5e_health_queue_dump(priv, fmsg, ptp_ch->rq.rqn, "PTP RQ");
if (err)
return err;
}
return devlink_fmsg_arr_pair_nest_end(fmsg);
}
......
......@@ -304,6 +304,7 @@ mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporte
{
struct mlx5e_priv *priv = devlink_health_reporter_priv(reporter);
struct mlx5e_txqsq *generic_sq = priv->txq2sq[0];
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
struct mlx5e_ptpsq *generic_ptpsq;
int err;
......@@ -315,12 +316,11 @@ mlx5e_tx_reporter_diagnose_common_config(struct devlink_health_reporter *reporte
if (err)
return err;
generic_ptpsq = priv->channels.ptp ?
&priv->channels.ptp->ptpsq[0] :
NULL;
if (!generic_ptpsq)
if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
goto out;
generic_ptpsq = &ptp_ch->ptpsq[0];
err = mlx5e_health_fmsg_named_obj_nest_start(fmsg, "PTP");
if (err)
return err;
......@@ -375,7 +375,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter,
}
}
if (!ptp_ch)
if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state))
goto close_sqs_nest;
for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
......@@ -497,7 +497,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv,
}
}
if (ptp_ch) {
if (ptp_ch && test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) {
for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq;
......
......@@ -34,6 +34,7 @@
#include "en/port.h"
#include "en/params.h"
#include "en/xsk/pool.h"
#include "en/ptp.h"
#include "lib/clock.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
......@@ -1865,13 +1866,19 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
new_channels.params = priv->channels.params;
MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
if (priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE)
new_channels.params.ptp_rx = new_val;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
return 0;
}
err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
if (new_channels.params.ptp_rx == priv->channels.params.ptp_rx)
err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL);
else
err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_ptp_rx_manage_fs_ctx,
&new_channels.params.ptp_rx);
if (err)
return err;
......@@ -1892,11 +1899,6 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
if (enable && priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
return -EINVAL;
}
err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
if (err)
return err;
......
......@@ -38,6 +38,7 @@
#include "en.h"
#include "en_rep.h"
#include "lib/mpfs.h"
#include "en/ptp.h"
static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai, int type);
......@@ -1792,10 +1793,16 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
goto err_destroy_l2_table;
}
err = mlx5e_ptp_alloc_rx_fs(priv);
if (err)
goto err_destory_vlan_table;
mlx5e_ethtool_init_steering(priv);
return 0;
err_destory_vlan_table:
mlx5e_destroy_vlan_table(priv);
err_destroy_l2_table:
mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
......@@ -1810,6 +1817,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_ptp_free_rx_fs(priv);
mlx5e_destroy_vlan_table(priv);
mlx5e_destroy_l2_table(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
......
......@@ -2087,7 +2087,7 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
goto err_close_channels;
}
if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS)) {
if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS) || chs->params.ptp_rx) {
err = mlx5e_ptp_open(priv, &chs->params, chs->c[0]->lag_port, &chs->ptp);
if (err)
goto err_close_channels;
......@@ -2329,7 +2329,8 @@ static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
}
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
struct mlx5e_redirect_rqt_param rrp)
struct mlx5e_redirect_rqt_param rrp,
struct mlx5e_redirect_rqt_param *ptp_rrp)
{
u32 rqtn;
int ix;
......@@ -2355,11 +2356,17 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
rqtn = priv->direct_tir[ix].rqt.rqtn;
mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
}
if (ptp_rrp) {
rqtn = priv->ptp_tir.rqt.rqtn;
mlx5e_redirect_rqt(priv, rqtn, 1, *ptp_rrp);
}
}
static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *chs)
{
bool rx_ptp_support = priv->profile->rx_ptp_support;
struct mlx5e_redirect_rqt_param *ptp_rrp_p = NULL;
struct mlx5e_redirect_rqt_param rrp = {
.is_rss = true,
{
......@@ -2369,12 +2376,22 @@ static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
}
},
};
struct mlx5e_redirect_rqt_param ptp_rrp;
if (rx_ptp_support) {
u32 ptp_rqn;
mlx5e_redirect_rqts(priv, rrp);
ptp_rrp.is_rss = false;
ptp_rrp.rqn = mlx5e_ptp_get_rqn(priv->channels.ptp, &ptp_rqn) ?
priv->drop_rq.rqn : ptp_rqn;
ptp_rrp_p = &ptp_rrp;
}
mlx5e_redirect_rqts(priv, rrp, ptp_rrp_p);
}
static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
bool rx_ptp_support = priv->profile->rx_ptp_support;
struct mlx5e_redirect_rqt_param drop_rrp = {
.is_rss = false,
{
......@@ -2382,7 +2399,7 @@ static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
},
};
mlx5e_redirect_rqts(priv, drop_rrp);
mlx5e_redirect_rqts(priv, drop_rrp, rx_ptp_support ? &drop_rrp : NULL);
}
static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = {
......@@ -2671,6 +2688,8 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
nch = priv->channels.params.num_channels;
ntc = priv->channels.params.num_tc;
num_rxqs = nch * priv->profile->rq_groups;
if (priv->channels.params.ptp_rx)
num_rxqs++;
mlx5e_netdev_set_tcs(netdev, nch, ntc);
......@@ -2759,6 +2778,9 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
if (!priv->channels.ptp)
return;
if (!test_bit(MLX5E_PTP_STATE_TX, priv->channels.ptp->state))
return;
for (tc = 0; tc < num_tc; tc++) {
struct mlx5e_ptp *c = priv->channels.ptp;
struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
......@@ -3493,6 +3515,13 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
s->tx_dropped += sq_stats->dropped;
}
}
if (priv->rx_ptp_opened) {
struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
s->multicast += rq_stats->mcast_packets;
}
}
void
......@@ -3941,9 +3970,18 @@ static int mlx5e_change_nic_mtu(struct net_device *netdev, int new_mtu)
return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu_ctx);
}
int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx)
{
bool set = *(bool *)ctx;
return mlx5e_ptp_rx_manage_fs(priv, set);
}
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
{
struct mlx5e_channels new_channels = {};
struct hwtstamp_config config;
bool rx_cqe_compress_def;
int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
......@@ -3963,11 +4001,13 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
}
mutex_lock(&priv->state_lock);
new_channels.params = priv->channels.params;
rx_cqe_compress_def = priv->channels.params.rx_cqe_compress_def;
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* Reset CQE compression to Admin default */
mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
new_channels.params.ptp_rx = false;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
......@@ -3984,15 +4024,7 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
/* Disable CQE compression */
if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS))
netdev_warn(priv->netdev, "Disabling RX cqe compression\n");
err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
if (err) {
netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
mutex_unlock(&priv->state_lock);
return err;
}
new_channels.params.ptp_rx = rx_cqe_compress_def;
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
......@@ -4000,6 +4032,20 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
return -ERANGE;
}
if (new_channels.params.ptp_rx == priv->channels.params.ptp_rx)
goto out;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
goto out;
}
err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_ptp_rx_manage_fs_ctx,
&new_channels.params.ptp_rx);
if (err) {
mutex_unlock(&priv->state_lock);
return err;
}
out:
memcpy(&priv->tstamp, &config, sizeof(config));
mutex_unlock(&priv->state_lock);
......@@ -4934,10 +4980,18 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (unlikely(err))
goto err_destroy_xsk_rqts;
err = mlx5e_create_direct_rqts(priv, &priv->ptp_tir, 1);
if (err)
goto err_destroy_xsk_tirs;
err = mlx5e_create_direct_tirs(priv, &priv->ptp_tir, 1);
if (err)
goto err_destroy_ptp_rqt;
err = mlx5e_create_flow_steering(priv);
if (err) {
mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
goto err_destroy_xsk_tirs;
goto err_destroy_ptp_direct_tir;
}
err = mlx5e_tc_nic_init(priv);
......@@ -4958,6 +5012,10 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
mlx5e_destroy_flow_steering(priv);
err_destroy_ptp_direct_tir:
mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1);
err_destroy_ptp_rqt:
mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1);
err_destroy_xsk_tirs:
mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch);
err_destroy_xsk_rqts:
......@@ -4984,6 +5042,8 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1);
mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1);
mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch);
mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch);
mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch);
......@@ -5096,6 +5156,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_nic_stats_grps,
.stats_grps_num = mlx5e_nic_stats_grps_num,
.rx_ptp_support = true,
};
/* mlx5e generic netdev management API (move to en_common.c) */
......
......@@ -1062,6 +1062,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5e_rep_stats_grps,
.stats_grps_num = mlx5e_rep_stats_grps_num,
.rx_ptp_support = false,
};
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
......@@ -1082,6 +1083,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
.stats_grps = mlx5e_ul_rep_stats_grps,
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
.rx_ptp_support = false,
};
/* e-Switch vport representors */
......
......@@ -407,13 +407,21 @@ static void mlx5e_stats_grp_sw_update_stats_ptp(struct mlx5e_priv *priv,
{
int i;
if (!priv->tx_ptp_opened)
if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
return;
mlx5e_stats_grp_sw_update_stats_ch_stats(s, &priv->ptp_stats.ch);
for (i = 0; i < priv->max_opened_tc; i++) {
mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);
if (priv->tx_ptp_opened) {
for (i = 0; i < priv->max_opened_tc; i++) {
mlx5e_stats_grp_sw_update_stats_sq(s, &priv->ptp_stats.sq[i]);
/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
barrier();
}
}
if (priv->rx_ptp_opened) {
mlx5e_stats_grp_sw_update_stats_rq_stats(s, &priv->ptp_stats.rq);
/* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=92657 */
barrier();
......@@ -1760,6 +1768,38 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
};
static const struct counter_desc ptp_rq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, bytes) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_bytes) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, ecn_mark) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, wqe_err) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_reuse) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_full) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_empty) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_busy) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, cache_waive) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, congst_umr) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, arfs_err) },
{ MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, recover) },
};
static const struct counter_desc qos_sq_stats_desc[] = {
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, packets) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, bytes) },
......@@ -1805,6 +1845,7 @@ static const struct counter_desc qos_sq_stats_desc[] = {
#define NUM_PTP_SQ_STATS ARRAY_SIZE(ptp_sq_stats_desc)
#define NUM_PTP_CH_STATS ARRAY_SIZE(ptp_ch_stats_desc)
#define NUM_PTP_CQ_STATS ARRAY_SIZE(ptp_cq_stats_desc)
#define NUM_PTP_RQ_STATS ARRAY_SIZE(ptp_rq_stats_desc)
#define NUM_QOS_SQ_STATS ARRAY_SIZE(qos_sq_stats_desc)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(qos)
......@@ -1851,32 +1892,46 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ptp)
{
return priv->tx_ptp_opened ?
NUM_PTP_CH_STATS +
((NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc) :
0;
int num = NUM_PTP_CH_STATS;
if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
return 0;
if (priv->tx_ptp_opened)
num += (NUM_PTP_SQ_STATS + NUM_PTP_CQ_STATS) * priv->max_opened_tc;
if (priv->rx_ptp_opened)
num += NUM_PTP_RQ_STATS;
return num;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
{
int i, tc;
if (!priv->tx_ptp_opened)
if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
return idx;
for (i = 0; i < NUM_PTP_CH_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ptp_ch_stats_desc[i].format);
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_SQ_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ptp_sq_stats_desc[i].format, tc);
if (priv->tx_ptp_opened) {
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_SQ_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ptp_sq_stats_desc[i].format, tc);
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_CQ_STATS; i++)
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_CQ_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ptp_cq_stats_desc[i].format, tc);
}
if (priv->rx_ptp_opened) {
for (i = 0; i < NUM_PTP_RQ_STATS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
ptp_cq_stats_desc[i].format, tc);
ptp_rq_stats_desc[i].format);
}
return idx;
}
......@@ -1884,7 +1939,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
{
int i, tc;
if (!priv->tx_ptp_opened)
if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
return idx;
for (i = 0; i < NUM_PTP_CH_STATS; i++)
......@@ -1892,18 +1947,25 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
ptp_ch_stats_desc, i);
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_SQ_STATS; i++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
ptp_sq_stats_desc, i);
if (priv->tx_ptp_opened) {
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_SQ_STATS; i++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
ptp_sq_stats_desc, i);
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_CQ_STATS; i++)
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_CQ_STATS; i++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
ptp_cq_stats_desc, i);
}
if (priv->rx_ptp_opened) {
for (i = 0; i < NUM_PTP_RQ_STATS; i++)
data[idx++] =
MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
ptp_cq_stats_desc, i);
MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
ptp_rq_stats_desc, i);
}
return idx;
}
......
......@@ -54,6 +54,7 @@
#define MLX5E_DECLARE_PTP_TX_STAT(type, fld) "ptp_tx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CH_STAT(type, fld) "ptp_ch_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_CQ_STAT(type, fld) "ptp_cq%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_PTP_RQ_STAT(type, fld) "ptp_rq%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_QOS_TX_STAT(type, fld) "qos_tx%d_"#fld, offsetof(type, fld)
......
......@@ -133,6 +133,8 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
/* Sync with mlx5e_update_num_tc_x_num_ch - avoid refetching. */
num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);
if (unlikely(dev->real_num_tx_queues > num_tc_x_num_ch)) {
struct mlx5e_ptp *ptp_channel;
/* Order maj_id before defcls - pairs with mlx5e_htb_root_add. */
u16 htb_maj_id = smp_load_acquire(&priv->htb.maj_id);
......@@ -142,10 +144,11 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq_ix;
}
if (unlikely(priv->channels.ptp))
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
mlx5e_use_ptpsq(skb))
return mlx5e_select_ptpsq(dev, skb);
ptp_channel = READ_ONCE(priv->channels.ptp);
if (unlikely(ptp_channel) &&
test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state) &&
mlx5e_use_ptpsq(skb))
return mlx5e_select_ptpsq(dev, skb);
txq_ix = netdev_pick_tx(dev, skb, NULL);
/* Fix netdev_pick_tx() not to choose ptp_channel and HTB txqs.
......
......@@ -105,7 +105,7 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Promiscuous, Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
#define KERNEL_NIC_PRIO_NUM_LEVELS 7
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
......
......@@ -473,6 +473,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.stats_grps = mlx5i_stats_grps,
.stats_grps_num = mlx5i_stats_grps_num,
.rx_ptp_support = false,
};
/* mlx5i netdev NDos */
......
......@@ -350,6 +350,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.rx_handlers = &mlx5i_rx_handlers,
.max_tc = MLX5I_MAX_NUM_TC,
.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
.rx_ptp_support = false,
};
const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
......