Commit ddc9cc01 authored by David S. Miller

Merge tag 'mlx5e-updates-2018-09-05' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-09-05

This series provides updates to mlx5 ethernet driver.

1) Starting with a four-patch series from Vlad Buslov to optimize flow
counter updates:
==============================================

By default the mlx5 driver updates cached counters each second. The update
function consumes a noticeable amount of CPU resources. The goal of this
patch series is to optimize the update function.

Investigation revealed the following bottlenecks in the fs counters
implementation:
 1) The update code (scheduled each second) iterates over all counters twice
 (first to find and delete the counters that are marked for deletion, and a
 second time to actually update the counters).
 2) Counters are stored in an rb tree. Linear iteration over all rb tree
 elements (rb_next in profiling data) consumed ~65% of the time spent in the
 update function.

The following optimizations were implemented:
 1) Instead of just marking counters for deletion, store them in a
 standalone list. This removes the first iteration over the whole counters
 tree.
 2) Store counters in a sorted list to optimize traversing them and remove
 the calls to rb_next.

The first implementation of these changes degraded performance instead of
improving it. Investigation revealed that the first cache line of struct
mlx5_fc is already full, and adding anything to it doubles the number of
cache misses. To mitigate that, the following refactorings were implemented
(see the sketch after this list):
 - Change the 'addlist' list type from doubly linked to singly linked. This
 frees space for one additional pointer, which is used to store the
 deletion list (optimization 1).
 - Substitute the rb tree with an idr. The idr is a non-intrusive data
 structure and doesn't require adding any new members to struct mlx5_fc. Use
 the space that became available for the doubly linked sorted list that is
 used for traversing all counters (optimization 2).
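
As a rough illustration of the resulting add/delete pattern (a minimal
standalone sketch, not the driver code; all names here are made up):
writers publish counters onto lock-free llists from any context, and the
periodic worker drains both lists with one atomic swap each before walking
the sorted list.

#include <linux/llist.h>
#include <linux/list.h>
#include <linux/slab.h>

struct counter {
	struct list_head list;     /* sorted traversal list, touched only by the worker */
	struct llist_node addlist; /* pending insertion, lock-free push */
	struct llist_node dellist; /* pending deletion, lock-free push */
	u32 id;
};

static LLIST_HEAD(pending_add);
static LLIST_HEAD(pending_del);
static LIST_HEAD(counters);

/* create/destroy paths: a single atomic push, no spinlock taken */
static void counter_created(struct counter *c)
{
	llist_add(&c->addlist, &pending_add);
}

static void counter_destroyed(struct counter *c)
{
	llist_add(&c->dellist, &pending_del);
}

/* periodic worker (single consumer) */
static void update_work(void)
{
	struct llist_node *batch = llist_del_all(&pending_add);
	struct counter *c, *tmp;

	llist_for_each_entry(c, batch, addlist)
		list_add_tail(&c->list, &counters); /* the real code inserts sorted by id */

	batch = llist_del_all(&pending_del);
	llist_for_each_entry_safe(c, tmp, batch, dellist) {
		list_del(&c->list);
		kfree(c); /* assumes counters were kmalloc'ed */
	}
}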

These changes reduced the CPU time spent in mlx5_fc_stats_work from 70%
to 44% (global perf profile mode).
============================================

The rest of the series consists of miscellaneous updates:

2) From Kamal, move mlx5e_priv_flags into en_ethtool.c to avoid a
compilation warning.

3) From Roi Dayan, move the Q counters allocation and the drop RQ into the
init_rx profile function, to avoid allocating Q counters when they are not
required. The resulting setup/teardown ordering is sketched below.
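
A condensed view of the new init_rx flow, mirroring the en_main.c hunks in
the diff below (only the drop-RQ/Q-counter steps are shown; the function
name is a stand-in and the remaining RX resources are elided):

static int init_rx_sketch(struct mlx5e_priv *priv)
{
	int err;

	mlx5e_create_q_counters(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err)
		goto err_destroy_q_counters;

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		goto err_close_drop_rq;

	/* ... remaining RX resources, unwound in reverse order on failure ... */
	return 0;

err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
err_destroy_q_counters:
	mlx5e_destroy_q_counters(priv);
	return err;
}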

4) From Shay Agroskin, replace the PTP clock lock: use a seqlock instead of
an RW lock. This almost doubles the packet rate when timestamping is active
on multiple TX queues.
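
The gain comes from making the hot read path (cycles-to-nanoseconds
conversion on every timestamped completion) lock-free: readers simply retry
if a writer raced them, instead of bouncing the rwlock cacheline between
CPUs. A generic standalone sketch of the pattern (not the driver code):

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(clock_lock);
static u64 clock_ns;

/* hot path: no stores to shared state, so concurrent readers scale */
static u64 read_time(void)
{
	unsigned int seq;
	u64 ns;

	do {
		seq = read_seqbegin(&clock_lock);
		ns = clock_ns;
	} while (read_seqretry(&clock_lock, seq)); /* retry if a writer raced us */

	return ns;
}

/* cold path: writers still serialize and bump the sequence count */
static void adjust_time(u64 ns)
{
	unsigned long flags;

	write_seqlock_irqsave(&clock_lock, flags);
	clock_ns = ns;
	write_sequnlock_irqrestore(&clock_lock, flags);
}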

5) From Natali Shechtman, set ECN on received packets using the CQE
indication.
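
When the CQE carries a congestion (CE) indication from the hardware, the
driver rewrites the packet's IP ECN field with the kernel's standard
helpers. A hedged sketch of the core operation (set_ce is an illustrative
name; the actual code in the diff below also counts a new ecn_mark
statistic):

#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/inet_ecn.h>

/* Returns nonzero if the ECN field was actually rewritten to CE. */
static int set_ce(struct sk_buff *skb, __be16 proto, void *l3_hdr)
{
	return proto == htons(ETH_P_IP) ?
	       IP_ECN_set_ce((struct iphdr *)l3_hdr) :
	       IP6_ECN_set_ce(skb, (struct ipv6hdr *)l3_hdr);
}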

6) From Alaa Hleihel, don't set CHECKSUM_COMPLETE on SCTP packets, since
CHECKSUM_COMPLETE is not applicable to the SCTP protocol (SCTP uses a
CRC32c rather than a ones'-complement checksum).
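
Such packets now fall back to CHECKSUM_UNNECESSARY based on the CQE's
L3/L4-OK bits. A sketch of the classification step, mirroring
get_ip_proto() in the diff below (skip_csum_complete is an illustrative
name; like the driver's CHECKSUM_COMPLETE path, it assumes a plain
Ethernet header):

#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>

static bool skip_csum_complete(struct sk_buff *skb, __be16 proto)
{
	void *l3_hdr = skb->data + sizeof(struct ethhdr);
	u8 l4_proto = proto == htons(ETH_P_IP) ?
		      ((struct iphdr *)l3_hdr)->protocol :
		      ((struct ipv6hdr *)l3_hdr)->nexthdr;

	return l4_proto == IPPROTO_SCTP;
}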
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2002bc32 fe1dc069
@@ -204,13 +204,6 @@ struct mlx5e_umr_wqe {
 
 extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
 
-static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
-	"rx_cqe_moder",
-	"tx_cqe_moder",
-	"rx_cqe_compress",
-	"rx_striding_rq",
-};
-
 enum mlx5e_priv_flag {
 	MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
 	MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
@@ -905,6 +898,12 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
 int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb);
 
 /* common netdev helpers */
+void mlx5e_create_q_counters(struct mlx5e_priv *priv);
+void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
+
+int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
+		       struct mlx5e_rq *drop_rq);
+void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
+
 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv);
...
@@ -135,6 +135,13 @@ void mlx5e_build_ptys2ethtool_map(void)
 				       ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
 }
 
+static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
+	"rx_cqe_moder",
+	"tx_cqe_moder",
+	"rx_cqe_compress",
+	"rx_striding_rq",
+};
+
 int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
 {
 	int i, num_stats = 0;
...
@@ -3049,7 +3049,7 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
 	return mlx5e_alloc_cq_common(mdev, param, cq);
 }
 
-static int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
-			      struct mlx5e_rq *drop_rq)
+int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
+		       struct mlx5e_rq *drop_rq)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
@@ -3094,7 +3094,7 @@ static int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
 	return err;
 }
 
-static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
+void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
 {
 	mlx5e_destroy_rq(drop_rq);
 	mlx5e_free_rq(drop_rq);
@@ -4726,7 +4726,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	mlx5e_tls_build_netdev(priv);
 }
 
-static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
+void mlx5e_create_q_counters(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
@@ -4744,7 +4744,7 @@ static void mlx5e_create_q_counters(struct mlx5e_priv *priv)
 	}
 }
 
-static void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
+void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
 {
 	if (priv->q_counter)
 		mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
@@ -4783,9 +4783,17 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
 	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
 
+	mlx5e_create_q_counters(priv);
+
+	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
+	if (err) {
+		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+		goto err_destroy_q_counters;
+	}
+
 	err = mlx5e_create_indirect_rqt(priv);
 	if (err)
-		return err;
+		goto err_close_drop_rq;
 
 	err = mlx5e_create_direct_rqts(priv);
 	if (err)
@@ -4821,6 +4829,10 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
 	mlx5e_destroy_direct_rqts(priv);
 err_destroy_indirect_rqts:
 	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+err_close_drop_rq:
+	mlx5e_close_drop_rq(&priv->drop_rq);
+err_destroy_q_counters:
+	mlx5e_destroy_q_counters(priv);
 	return err;
 }
@@ -4832,6 +4844,8 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
 	mlx5e_destroy_indirect_tirs(priv);
 	mlx5e_destroy_direct_rqts(priv);
 	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+	mlx5e_close_drop_rq(&priv->drop_rq);
+	mlx5e_destroy_q_counters(priv);
 }
 
 static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
@@ -4975,7 +4989,6 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
 
 int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 {
-	struct mlx5_core_dev *mdev = priv->mdev;
 	const struct mlx5e_profile *profile;
 	int err;
@@ -4986,28 +4999,16 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
 	if (err)
 		goto out;
 
-	mlx5e_create_q_counters(priv);
-
-	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
-	if (err) {
-		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
-		goto err_destroy_q_counters;
-	}
-
 	err = profile->init_rx(priv);
 	if (err)
-		goto err_close_drop_rq;
+		goto err_cleanup_tx;
 
 	if (profile->enable)
 		profile->enable(priv);
 
 	return 0;
 
-err_close_drop_rq:
-	mlx5e_close_drop_rq(&priv->drop_rq);
-
-err_destroy_q_counters:
-	mlx5e_destroy_q_counters(priv);
-
+err_cleanup_tx:
 	profile->cleanup_tx(priv);
 
 out:
@@ -5025,8 +5026,6 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
 	flush_workqueue(priv->wq);
 
 	profile->cleanup_rx(priv);
-	mlx5e_close_drop_rq(&priv->drop_rq);
-	mlx5e_destroy_q_counters(priv);
 	profile->cleanup_tx(priv);
 	cancel_delayed_work_sync(&priv->update_stats_work);
 }
...
@@ -999,14 +999,21 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
+	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5_flow_handle *flow_rule;
 	int err;
 
 	mlx5e_init_l2_addr(priv);
 
+	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
+	if (err) {
+		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+		return err;
+	}
+
 	err = mlx5e_create_direct_rqts(priv);
 	if (err)
-		return err;
+		goto err_close_drop_rq;
 
 	err = mlx5e_create_direct_tirs(priv);
 	if (err)
@@ -1027,6 +1034,8 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 	mlx5e_destroy_direct_tirs(priv);
 err_destroy_direct_rqts:
 	mlx5e_destroy_direct_rqts(priv);
+err_close_drop_rq:
+	mlx5e_close_drop_rq(&priv->drop_rq);
 	return err;
 }
@@ -1037,6 +1046,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
 	mlx5_del_flow_rules(rpriv->vport_rx_rule);
 	mlx5e_destroy_direct_tirs(priv);
 	mlx5e_destroy_direct_rqts(priv);
+	mlx5e_close_drop_rq(&priv->drop_rq);
 }
 
 static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
...
@@ -37,6 +37,7 @@
 #include <net/busy_poll.h>
 #include <net/ip6_checksum.h>
 #include <net/page_pool.h>
+#include <net/inet_ecn.h>
 #include "en.h"
 #include "en_tc.h"
 #include "eswitch.h"
@@ -690,12 +691,29 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
 	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
 }
 
-static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
+static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
+					__be16 *proto)
 {
-	__be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+	*proto = ((struct ethhdr *)skb->data)->h_proto;
+	*proto = __vlan_get_protocol(skb, *proto, network_depth);
+	return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6));
+}
+
+static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
+{
+	int network_depth = 0;
+	__be16 proto;
+	void *ip;
+	int rc;
+
+	if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
+		return;
+
+	ip = skb->data + network_depth;
+	rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
+					   IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
 
-	ethertype = __vlan_get_protocol(skb, ethertype, network_depth);
-	return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
+	rq->stats->ecn_mark += !!rc;
 }
 
 static __be32 mlx5e_get_fcs(struct sk_buff *skb)
@@ -737,6 +755,14 @@ static __be32 mlx5e_get_fcs(struct sk_buff *skb)
 	return fcs_bytes;
 }
 
+static u8 get_ip_proto(struct sk_buff *skb, __be16 proto)
+{
+	void *ip_p = skb->data + sizeof(struct ethhdr);
+
+	return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
+					    ((struct ipv6hdr *)ip_p)->nexthdr;
+}
+
 static inline void mlx5e_handle_csum(struct net_device *netdev,
 				     struct mlx5_cqe64 *cqe,
 				     struct mlx5e_rq *rq,
@@ -745,6 +771,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 {
 	struct mlx5e_rq_stats *stats = rq->stats;
 	int network_depth = 0;
+	__be16 proto;
 
 	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
 		goto csum_none;
@@ -755,7 +782,10 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		return;
 	}
 
-	if (likely(is_last_ethertype_ip(skb, &network_depth))) {
+	if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
+		if (unlikely(get_ip_proto(skb, proto) == IPPROTO_SCTP))
+			goto csum_unnecessary;
+
 		skb->ip_summed = CHECKSUM_COMPLETE;
 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
 		if (network_depth > ETH_HLEN)
@@ -773,6 +803,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		return;
 	}
 
+csum_unnecessary:
 	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
 		   (cqe->hds_ip_ext & CQE_L4_OK))) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -790,6 +821,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 		stats->csum_none++;
 }
 
+#define MLX5E_CE_BIT_MASK 0x80
+
 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 				      u32 cqe_bcnt,
 				      struct mlx5e_rq *rq,
@@ -834,6 +867,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
 		skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
 
 	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
+	/* checking CE bit in cqe - MSB in ml_path field */
+	if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
+		mlx5e_enable_ecn(rq, skb);
+
 	skb->protocol = eth_type_trans(skb, netdev);
 }
...
@@ -53,6 +53,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
@@ -144,6 +145,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 		s->rx_bytes += rq_stats->bytes;
 		s->rx_lro_packets += rq_stats->lro_packets;
 		s->rx_lro_bytes += rq_stats->lro_bytes;
+		s->rx_ecn_mark += rq_stats->ecn_mark;
 		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
 		s->rx_csum_none += rq_stats->csum_none;
 		s->rx_csum_complete += rq_stats->csum_complete;
@@ -1144,6 +1146,7 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
...
@@ -66,6 +66,7 @@ struct mlx5e_sw_stats {
 	u64 tx_nop;
 	u64 rx_lro_packets;
 	u64 rx_lro_bytes;
+	u64 rx_ecn_mark;
 	u64 rx_removed_vlan_packets;
 	u64 rx_csum_unnecessary;
 	u64 rx_csum_none;
@@ -184,6 +185,7 @@ struct mlx5e_rq_stats {
 	u64 csum_none;
 	u64 lro_packets;
 	u64 lro_bytes;
+	u64 ecn_mark;
 	u64 removed_vlan_packets;
 	u64 xdp_drop;
 	u64 xdp_redirect;
...
@@ -36,6 +36,7 @@
 #include <linux/refcount.h>
 #include <linux/mlx5/fs.h>
 #include <linux/rhashtable.h>
+#include <linux/llist.h>
 
 enum fs_node_type {
 	FS_TYPE_NAMESPACE,
@@ -138,8 +139,9 @@ struct mlx5_fc_cache {
 };
 
 struct mlx5_fc {
-	struct rb_node node;
 	struct list_head list;
+	struct llist_node addlist;
+	struct llist_node dellist;
 
 	/* last{packets,bytes} members are used when calculating the delta since
 	 * last reading
@@ -148,7 +150,6 @@ struct mlx5_fc {
 	u64 lastbytes;
 
 	u32 id;
-	bool deleted;
 	bool aging;
 
 	struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
...
@@ -52,11 +52,13 @@
  *   access to counter list:
  *
  * - create (user context)
  *   - mlx5_fc_create() only adds to an addlist to be used by
- *     mlx5_fc_stats_query_work(). addlist is protected by a spinlock.
+ *     mlx5_fc_stats_query_work(). addlist is a lockless single linked list
+ *     that doesn't require any additional synchronization when adding single
+ *     node.
  *   - spawn thread to do the actual destroy
  *
  * - destroy (user context)
- *   - mark a counter as deleted
+ *   - add a counter to lockless dellist
  *   - spawn thread to do the actual del
  *
  * - dump (user context)
@@ -71,36 +73,43 @@
  *   elapsed, the thread will actually query the hardware.
  */
 
-static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
+static struct list_head *mlx5_fc_counters_lookup_next(struct mlx5_core_dev *dev,
+						      u32 id)
 {
-	struct rb_node **new = &root->rb_node;
-	struct rb_node *parent = NULL;
-
-	while (*new) {
-		struct mlx5_fc *this = rb_entry(*new, struct mlx5_fc, node);
-		int result = counter->id - this->id;
-
-		parent = *new;
-		if (result < 0)
-			new = &((*new)->rb_left);
-		else
-			new = &((*new)->rb_right);
-	}
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	unsigned long next_id = (unsigned long)id + 1;
+	struct mlx5_fc *counter;
+
+	rcu_read_lock();
+	/* skip counters that are in idr, but not yet in counters list */
+	while ((counter = idr_get_next_ul(&fc_stats->counters_idr,
+					  &next_id)) != NULL &&
+	       list_empty(&counter->list))
+		next_id++;
+	rcu_read_unlock();
+
+	return counter ? &counter->list : &fc_stats->counters;
+}
 
-	/* Add new node and rebalance tree. */
-	rb_link_node(&counter->node, parent, new);
-	rb_insert_color(&counter->node, root);
+static void mlx5_fc_stats_insert(struct mlx5_core_dev *dev,
+				 struct mlx5_fc *counter)
+{
+	struct list_head *next = mlx5_fc_counters_lookup_next(dev, counter->id);
+
+	list_add_tail(&counter->list, next);
 }
 
-/* The function returns the last node that was queried so the caller
+/* The function returns the last counter that was queried so the caller
  * function can continue calling it till all counters are queried.
  */
-static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
+static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
 					   struct mlx5_fc *first,
 					   u32 last_id)
 {
+	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	struct mlx5_fc *counter = NULL;
 	struct mlx5_cmd_fc_bulk *b;
-	struct rb_node *node = NULL;
+	bool more = false;
 	u32 afirst_id;
 	int num;
 	int err;
@@ -130,14 +139,16 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
 		goto out;
 	}
 
-	for (node = &first->node; node; node = rb_next(node)) {
-		struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
+	counter = first;
+	list_for_each_entry_from(counter, &fc_stats->counters, list) {
 		struct mlx5_fc_cache *c = &counter->cache;
 		u64 packets;
 		u64 bytes;
 
-		if (counter->id > last_id)
+		if (counter->id > last_id) {
+			more = true;
 			break;
+		}
 
 		mlx5_cmd_fc_bulk_get(dev, b,
 				     counter->id, &packets, &bytes);
@@ -153,7 +164,14 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
 out:
 	mlx5_cmd_fc_bulk_free(b);
 
-	return node;
+	return more ? counter : NULL;
+}
+
+static void mlx5_free_fc(struct mlx5_core_dev *dev,
+			 struct mlx5_fc *counter)
+{
+	mlx5_cmd_fc_free(dev, counter->id);
+	kfree(counter);
 }
 
 static void mlx5_fc_stats_work(struct work_struct *work)
@@ -161,52 +179,33 @@ static void mlx5_fc_stats_work(struct work_struct *work)
 	struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
 						 priv.fc_stats.work.work);
 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	struct llist_node *tmplist = llist_del_all(&fc_stats->addlist);
+	struct mlx5_fc *counter = NULL, *last = NULL, *tmp;
 	unsigned long now = jiffies;
-	struct mlx5_fc *counter = NULL;
-	struct mlx5_fc *last = NULL;
-	struct rb_node *node;
-	LIST_HEAD(tmplist);
 
-	spin_lock(&fc_stats->addlist_lock);
-
-	list_splice_tail_init(&fc_stats->addlist, &tmplist);
-
-	if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
+	if (tmplist || !list_empty(&fc_stats->counters))
 		queue_delayed_work(fc_stats->wq, &fc_stats->work,
 				   fc_stats->sampling_interval);
 
-	spin_unlock(&fc_stats->addlist_lock);
-
-	list_for_each_entry(counter, &tmplist, list)
-		mlx5_fc_stats_insert(&fc_stats->counters, counter);
+	llist_for_each_entry(counter, tmplist, addlist)
+		mlx5_fc_stats_insert(dev, counter);
 
-	node = rb_first(&fc_stats->counters);
-	while (node) {
-		counter = rb_entry(node, struct mlx5_fc, node);
+	tmplist = llist_del_all(&fc_stats->dellist);
+	llist_for_each_entry_safe(counter, tmp, tmplist, dellist) {
+		list_del(&counter->list);
 
-		node = rb_next(node);
-
-		if (counter->deleted) {
-			rb_erase(&counter->node, &fc_stats->counters);
-
-			mlx5_cmd_fc_free(dev, counter->id);
-
-			kfree(counter);
-			continue;
-		}
-
-		last = counter;
+		mlx5_free_fc(dev, counter);
 	}
 
-	if (time_before(now, fc_stats->next_query) || !last)
+	if (time_before(now, fc_stats->next_query) ||
+	    list_empty(&fc_stats->counters))
 		return;
 
-	node = rb_first(&fc_stats->counters);
-	while (node) {
-		counter = rb_entry(node, struct mlx5_fc, node);
+	last = list_last_entry(&fc_stats->counters, struct mlx5_fc, list);
 
-		node = mlx5_fc_stats_query(dev, counter, last->id);
-	}
+	counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
+				   list);
+	while (counter)
+		counter = mlx5_fc_stats_query(dev, counter, last->id);
 
 	fc_stats->next_query = now + fc_stats->sampling_interval;
 }
@@ -220,24 +219,38 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
 	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
 	if (!counter)
 		return ERR_PTR(-ENOMEM);
+	INIT_LIST_HEAD(&counter->list);
 
 	err = mlx5_cmd_fc_alloc(dev, &counter->id);
 	if (err)
 		goto err_out;
 
 	if (aging) {
+		u32 id = counter->id;
+
 		counter->cache.lastuse = jiffies;
 		counter->aging = true;
 
-		spin_lock(&fc_stats->addlist_lock);
-		list_add(&counter->list, &fc_stats->addlist);
-		spin_unlock(&fc_stats->addlist_lock);
+		idr_preload(GFP_KERNEL);
+		spin_lock(&fc_stats->counters_idr_lock);
+
+		err = idr_alloc_u32(&fc_stats->counters_idr, counter, &id, id,
+				    GFP_NOWAIT);
+
+		spin_unlock(&fc_stats->counters_idr_lock);
+		idr_preload_end();
+		if (err)
+			goto err_out_alloc;
+
+		llist_add(&counter->addlist, &fc_stats->addlist);
 
 		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
 	}
 
 	return counter;
 
+err_out_alloc:
+	mlx5_cmd_fc_free(dev, counter->id);
 err_out:
 	kfree(counter);
@@ -253,13 +266,16 @@ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
 		return;
 
 	if (counter->aging) {
-		counter->deleted = true;
+		spin_lock(&fc_stats->counters_idr_lock);
+		WARN_ON(!idr_remove(&fc_stats->counters_idr, counter->id));
+		spin_unlock(&fc_stats->counters_idr_lock);
+
+		llist_add(&counter->dellist, &fc_stats->dellist);
 		mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
 		return;
 	}
 
-	mlx5_cmd_fc_free(dev, counter->id);
-	kfree(counter);
+	mlx5_free_fc(dev, counter);
 }
 EXPORT_SYMBOL(mlx5_fc_destroy);
@@ -267,9 +283,11 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
 {
 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
 
-	fc_stats->counters = RB_ROOT;
-	INIT_LIST_HEAD(&fc_stats->addlist);
-	spin_lock_init(&fc_stats->addlist_lock);
+	spin_lock_init(&fc_stats->counters_idr_lock);
+	idr_init(&fc_stats->counters_idr);
+	INIT_LIST_HEAD(&fc_stats->counters);
+	init_llist_head(&fc_stats->addlist);
+	init_llist_head(&fc_stats->dellist);
 
 	fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
 	if (!fc_stats->wq)
@@ -284,34 +302,22 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
 void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
 {
 	struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+	struct llist_node *tmplist;
 	struct mlx5_fc *counter;
 	struct mlx5_fc *tmp;
-	struct rb_node *node;
 
 	cancel_delayed_work_sync(&dev->priv.fc_stats.work);
 	destroy_workqueue(dev->priv.fc_stats.wq);
 	dev->priv.fc_stats.wq = NULL;
 
-	list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
-		list_del(&counter->list);
-
-		mlx5_cmd_fc_free(dev, counter->id);
-		kfree(counter);
-	}
+	idr_destroy(&fc_stats->counters_idr);
 
-	node = rb_first(&fc_stats->counters);
-	while (node) {
-		counter = rb_entry(node, struct mlx5_fc, node);
+	tmplist = llist_del_all(&fc_stats->addlist);
+	llist_for_each_entry_safe(counter, tmp, tmplist, addlist)
+		mlx5_free_fc(dev, counter);
 
-		node = rb_next(node);
-
-		rb_erase(&counter->node, &fc_stats->counters);
-
-		mlx5_cmd_fc_free(dev, counter->id);
-		kfree(counter);
-	}
+	list_for_each_entry_safe(counter, tmp, &fc_stats->counters, list)
+		mlx5_free_fc(dev, counter);
 }
 
 int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
...
@@ -349,11 +349,20 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
 
 static int mlx5i_init_rx(struct mlx5e_priv *priv)
 {
+	struct mlx5_core_dev *mdev = priv->mdev;
 	int err;
 
+	mlx5e_create_q_counters(priv);
+
+	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
+	if (err) {
+		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+		goto err_destroy_q_counters;
+	}
+
 	err = mlx5e_create_indirect_rqt(priv);
 	if (err)
-		return err;
+		goto err_close_drop_rq;
 
 	err = mlx5e_create_direct_rqts(priv);
 	if (err)
@@ -381,6 +390,10 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
 	mlx5e_destroy_direct_rqts(priv);
 err_destroy_indirect_rqts:
 	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+err_close_drop_rq:
+	mlx5e_close_drop_rq(&priv->drop_rq);
+err_destroy_q_counters:
+	mlx5e_destroy_q_counters(priv);
 	return err;
 }
@@ -391,6 +404,8 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
 	mlx5e_destroy_indirect_tirs(priv);
 	mlx5e_destroy_direct_rqts(priv);
 	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+	mlx5e_close_drop_rq(&priv->drop_rq);
+	mlx5e_destroy_q_counters(priv);
 }
 
 static const struct mlx5e_profile mlx5i_nic_profile = {
...
@@ -111,10 +111,10 @@ static void mlx5_pps_out(struct work_struct *work)
 	for (i = 0; i < clock->ptp_info.n_pins; i++) {
 		u64 tstart;
 
-		write_lock_irqsave(&clock->lock, flags);
+		write_seqlock_irqsave(&clock->lock, flags);
 		tstart = clock->pps_info.start[i];
 		clock->pps_info.start[i] = 0;
-		write_unlock_irqrestore(&clock->lock, flags);
+		write_sequnlock_irqrestore(&clock->lock, flags);
 		if (!tstart)
 			continue;
@@ -132,10 +132,10 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
 						overflow_work);
 	unsigned long flags;
 
-	write_lock_irqsave(&clock->lock, flags);
+	write_seqlock_irqsave(&clock->lock, flags);
 	timecounter_read(&clock->tc);
 	mlx5_update_clock_info_page(clock->mdev);
-	write_unlock_irqrestore(&clock->lock, flags);
+	write_sequnlock_irqrestore(&clock->lock, flags);
 	schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
 }
@@ -147,10 +147,10 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
 	u64 ns = timespec64_to_ns(ts);
 	unsigned long flags;
 
-	write_lock_irqsave(&clock->lock, flags);
+	write_seqlock_irqsave(&clock->lock, flags);
 	timecounter_init(&clock->tc, &clock->cycles, ns);
 	mlx5_update_clock_info_page(clock->mdev);
-	write_unlock_irqrestore(&clock->lock, flags);
+	write_sequnlock_irqrestore(&clock->lock, flags);
 
 	return 0;
 }
@@ -162,9 +162,9 @@ static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 	u64 ns;
 	unsigned long flags;
 
-	write_lock_irqsave(&clock->lock, flags);
+	write_seqlock_irqsave(&clock->lock, flags);
 	ns = timecounter_read(&clock->tc);
-	write_unlock_irqrestore(&clock->lock, flags);
+	write_sequnlock_irqrestore(&clock->lock, flags);
 
 	*ts = ns_to_timespec64(ns);
@@ -177,10 +177,10 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 						ptp_info);
 	unsigned long flags;
 
-	write_lock_irqsave(&clock->lock, flags);
+	write_seqlock_irqsave(&clock->lock, flags);
 	timecounter_adjtime(&clock->tc, delta);
 	mlx5_update_clock_info_page(clock->mdev);
-	write_unlock_irqrestore(&clock->lock, flags);
+	write_sequnlock_irqrestore(&clock->lock, flags);
 
 	return 0;
 }
@@ -203,12 +203,12 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
 	adj *= delta;
 	diff = div_u64(adj, 1000000000ULL);
 
-	write_lock_irqsave(&clock->lock, flags);
+	write_seqlock_irqsave(&clock->lock, flags);
 	timecounter_read(&clock->tc);
 	clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
 				       clock->nominal_c_mult + diff;
 	mlx5_update_clock_info_page(clock->mdev);
-	write_unlock_irqrestore(&clock->lock, flags);
+	write_sequnlock_irqrestore(&clock->lock, flags);
 
 	return 0;
 }
@@ -307,12 +307,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
 		ts.tv_nsec = rq->perout.start.nsec;
 		ns = timespec64_to_ns(&ts);
 		cycles_now = mlx5_read_internal_timer(mdev);
-		write_lock_irqsave(&clock->lock, flags);
+		write_seqlock_irqsave(&clock->lock, flags);
 		nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
 		nsec_delta = ns - nsec_now;
 		cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
 					 clock->cycles.mult);
-		write_unlock_irqrestore(&clock->lock, flags);
+		write_sequnlock_irqrestore(&clock->lock, flags);
 		time_stamp = cycles_now + cycles_delta;
 		field_select = MLX5_MTPPS_FS_PIN_MODE |
 			       MLX5_MTPPS_FS_PATTERN |
@@ -471,14 +471,14 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
 		ts.tv_sec += 1;
 		ts.tv_nsec = 0;
 		ns = timespec64_to_ns(&ts);
-		write_lock_irqsave(&clock->lock, flags);
+		write_seqlock_irqsave(&clock->lock, flags);
 		nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
 		nsec_delta = ns - nsec_now;
 		cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
 					 clock->cycles.mult);
 		clock->pps_info.start[pin] = cycles_now + cycles_delta;
 		schedule_work(&clock->pps_info.out_work);
-		write_unlock_irqrestore(&clock->lock, flags);
+		write_sequnlock_irqrestore(&clock->lock, flags);
 		break;
 	default:
 		mlx5_core_err(mdev, " Unhandled event\n");
@@ -498,7 +498,7 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
 		mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
 		return;
 	}
-	rwlock_init(&clock->lock);
+	seqlock_init(&clock->lock);
 	clock->cycles.read = read_internal_timer;
 	clock->cycles.shift = MLX5_CYCLES_SHIFT;
 	clock->cycles.mult = clocksource_khz2mult(dev_freq,
...
@@ -46,11 +46,13 @@ static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
 static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
 						u64 timestamp)
 {
+	unsigned int seq;
 	u64 nsec;
 
-	read_lock(&clock->lock);
-	nsec = timecounter_cyc2time(&clock->tc, timestamp);
-	read_unlock(&clock->lock);
+	do {
+		seq = read_seqbegin(&clock->lock);
+		nsec = timecounter_cyc2time(&clock->tc, timestamp);
+	} while (read_seqretry(&clock->lock, seq));
 
 	return ns_to_ktime(nsec);
 }
...
@@ -583,10 +583,11 @@ struct mlx5_irq_info {
 };
 
 struct mlx5_fc_stats {
-	struct rb_root counters;
-	struct list_head addlist;
-	/* protect addlist add/splice operations */
-	spinlock_t addlist_lock;
+	spinlock_t counters_idr_lock; /* protects counters_idr */
+	struct idr counters_idr;
+	struct list_head counters;
+	struct llist_head addlist;
+	struct llist_head dellist;
 
 	struct workqueue_struct *wq;
 	struct delayed_work work;
@@ -804,7 +805,7 @@ struct mlx5_pps {
 };
 
 struct mlx5_clock {
-	rwlock_t lock;
+	seqlock_t lock;
 	struct cyclecounter cycles;
 	struct timecounter tc;
 	struct hwtstamp_config hwtstamp_config;