Commit 006e8964 authored by Jakub Kicinski

Merge tag 'mlx5-updates-2024-02-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2024-02-01

1) IPSec global stats for xfrm and mlx5
2) XSK memory improvements for non-linear SKBs
3) Software steering debug dump to use seq_file ops
4) Various code clean-ups

* tag 'mlx5-updates-2024-02-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: XDP, Exclude headroom and tailroom from memory calculations
  net/mlx5e: XSK, Exclude tailroom from non-linear SKBs memory calculations
  net/mlx5: DR, Change SWS usage to debug fs seq_file interface
  net/mlx5: Change missing SyncE capability print to debug
  net/mlx5: Remove initial segmentation duplicate definitions
  net/mlx5: Return specific error code for timeout on wait_fw_init
  net/mlx5: SF, Stop waiting for FW as teardown was called
  net/mlx5: remove fw reporter dump option for non PF
  net/mlx5: remove fw_fatal reporter dump option for non PF
  net/mlx5: Rename mlx5_sf_dev_remove
  Documentation: Fix counter name of mlx5 vnic reporter
  net/mlx5e: Delete obsolete IPsec code
  net/mlx5e: Connect mlx5 IPsec statistics with XFRM core
  xfrm: get global statistics from the offloaded device
  xfrm: generalize xdo_dev_state_update_curlft to allow statistics update
====================

Link: https://lore.kernel.org/r/20240206005527.1353368-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 313fb184 a90f5591
@@ -250,7 +250,7 @@ them in realtime.
 Description of the vnic counters:

-- total_q_under_processor_handle
+- total_error_queues
   number of queues in an error state due to
   an async error or errored command.
 - send_queue_priority_update_flow

@@ -259,7 +259,8 @@ Description of the vnic counters:
   number of times CQ entered an error state due to an overflow.
 - async_eq_overrun
   number of times an EQ mapped to async events was overrun.
-  comp_eq_overrun number of times an EQ mapped to completion events was
+- comp_eq_overrun
+  number of times an EQ mapped to completion events was
   overrun.
 - quota_exceeded_command
   number of commands issued and failed due to quota exceeded.
......
@@ -71,9 +71,9 @@ Callbacks to implement
   bool  (*xdo_dev_offload_ok) (struct sk_buff *skb,
                                struct xfrm_state *x);
   void  (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
+  void  (*xdo_dev_state_update_stats) (struct xfrm_state *x);
   /* Solely packet offload callbacks */
-  void  (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
   int   (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
   void  (*xdo_dev_policy_delete) (struct xfrm_policy *x);
   void  (*xdo_dev_policy_free) (struct xfrm_policy *x);

@@ -191,6 +191,6 @@ xdo_dev_policy_free() on any remaining offloaded states.
 Outcome of HW handling packets, the XFRM core can't count hard, soft limits.
 The HW/driver are responsible to perform it and provide accurate data when
-xdo_dev_state_update_curlft() is called. In case one of these limits
+xdo_dev_state_update_stats() is called. In case one of these limits
 occurred, the driver needs to call xfrm_state_check_expire() to make sure
 that XFRM performs rekeying sequence.
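
For driver authors, the contract above reads roughly as follows — a minimal sketch of a hypothetical driver wiring up the generalized callback (the mydrv_* names and the hw_query_sa_counters() helper are invented for illustration; the real mlx5 implementation appears further down in this diff):

static void mydrv_xfrm_update_stats(struct xfrm_state *x)
{
        struct net *net = dev_net(x->xso.dev);
        u64 packets, bytes, integrity_failed;

        /* hw_query_sa_counters() stands in for however the device
         * exposes per-SA packet/byte and error counters.
         */
        hw_query_sa_counters(x, &packets, &bytes, &integrity_failed);

        /* Lifetime accounting the XFRM core can no longer do itself. */
        x->curlft.packets += packets;
        x->curlft.bytes += bytes;

        /* Error counters feed the per-state stats and the global MIB. */
        x->stats.integrity_failed += integrity_failed;
        XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, integrity_failed);
}

static const struct xfrmdev_ops mydrv_xfrmdev_ops = {
        .xdo_dev_state_update_stats = mydrv_xfrm_update_stats,
        /* ... remaining xdo_* callbacks ... */
};

Note that the rekeying kick on limit expiry has to come from outside this callback (for example a driver event handler calling xfrm_state_check_expire()), since xfrm_state_check_expire() itself invokes xdo_dev_state_update_stats().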
@@ -210,7 +210,7 @@ static bool is_dpll_supported(struct mlx5_core_dev *dev)
                return false;

        if (!MLX5_CAP_MCAM_REG2(dev, synce_registers)) {
-               mlx5_core_warn(dev, "Missing SyncE capability\n");
+               mlx5_core_dbg(dev, "Missing SyncE capability\n");
                return false;
        }
......
@@ -240,11 +240,14 @@ static u32 mlx5e_rx_get_linear_sz_xsk(struct mlx5e_params *params,
        return xsk->headroom + hw_mtu;
 }

-static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool xsk)
+static u32 mlx5e_rx_get_linear_sz_skb(struct mlx5e_params *params, bool no_head_tail_room)
 {
-       /* SKBs built on XDP_PASS on XSK RQs don't have headroom. */
-       u16 headroom = xsk ? 0 : mlx5e_get_linear_rq_headroom(params, NULL);
        u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+       u16 headroom;
+
+       if (no_head_tail_room)
+               return SKB_DATA_ALIGN(hw_mtu);
+
+       headroom = mlx5e_get_linear_rq_headroom(params, NULL);
        return MLX5_SKB_FRAG_SZ(headroom + hw_mtu);
 }

@@ -254,6 +257,7 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
                                         struct mlx5e_params *params,
                                         struct mlx5e_xsk_param *xsk,
                                         bool mpwqe)
 {
+       bool no_head_tail_room;
        u32 sz;

        /* XSK frames are mapped as individual pages, because frames may come in

@@ -262,7 +266,13 @@ static u32 mlx5e_rx_get_linear_stride_sz(struct mlx5_core_dev *mdev,
        if (xsk)
                return mpwqe ? 1 << mlx5e_mpwrq_page_shift(mdev, xsk) : PAGE_SIZE;

-       sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, false));
+       no_head_tail_room = params->xdp_prog && mpwqe && !mlx5e_rx_is_linear_skb(mdev, params, xsk);
+
+       /* When no_head_tail_room is set, headroom and tailroom are excluded from skb calculations.
+        * no_head_tail_room should be set in the case of XDP with Striding RQ
+        * when SKB is not linear. This is because another page is allocated for the linear part.
+        */
+       sz = roundup_pow_of_two(mlx5e_rx_get_linear_sz_skb(params, no_head_tail_room));

        /* XDP in mlx5e doesn't support multiple packets per page.
         * Do not assume sz <= PAGE_SIZE if params->xdp_prog is set.

@@ -289,7 +299,11 @@ bool mlx5e_rx_is_linear_skb(struct mlx5_core_dev *mdev,
        if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE)
                return false;

-       /* Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
+       /* Call mlx5e_rx_get_linear_sz_skb with the no_head_tail_room parameter set
+        * to exclude headroom and tailroom from calculations.
+        * no_head_tail_room is true when SKB is built on XDP_PASS on XSK RQs
+        * since packet data buffers don't have headroom and tailroom reserved for the SKB.
+        * Both XSK and non-XSK cases allocate an SKB on XDP_PASS. Packet data
         * must fit into a CPU page.
         */
        if (mlx5e_rx_get_linear_sz_skb(params, xsk) > PAGE_SIZE)
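
To make the saving concrete, here is a rough worked example of the stride-size math above. It is a userspace sketch with illustrative constants (64-byte cache lines, skb_shared_info assumed to occupy 320 bytes once aligned, and MLX5_SKB_FRAG_SZ(len) assumed to be SKB_DATA_ALIGN(len) plus that shared-info overhead); exact values depend on kernel configuration:

#include <stdio.h>
#include <stdint.h>

#define CACHE_LINE     64
#define SHINFO_ALIGNED 320 /* assumed SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

/* Models SKB_DATA_ALIGN: round len up to a cache-line multiple. */
static uint32_t align_up(uint32_t len, uint32_t a)
{
        return (len + a - 1) & ~(a - 1);
}

static uint32_t pow2_roundup(uint32_t n)
{
        uint32_t p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        uint32_t hw_mtu = 1522;  /* roughly MLX5E_SW2HW_MTU of a 1500-byte sw_mtu */
        uint32_t headroom = 256; /* typical XDP headroom */

        /* With head/tail room: MLX5_SKB_FRAG_SZ(headroom + hw_mtu) */
        uint32_t with_room = align_up(headroom + hw_mtu, CACHE_LINE) + SHINFO_ALIGNED;
        /* no_head_tail_room: just SKB_DATA_ALIGN(hw_mtu) */
        uint32_t without_room = align_up(hw_mtu, CACHE_LINE);

        printf("stride with room:    %u\n", pow2_roundup(with_room));    /* 4096 */
        printf("stride without room: %u\n", pow2_roundup(without_room)); /* 2048 */
        return 0;
}

Under these assumptions the stride halves from 4096 to 2048 bytes, which is the memory improvement the XSK patches in this series are after.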
......
@@ -984,21 +984,41 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
        queue_work(sa_entry->ipsec->wq, &work->work);
 }

-static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
+static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
 {
        struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
        struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+       struct net *net = dev_net(x->xso.dev);
        u64 packets, bytes, lastuse;

        lockdep_assert(lockdep_is_held(&x->lock) ||
-                      lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));
+                      lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
+                      lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_state_lock));

        if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
                return;

+       if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+               mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
+               x->stats.integrity_failed += packets;
+               XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
+
+               mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
+               XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
+       }
+
+       if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+               return;
+
        mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
        x->curlft.packets += packets;
        x->curlft.bytes += bytes;
+
+       if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+               mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
+               x->stats.replay += packets;
+               XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
+       }
 }

 static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,

@@ -1156,7 +1176,7 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
        .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok,
        .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
-       .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
+       .xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
        .xdo_dev_policy_add = mlx5e_xfrm_add_policy,
        .xdo_dev_policy_delete = mlx5e_xfrm_del_policy,
        .xdo_dev_policy_free = mlx5e_xfrm_free_policy,
......
@@ -137,7 +137,6 @@ struct mlx5e_ipsec_hw_stats {
 struct mlx5e_ipsec_sw_stats {
        atomic64_t ipsec_rx_drop_sp_alloc;
        atomic64_t ipsec_rx_drop_sadb_miss;
-       atomic64_t ipsec_rx_drop_syndrome;
        atomic64_t ipsec_tx_drop_bundle;
        atomic64_t ipsec_tx_drop_no_state;
        atomic64_t ipsec_tx_drop_not_ip;
......
@@ -304,12 +304,6 @@ bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
        return false;
 }

-enum {
-       MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED,
-       MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED,
-       MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER,
-};
-
 void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
                                       struct sk_buff *skb,
                                       u32 ipsec_meta_data)

@@ -343,20 +337,7 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
        xo = xfrm_offload(skb);
        xo->flags = CRYPTO_DONE;
-
-       switch (MLX5_IPSEC_METADATA_SYNDROM(ipsec_meta_data)) {
-       case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED:
-               xo->status = CRYPTO_SUCCESS;
-               break;
-       case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_AUTH_FAILED:
-               xo->status = CRYPTO_TUNNEL_ESP_AUTH_FAILED;
-               break;
-       case MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_BAD_TRAILER:
-               xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
-               break;
-       default:
-               atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
-       }
+       xo->status = CRYPTO_SUCCESS;
 }

@@ -374,8 +355,6 @@ int mlx5_esw_ipsec_rx_make_metadata(struct mlx5e_priv *priv, u32 id, u32 *metadata)
                return err;
        }

-       *metadata = MLX5_IPSEC_METADATA_CREATE(ipsec_obj_id,
-                                              MLX5E_IPSEC_OFFLOAD_RX_SYNDROME_DECRYPTED);
+       *metadata = ipsec_obj_id;

        return 0;
 }
@@ -43,7 +43,6 @@
 #define MLX5_IPSEC_METADATA_MARKER(metadata)  (((metadata) >> 31) & 0x1)
 #define MLX5_IPSEC_METADATA_SYNDROM(metadata) (((metadata) >> 24) & GENMASK(5, 0))
 #define MLX5_IPSEC_METADATA_HANDLE(metadata)  ((metadata) & GENMASK(23, 0))
-#define MLX5_IPSEC_METADATA_CREATE(id, syndrome) ((id) | ((syndrome) << 24))

 struct mlx5e_accel_tx_ipsec_state {
        struct xfrm_offload *xo;
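
For orientation, the 32-bit metadata word these macros describe splits into marker / syndrome / handle fields. Decoding an arbitrary example value (the value itself is made up):

u32 metadata = 0x81234567;

u32 marker   = MLX5_IPSEC_METADATA_MARKER(metadata);  /* (metadata >> 31) & 0x1  = 1 */
u32 syndrome = MLX5_IPSEC_METADATA_SYNDROM(metadata); /* (metadata >> 24) & 0x3f = 0x01 */
u32 handle   = MLX5_IPSEC_METADATA_HANDLE(metadata);  /* metadata & 0xffffff = 0x234567 */

Since the RX path above now reports CRYPTO_SUCCESS unconditionally and writes the bare object id into the metadata, MLX5_IPSEC_METADATA_CREATE() loses its only caller, hence its removal.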
......
@@ -51,7 +51,6 @@ static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
 static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
        { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_syndrome) },
        { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_bundle) },
        { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_no_state) },
        { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_tx_drop_not_ip) },
......
@@ -366,18 +366,18 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
                return -EIO;
        }

-       mlx5_set_nic_state(dev, MLX5_NIC_IFC_DISABLED);
+       mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED);

        /* Loop until device state turns to disable */
        end = jiffies + msecs_to_jiffies(delay_ms);
        do {
-               if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+               if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
                        break;

                cond_resched();
        } while (!time_after(jiffies, end));

-       if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+       if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) {
                dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
                        mlx5_get_nic_state(dev), delay_ms);
                return -EIO;
......
@@ -116,9 +116,9 @@ u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev)
                return MLX5_SENSOR_PCI_COMM_ERR;
        if (pci_channel_offline(dev->pdev))
                return MLX5_SENSOR_PCI_ERR;
-       if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+       if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
                return MLX5_SENSOR_NIC_DISABLED;
-       if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_SW_RESET)
+       if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET)
                return MLX5_SENSOR_NIC_SW_RESET;
        if (sensor_fw_synd_rfr(dev))
                return MLX5_SENSOR_FW_SYND_RFR;

@@ -185,7 +185,7 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
        /* Write the NIC interface field to initiate the reset, the command
         * interface address also resides here, don't overwrite it.
         */
-       mlx5_set_nic_state(dev, MLX5_NIC_IFC_SW_RESET);
+       mlx5_set_nic_state(dev, MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET);

        return true;
 }

@@ -246,13 +246,13 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
        /* Recover from SW reset */
        end = jiffies + msecs_to_jiffies(delay_ms);
        do {
-               if (mlx5_get_nic_state(dev) == MLX5_NIC_IFC_DISABLED)
+               if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
                        break;

                msleep(20);
        } while (!time_after(jiffies, end));

-       if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
+       if (mlx5_get_nic_state(dev) != MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED) {
                dev_err(&dev->pdev->dev, "NIC IFC still %d after %lums.\n",
                        mlx5_get_nic_state(dev), delay_ms);
        }

@@ -272,26 +272,26 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
        u8 nic_interface = mlx5_get_nic_state(dev);

        switch (nic_interface) {
-       case MLX5_NIC_IFC_FULL:
+       case MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER:
                mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n");
                break;

-       case MLX5_NIC_IFC_DISABLED:
+       case MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED:
                mlx5_core_warn(dev, "starting teardown\n");
                break;

-       case MLX5_NIC_IFC_NO_DRAM_NIC:
+       case MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC:
                mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n");
                break;

-       case MLX5_NIC_IFC_SW_RESET:
+       case MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET:
                /* The IFC mode field is 3 bits, so it will read 0x7 in 2 cases:
                 * 1. PCI has been disabled (ie. PCI-AER, PF driver unloaded
                 *    and this is a VF), this is not recoverable by SW reset.
                 *    Logging of this is handled elsewhere.
                 * 2. FW reset has been issued by another function, driver can
                 *    be reloaded to recover after the mode switches to
-                *    MLX5_NIC_IFC_DISABLED.
+                *    MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED.
                 */
                if (dev->priv.health.fatal_error != MLX5_SENSOR_PCI_COMM_ERR)
                        mlx5_core_warn(dev, "NIC SW reset in progress\n");
@@ -555,12 +555,17 @@ static void mlx5_fw_reporter_err_work(struct work_struct *work)
                               &fw_reporter_ctx);
 }

-static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
+static const struct devlink_health_reporter_ops mlx5_fw_reporter_pf_ops = {
        .name = "fw",
        .diagnose = mlx5_fw_reporter_diagnose,
        .dump = mlx5_fw_reporter_dump,
 };

+static const struct devlink_health_reporter_ops mlx5_fw_reporter_ops = {
+       .name = "fw",
+       .diagnose = mlx5_fw_reporter_diagnose,
+};
+
 static int
 mlx5_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
                               void *priv_ctx,

@@ -646,12 +651,17 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
        }
 }

-static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
+static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_pf_ops = {
        .name = "fw_fatal",
        .recover = mlx5_fw_fatal_reporter_recover,
        .dump = mlx5_fw_fatal_reporter_dump,
 };

+static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
+       .name = "fw_fatal",
+       .recover = mlx5_fw_fatal_reporter_recover,
+};
+
 #define MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD 180000
 #define MLX5_FW_REPORTER_PF_GRACEFUL_PERIOD 60000
 #define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000

@@ -659,10 +669,14 @@ static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
 void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
 {
+       const struct devlink_health_reporter_ops *fw_fatal_ops;
        struct mlx5_core_health *health = &dev->priv.health;
+       const struct devlink_health_reporter_ops *fw_ops;
        struct devlink *devlink = priv_to_devlink(dev);
        u64 grace_period;

+       fw_fatal_ops = &mlx5_fw_fatal_reporter_pf_ops;
+       fw_ops = &mlx5_fw_reporter_pf_ops;
        if (mlx5_core_is_ecpf(dev)) {
                grace_period = MLX5_FW_REPORTER_ECPF_GRACEFUL_PERIOD;
        } else if (mlx5_core_is_pf(dev)) {

@@ -670,18 +684,19 @@ void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
        } else {
                /* VF or SF */
                grace_period = MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD;
+               fw_fatal_ops = &mlx5_fw_fatal_reporter_ops;
+               fw_ops = &mlx5_fw_reporter_ops;
        }

        health->fw_reporter =
-               devl_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
-                                           0, dev);
+               devl_health_reporter_create(devlink, fw_ops, 0, dev);
        if (IS_ERR(health->fw_reporter))
                mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
                               PTR_ERR(health->fw_reporter));

        health->fw_fatal_reporter =
                devl_health_reporter_create(devlink,
-                                           &mlx5_fw_fatal_reporter_ops,
+                                           fw_fatal_ops,
                                            grace_period,
                                            dev);
        if (IS_ERR(health->fw_fatal_reporter))
......
@@ -187,31 +187,36 @@ static struct mlx5_profile profile[] = {
 };

 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
-                       u32 warn_time_mili)
+                       u32 warn_time_mili, const char *init_state)
 {
        unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
        unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
        u32 fw_initializing;
-       int err = 0;

        do {
                fw_initializing = ioread32be(&dev->iseg->initializing);
                if (!(fw_initializing >> 31))
                        break;
-               if (time_after(jiffies, end) ||
-                   test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
-                       err = -EBUSY;
-                       break;
+               if (time_after(jiffies, end)) {
+                       mlx5_core_err(dev, "Firmware over %u MS in %s state, aborting\n",
+                                     max_wait_mili, init_state);
+                       return -ETIMEDOUT;
+               }
+               if (test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
+                       mlx5_core_warn(dev, "device is being removed, stop waiting for FW %s\n",
+                                      init_state);
+                       return -ENODEV;
                }
                if (warn_time_mili && time_after(jiffies, warn)) {
-                       mlx5_core_warn(dev, "Waiting for FW initialization, timeout abort in %ds (0x%x)\n",
-                                      jiffies_to_msecs(end - warn) / 1000, fw_initializing);
+                       mlx5_core_warn(dev, "Waiting for FW %s, timeout abort in %ds (0x%x)\n",
+                                      init_state, jiffies_to_msecs(end - warn) / 1000,
+                                      fw_initializing);
                        warn = jiffies + msecs_to_jiffies(warn_time_mili);
                }
                msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
        } while (true);

-       return err;
+       return 0;
 }

 static void mlx5_set_driver_version(struct mlx5_core_dev *dev)

@@ -1151,12 +1156,10 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeout)
        /* wait for firmware to accept initialization segments configurations
         */
        err = wait_fw_init(dev, timeout,
-                          mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
-       if (err) {
-               mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
-                             timeout);
+                          mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL),
+                          "pre-initializing");
+       if (err)
                return err;
-       }

        err = mlx5_cmd_enable(dev);
        if (err) {

@@ -1166,12 +1169,9 @@ static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeout)
        mlx5_tout_query_iseg(dev);

-       err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0);
-       if (err) {
-               mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n",
-                             mlx5_tout_ms(dev, FW_INIT));
+       err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0, "initializing");
+       if (err)
                goto err_cmd_cleanup;
-       }

        dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
......
@@ -312,13 +312,6 @@ static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev)
        return ret;
 }

-enum {
-       MLX5_NIC_IFC_FULL = 0,
-       MLX5_NIC_IFC_DISABLED = 1,
-       MLX5_NIC_IFC_NO_DRAM_NIC = 2,
-       MLX5_NIC_IFC_SW_RESET = 7
-};
-
 u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
 void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
......
@@ -74,7 +74,8 @@ static void mlx5_sf_dev_release(struct device *device)
        kfree(sf_dev);
 }

-static void mlx5_sf_dev_remove(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev)
+static void mlx5_sf_dev_remove_aux(struct mlx5_core_dev *dev,
+                                  struct mlx5_sf_dev *sf_dev)
 {
        int id;

@@ -138,7 +139,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id,
        return;

 xa_err:
-       mlx5_sf_dev_remove(dev, sf_dev);
+       mlx5_sf_dev_remove_aux(dev, sf_dev);
 add_err:
        mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n",
                      sf_index, sfnum, err);

@@ -149,7 +150,7 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev)
        struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;

        xa_erase(&table->devices, sf_index);
-       mlx5_sf_dev_remove(dev, sf_dev);
+       mlx5_sf_dev_remove_aux(dev, sf_dev);
 }

@@ -367,7 +368,7 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
        xa_for_each(&table->devices, index, sf_dev) {
                xa_erase(&table->devices, index);
-               mlx5_sf_dev_remove(table->dev, sf_dev);
+               mlx5_sf_dev_remove_aux(table->dev, sf_dev);
        }
 }
......
@@ -95,24 +95,29 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxiliary_device_id *id)
 static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
 {
        struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
-       struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
+       struct mlx5_core_dev *mdev = sf_dev->mdev;
+       struct devlink *devlink;

-       mlx5_drain_health_wq(sf_dev->mdev);
+       devlink = priv_to_devlink(mdev);
+       set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+       mlx5_drain_health_wq(mdev);
        devlink_unregister(devlink);
-       if (mlx5_dev_is_lightweight(sf_dev->mdev))
-               mlx5_uninit_one_light(sf_dev->mdev);
+       if (mlx5_dev_is_lightweight(mdev))
+               mlx5_uninit_one_light(mdev);
        else
-               mlx5_uninit_one(sf_dev->mdev);
-       iounmap(sf_dev->mdev->iseg);
-       mlx5_mdev_uninit(sf_dev->mdev);
+               mlx5_uninit_one(mdev);
+       iounmap(mdev->iseg);
+       mlx5_mdev_uninit(mdev);
        mlx5_devlink_free(devlink);
 }

 static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
 {
        struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
+       struct mlx5_core_dev *mdev = sf_dev->mdev;

-       mlx5_unload_one(sf_dev->mdev, false);
+       set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+       mlx5_unload_one(mdev, false);
 }

 static const struct auxiliary_device_id mlx5_sf_dev_id_table[] = {
......
 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
 /* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

+#define MLX5DR_DEBUG_DUMP_BUFF_SIZE (64 * 1024 * 1024)
+#define MLX5DR_DEBUG_DUMP_BUFF_LENGTH 512
+
+enum {
+       MLX5DR_DEBUG_DUMP_STATE_FREE,
+       MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS,
+};
+
+struct mlx5dr_dbg_dump_buff {
+       char *buff;
+       u32 index;
+       struct list_head node;
+};
+
+struct mlx5dr_dbg_dump_data {
+       struct list_head buff_list;
+};
+
 struct mlx5dr_dbg_dump_info {
        struct mutex dbg_mutex; /* protect dbg lists */
        struct dentry *steering_debugfs;
        struct dentry *fdb_debugfs;
+       struct mlx5dr_dbg_dump_data *dump_data;
+       atomic_t state;
 };

 void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn);
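
The header alone does not show the read side of the new dump interface; for context, the standard debugfs seq_file pattern such a dump plugs into looks roughly like this (the dr_dump_* iterator names are placeholders, not the actual mlx5 functions, which chunk their output through the mlx5dr_dbg_dump_buff list declared above):

static const struct seq_operations dr_dump_seq_ops = {
        .start = dr_dump_start, /* position the iterator, possibly mid-dump */
        .next  = dr_dump_next,  /* advance to the next dump chunk */
        .stop  = dr_dump_stop,  /* release per-walk resources */
        .show  = dr_dump_show,  /* emit one chunk via seq_printf()/seq_write() */
};

static int dr_dump_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &dr_dump_seq_ops);
}

static const struct file_operations dr_dump_fops = {
        .owner   = THIS_MODULE,
        .open    = dr_dump_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

The point of the conversion is that seq_file re-invokes show() as userspace reads, so large software-steering dumps stream out chunk by chunk instead of requiring one contiguous buffer sized to the whole output.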
......
@@ -10661,6 +10661,7 @@ enum {
        MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0,
        MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1,
        MLX5_INITIAL_SEG_NIC_INTERFACE_NO_DRAM_NIC = 0x2,
+       MLX5_INITIAL_SEG_NIC_INTERFACE_SW_RESET = 0x7,
 };

 enum {
......
@@ -1062,7 +1062,7 @@ struct xfrmdev_ops {
        bool    (*xdo_dev_offload_ok) (struct sk_buff *skb,
                                       struct xfrm_state *x);
        void    (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
-       void    (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
+       void    (*xdo_dev_state_update_stats) (struct xfrm_state *x);
        int     (*xdo_dev_policy_add) (struct xfrm_policy *x, struct netlink_ext_ack *extack);
        void    (*xdo_dev_policy_delete) (struct xfrm_policy *x);
        void    (*xdo_dev_policy_free) (struct xfrm_policy *x);
......
@@ -51,8 +51,10 @@
 #ifdef CONFIG_XFRM_STATISTICS
 #define XFRM_INC_STATS(net, field)     SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
+#define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
 #else
 #define XFRM_INC_STATS(net, field)     ((void)(net))
+#define XFRM_ADD_STATS(net, field, val) ((void)(net))
 #endif

@@ -1577,22 +1579,20 @@ struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
                                           unsigned short family);
 int xfrm_state_check_expire(struct xfrm_state *x);
+void xfrm_state_update_stats(struct net *net);
 #ifdef CONFIG_XFRM_OFFLOAD
-static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x)
+static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
 {
        struct xfrm_dev_offload *xdo = &x->xso;
        struct net_device *dev = xdo->dev;

-       if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
-               return;
-
        if (dev && dev->xfrmdev_ops &&
-           dev->xfrmdev_ops->xdo_dev_state_update_curlft)
-               dev->xfrmdev_ops->xdo_dev_state_update_curlft(x);
+           dev->xfrmdev_ops->xdo_dev_state_update_stats)
+               dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
 }
 #else
-static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x) {}
+static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
 #endif
 void xfrm_state_insert(struct xfrm_state *x);
 int xfrm_state_add(struct xfrm_state *x);
......
@@ -52,6 +52,7 @@ static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
        memset(buff, 0, sizeof(unsigned long) * LINUX_MIB_XFRMMAX);

+       xfrm_state_update_stats(net);
        snmp_get_cpu_field_batch(buff, xfrm_mib_list,
                                 net->mib.xfrm_statistics);
        for (i = 0; xfrm_mib_list[i].name; i++)
......
@@ -570,7 +570,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
        int err = 0;

        spin_lock(&x->lock);
-       xfrm_dev_state_update_curlft(x);
+       xfrm_dev_state_update_stats(x);

        if (x->km.state == XFRM_STATE_DEAD)
                goto out;

@@ -1935,7 +1935,7 @@ EXPORT_SYMBOL(xfrm_state_update);
 int xfrm_state_check_expire(struct xfrm_state *x)
 {
-       xfrm_dev_state_update_curlft(x);
+       xfrm_dev_state_update_stats(x);

        if (!READ_ONCE(x->curlft.use_time))
                WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());

@@ -1957,6 +1957,19 @@ int xfrm_state_check_expire(struct xfrm_state *x)
 }
 EXPORT_SYMBOL(xfrm_state_check_expire);

+void xfrm_state_update_stats(struct net *net)
+{
+       struct xfrm_state *x;
+       int i;
+
+       spin_lock_bh(&net->xfrm.xfrm_state_lock);
+       for (i = 0; i <= net->xfrm.state_hmask; i++) {
+               hlist_for_each_entry(x, net->xfrm.state_bydst + i, bydst)
+                       xfrm_dev_state_update_stats(x);
+       }
+       spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+}
+
 struct xfrm_state *
 xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
                  u8 proto, unsigned short family)
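
One practical consequence: because xfrm_statistics_seq_show() (previous hunk) now calls xfrm_state_update_stats() before snapshotting the MIB, hardware-detected drops surface in the ordinary /proc/net/xfrm_stat dump. A trivial userspace read is enough to observe them — a sketch:

#include <stdio.h>

int main(void)
{
        char line[256];
        FILE *f = fopen("/proc/net/xfrm_stat", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* Lines look like "XfrmInStateProtoError <count>"; with packet
         * offload active the counts now include device-side drops.
         */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);
        return 0;
}

This is also why the mlx5 lockdep assertion earlier in this diff gained xfrm_state_lock as a legal calling context: xfrm_state_update_stats() walks the state hash under that lock.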
......
@@ -902,7 +902,7 @@ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
        memcpy(&p->sel, &x->sel, sizeof(p->sel));
        memcpy(&p->lft, &x->lft, sizeof(p->lft));
        if (x->xso.dev)
-               xfrm_dev_state_update_curlft(x);
+               xfrm_dev_state_update_stats(x);
        memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
        put_unaligned(x->stats.replay_window, &p->stats.replay_window);
        put_unaligned(x->stats.replay, &p->stats.replay);
......