Commit 50f477fe authored by Ben Ben-Ishay, committed by Saeed Mahameed

net/mlx5e: Rename lro_timeout to packet_merge_timeout

TIR stands for transport interface receive, the TIR object is
responsible for performing all transport related operations on
the receive side like packet processing, demultiplexing the packets
to different RQ's, etc.
lro_timeout is a field in the TIR that is used to set the timeout for an LRO
session. This series introduces a new packet merge type; therefore, rename
lro_timeout to packet_merge_timeout to cover all packet merge types.
Signed-off-by: Ben Ben-Ishay <benishay@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 54b2b3ec
...@@ -265,7 +265,7 @@ struct mlx5e_params { ...@@ -265,7 +265,7 @@ struct mlx5e_params {
bool scatter_fcs_en; bool scatter_fcs_en;
bool rx_dim_enabled; bool rx_dim_enabled;
bool tx_dim_enabled; bool tx_dim_enabled;
u32 lro_timeout; u32 packet_merge_timeout;
u32 pflags; u32 pflags;
struct bpf_prog *xdp_prog; struct bpf_prog *xdp_prog;
struct mlx5e_xsk *xsk; struct mlx5e_xsk *xsk;
......
...@@ -173,7 +173,7 @@ struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params) ...@@ -173,7 +173,7 @@ struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params)
lro_param = (struct mlx5e_lro_param) { lro_param = (struct mlx5e_lro_param) {
.enabled = params->lro_en, .enabled = params->lro_en,
.timeout = params->lro_timeout, .timeout = params->packet_merge_timeout,
}; };
return lro_param; return lro_param;
......
...@@ -82,9 +82,9 @@ void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder, ...@@ -82,9 +82,9 @@ void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
if (!lro_param->enabled) if (!lro_param->enabled)
return; return;
MLX5_SET(tirc, tirc, lro_enable_mask, MLX5_SET(tirc, tirc, packet_merge_mask,
MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO |
MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO);
MLX5_SET(tirc, tirc, lro_max_ip_payload_size, MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
(MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8); (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, lro_param->timeout); MLX5_SET(tirc, tirc, lro_timeout_period_usecs, lro_param->timeout);
......
...@@ -4404,7 +4404,7 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 ...@@ -4404,7 +4404,7 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL)) if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
params->lro_en = !slow_pci_heuristic(mdev); params->lro_en = !slow_pci_heuristic(mdev);
} }
params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); params->packet_merge_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */ /* CQ moderation params */
rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
......
...@@ -3361,8 +3361,8 @@ enum { ...@@ -3361,8 +3361,8 @@ enum {
}; };
enum { enum {
MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO = 0x1, MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO = BIT(0),
MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO = 0x2, MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO = BIT(1),
}; };
enum { enum {
...@@ -3387,7 +3387,7 @@ struct mlx5_ifc_tirc_bits { ...@@ -3387,7 +3387,7 @@ struct mlx5_ifc_tirc_bits {
u8 reserved_at_80[0x4]; u8 reserved_at_80[0x4];
u8 lro_timeout_period_usecs[0x10]; u8 lro_timeout_period_usecs[0x10];
u8 lro_enable_mask[0x4]; u8 packet_merge_mask[0x4];
u8 lro_max_ip_payload_size[0x8]; u8 lro_max_ip_payload_size[0x8];
u8 reserved_at_a0[0x40]; u8 reserved_at_a0[0x40];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment