Commit 0b4f5add authored by David S. Miller

Merge branch 'mlx5-fixes'

Tariq Toukan says:

====================
mlx5 fixes 24-05-22

This patchset provides bug fixes to mlx5 core and Eth drivers.

Series generated against:
commit 9c91c7fa ("net: mana: Fix the extra HZ in mana_hwc_send_request")
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 128d54fb 83fea49f
@@ -102,8 +102,14 @@ static inline void
 mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
 {
 	int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+	struct udphdr *udphdr;
 
-	udp_hdr(skb)->len = htons(payload_len);
+	if (skb->encapsulation)
+		udphdr = (struct udphdr *)skb_inner_transport_header(skb);
+	else
+		udphdr = udp_hdr(skb);
+
+	udphdr->len = htons(payload_len);
 }
 
 struct mlx5e_accel_tx_state {
......
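The fix above points the length update at the inner UDP header, since for encapsulated GSO the payload length describes the inner datagram, not the tunnel header. Below is a minimal user-space sketch of that selection logic; the struct and helper are simplified stand-ins, not the kernel's sk_buff API.

/* Simplified, user-space model of the header-selection fix above.
 * struct skb_model and its fields are illustrative stand-ins. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h> /* htons, ntohs */

struct udphdr_model { uint16_t source, dest, len, check; };

struct skb_model {
	int encapsulation;             /* tunnel header present? */
	struct udphdr_model outer_udp; /* outer transport header */
	struct udphdr_model inner_udp; /* inner transport header */
	uint16_t gso_size;
};

static void udp_gso_handle_tx(struct skb_model *skb)
{
	uint16_t payload_len = skb->gso_size + sizeof(struct udphdr_model);
	struct udphdr_model *udp;

	/* The pre-fix code always patched the outer header; the fix
	 * picks the inner one for encapsulated packets. */
	udp = skb->encapsulation ? &skb->inner_udp : &skb->outer_udp;
	udp->len = htons(payload_len);
}

int main(void)
{
	struct skb_model skb = { .encapsulation = 1, .gso_size = 1400 };

	udp_gso_handle_tx(&skb);
	printf("inner udp len: %u\n", ntohs(skb.inner_udp.len));
	printf("outer udp len: %u (untouched)\n", ntohs(skb.outer_udp.len));
	return 0;
}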
@@ -750,8 +750,7 @@ static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
 err_fs_ft:
 	if (rx->allow_tunnel_mode)
 		mlx5_eswitch_unblock_encap(mdev);
-	mlx5_del_flow_rules(rx->status.rule);
-	mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+	mlx5_ipsec_rx_status_destroy(ipsec, rx);
err_add:
 	mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
......
@@ -97,18 +97,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
 	if (!x || !x->xso.offload_handle)
 		goto out_disable;
 
-	if (xo->inner_ipproto) {
-		/* Cannot support tunnel packet over IPsec tunnel mode
-		 * because we cannot offload three IP header csum
-		 */
-		if (x->props.mode == XFRM_MODE_TUNNEL)
-			goto out_disable;
-
-		/* Only support UDP or TCP L4 checksum */
-		if (xo->inner_ipproto != IPPROTO_UDP &&
-		    xo->inner_ipproto != IPPROTO_TCP)
-			goto out_disable;
-	}
+	/* Only support UDP or TCP L4 checksum */
+	if (xo->inner_ipproto &&
+	    xo->inner_ipproto != IPPROTO_UDP &&
+	    xo->inner_ipproto != IPPROTO_TCP)
+		goto out_disable;
 
 	return features;
......
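With the tunnel-mode bailout removed, the rewritten check collapses the nested block into a single predicate: inner_ipproto only needs validating when it is set at all. A standalone sketch of the new predicate, with IPPROTO_* constants from the standard headers and everything else stubbed:

/* Standalone model of the simplified feature check above. */
#include <assert.h>
#include <netinet/in.h> /* IPPROTO_UDP, IPPROTO_TCP, IPPROTO_ICMP */

/* New form: offload allowed unless an inner protocol is present
 * and it is neither UDP nor TCP. */
static int offload_ok(int inner_ipproto)
{
	return !(inner_ipproto &&
		 inner_ipproto != IPPROTO_UDP &&
		 inner_ipproto != IPPROTO_TCP);
}

int main(void)
{
	assert(offload_ok(0));             /* no inner header: allowed */
	assert(offload_ok(IPPROTO_UDP));   /* inner UDP: allowed */
	assert(offload_ok(IPPROTO_TCP));   /* inner TCP: allowed */
	assert(!offload_ok(IPPROTO_ICMP)); /* anything else: disabled */
	return 0;
}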
@@ -3886,7 +3886,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 		mlx5e_fold_sw_stats64(priv, stats);
 	}
 
-	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+	stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
 
 	stats->rx_length_errors =
 		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
......
@@ -1186,6 +1186,9 @@ void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
 		ts_stats->err = 0;
 		ts_stats->lost = 0;
 
+		if (!ptp)
+			goto out;
+
 		/* Aggregate stats across all TCs */
 		for (i = 0; i < ptp->num_tc; i++) {
 			struct mlx5e_ptp_cq_stats *stats =
@@ -1214,6 +1217,7 @@ void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
 		}
 	}
 
+out:
 	mutex_unlock(&priv->state_lock);
 }
......
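The added if (!ptp) goto out; is the usual early-exit-under-lock shape: the function must not dereference ptp when no PTP channels exist, but it still has to release state_lock on the way out, hence the new out: label in front of the unlock. A generic pthread-based sketch of the same pattern; all names here are illustrative, not the driver's:

/* Generic model of the guard added above: bail out early when an
 * optional resource is absent, but still drop the lock via 'out:'. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct ptp_stats_model { int cqe_err, cqe_lost; };

struct priv_model {
	pthread_mutex_t state_lock;
	struct ptp_stats_model *ptp; /* NULL until PTP channels open */
};

static void stats_ts_get(struct priv_model *priv, int *err, int *lost)
{
	pthread_mutex_lock(&priv->state_lock);
	*err = 0;
	*lost = 0;

	if (!priv->ptp)
		goto out; /* nothing to aggregate, but must unlock */

	*err = priv->ptp->cqe_err;
	*lost = priv->ptp->cqe_lost;
out:
	pthread_mutex_unlock(&priv->state_lock);
}

int main(void)
{
	struct priv_model priv = { PTHREAD_MUTEX_INITIALIZER, NULL };
	int err, lost;

	stats_ts_get(&priv, &err, &lost); /* safe even with ptp == NULL */
	printf("err=%d lost=%d\n", err, lost);
	return 0;
}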
@@ -153,7 +153,11 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
 
 	*hopbyhop = 0;
 	if (skb->encapsulation) {
-		ihs = skb_inner_tcp_all_headers(skb);
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+			ihs = skb_inner_transport_offset(skb) +
+			      sizeof(struct udphdr);
+		else
+			ihs = skb_inner_tcp_all_headers(skb);
 		stats->tso_inner_packets++;
 		stats->tso_inner_bytes += skb->len - ihs;
 	} else {
......
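For UDP GSO the inline header size stops after the fixed 8-byte UDP header, while TCP needs the full variable-length TCP header included, which is what the two branches above compute. A worked example of the arithmetic with made-up offsets, not values taken from the driver:

/* Worked example of the ihs computation above. Assumed layout:
 * outer IPv4 (20) + UDP (8) + VXLAN (8) + inner Ethernet (14)
 * + inner IPv4 (20) = inner transport offset of 70 bytes. */
#include <stdio.h>

#define INNER_TRANSPORT_OFFSET 70
#define UDP_HDR_LEN            8  /* fixed-size UDP header */
#define INNER_TCP_HDR_LEN      32 /* TCP header incl. options; varies */

int main(void)
{
	int ihs_udp = INNER_TRANSPORT_OFFSET + UDP_HDR_LEN;       /* 78 */
	int ihs_tcp = INNER_TRANSPORT_OFFSET + INNER_TCP_HDR_LEN; /* 102 */

	printf("UDP GSO ihs: %d\n", ihs_udp);
	printf("TCP GSO ihs: %d\n", ihs_tcp);
	return 0;
}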
@@ -719,6 +719,7 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
 	struct mlx5_core_dev *dev;
 	u8 mode;
 #endif
+	bool roce_support;
 	int i;
 
 	for (i = 0; i < ldev->ports; i++)
@@ -743,6 +744,11 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
 			if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
 				return false;
 #endif
+	roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
+	for (i = 1; i < ldev->ports; i++)
+		if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
+			return false;
+
 	return true;
 }
@@ -910,8 +916,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
 	} else if (roce_lag) {
 		dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
 		mlx5_rescan_drivers_locked(dev0);
-		for (i = 1; i < ldev->ports; i++)
-			mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+		for (i = 1; i < ldev->ports; i++) {
+			if (mlx5_get_roce_state(ldev->pf[i].dev))
+				mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+		}
 	} else if (shared_fdb) {
 		int i;
......
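The prerequisite check added above refuses to form a LAG unless every port agrees on RoCE state, and the later hunk then only enables RoCE on secondary ports that actually have it on. A small model of the uniformity check, with a plain bool array standing in for the per-PF state in ldev->pf[]:

/* Model of the uniformity check: all ports must report the same
 * RoCE state before a RoCE LAG is allowed. */
#include <stdbool.h>
#include <stdio.h>

static bool lag_roce_prereq(const bool *roce_state, int ports)
{
	bool roce_support = roce_state[0]; /* first port sets the baseline */

	for (int i = 1; i < ports; i++)
		if (roce_state[i] != roce_support)
			return false; /* mixed states: no LAG */
	return true;
}

int main(void)
{
	bool uniform[] = { true, true };
	bool mixed[]   = { true, false };

	printf("uniform: %s\n", lag_roce_prereq(uniform, 2) ? "ok" : "reject");
	printf("mixed:   %s\n", lag_roce_prereq(mixed, 2) ? "ok" : "reject");
	return 0;
}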
@@ -100,10 +100,6 @@ static bool ft_create_alias_supported(struct mlx5_core_dev *dev)
 
 static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
 {
-	/* Feature is currently implemented for PFs only */
-	if (!mlx5_core_is_pf(dev))
-		return false;
-
 	/* Honor the SW implementation limit */
 	if (host_buses > MLX5_SD_MAX_GROUP_SZ)
 		return false;
@@ -162,6 +158,14 @@ static int sd_init(struct mlx5_core_dev *dev)
 	bool sdm;
 	int err;
 
+	/* Feature is currently implemented for PFs only */
+	if (!mlx5_core_is_pf(dev))
+		return 0;
+
+	/* Block on embedded CPU PFs */
+	if (mlx5_core_is_ecpf(dev))
+		return 0;
+
 	if (!MLX5_CAP_MCAM_REG(dev, mpir))
 		return 0;
......
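As the diff shows, moving the PF-only test from mlx5_sd_is_supported() into sd_init() means non-PF functions, and now embedded-CPU PFs, simply skip socket-direct initialization and return 0, so driver load continues with the feature off rather than treating the device as unsupported. A sketch of that "not applicable is not an error" shape, with the predicates stubbed out:

/* Sketch of the init-time gating above: on functions where the
 * feature does not apply, init returns 0 (success, feature off). */
#include <stdbool.h>
#include <stdio.h>

struct dev_model { bool is_pf, is_ecpf; };

static int sd_init_model(struct dev_model *dev, bool *sd_enabled)
{
	*sd_enabled = false;

	if (!dev->is_pf)  /* feature implemented for PFs only */
		return 0; /* success: driver load continues */
	if (dev->is_ecpf) /* blocked on embedded CPU PFs */
		return 0;

	*sd_enabled = true; /* real setup would happen here */
	return 0;
}

int main(void)
{
	struct dev_model vf = { .is_pf = false };
	bool enabled;

	sd_init_model(&vf, &enabled);
	printf("VF: sd %s, init ok\n", enabled ? "on" : "off");
	return 0;
}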
@@ -10308,9 +10308,9 @@ struct mlx5_ifc_mcam_access_reg_bits {
 	u8         mfrl[0x1];
 	u8         regs_39_to_32[0x8];
 
-	u8         regs_31_to_10[0x16];
+	u8         regs_31_to_11[0x15];
 	u8         mtmp[0x1];
-	u8         regs_8_to_0[0x9];
+	u8         regs_9_to_0[0xa];
 };
 
 struct mlx5_ifc_mcam_access_reg_bits1 {
......
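Both layouts are internally consistent 32-bit words (0x16 + 0x1 + 0x9 = 0x15 + 0x1 + 0xa = 32 bits), so the bug was purely the position of the mtmp capability bit: the old field names placed it at bit 9, the fix moves it up to bit 10 to match the MCAM register layout the commit title refers to. A compile-time check of that arithmetic:

/* Compile-time check of the MCAM field widths above. Widths are in
 * bits, following the mlx5_ifc reserved-field convention. */
#include <assert.h>

#define OLD_HIGH 0x16 /* regs_31_to_10: bits 31..10 -> 22 bits */
#define OLD_LOW  0x09 /* regs_8_to_0:   bits  8..0  ->  9 bits */
#define NEW_HIGH 0x15 /* regs_31_to_11: bits 31..11 -> 21 bits */
#define NEW_LOW  0x0a /* regs_9_to_0:   bits  9..0  -> 10 bits */
#define MTMP     0x01 /* single capability bit */

/* Both layouts fill the same 32-bit word... */
static_assert(OLD_HIGH + MTMP + OLD_LOW == 32, "old layout");
static_assert(NEW_HIGH + MTMP + NEW_LOW == 32, "new layout");
/* ...but the fix moves mtmp from bit 9 up to bit 10. */
static_assert(OLD_LOW == 9 && NEW_LOW == 10, "mtmp position");

int main(void) { return 0; }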