Commit 1d000323 authored by Emeel Hakim, committed by Saeed Mahameed

net/mlx5e: IPsec: Fix work queue entry ethernet segment checksum flags

In the IPsec crypto offload datapath, the Work Queue Entry (WQE) checksum
(csum) flags currently set in the ethernet segment (eseg) are not aligned
with PRM/HW expectations.

Currently the driver always sets the l3_inner_csum flag for IPsec because
it wrongly uses skb->encapsulation as the indicator of an inner IPsec
header: skb->encapsulation is always on for IPsec packets, since IPsec
itself is an encapsulation protocol. This forces failing attempts to
calculate the csum of non-existent segments (e.g. for an IP|ESP|TCP
packet, which has no inner L3 header), which leads to many packet drops
and hence low throughput.
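
To make the misfire concrete, here is a condensed sketch of the removed
check (taken from the diff below; stats accounting and surrounding driver
context are omitted):

	eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
	if (skb->encapsulation)		/* true for every IPsec skb, even plain IP|ESP|TCP */
		eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;	/* asks HW to csum a non-existent inner L3 header */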

Fix this by using xo->inner_ipproto instead of skb->encapsulation as the
indicator of an inner IPsec header, and by setting the csum flags as
follows (the resulting flag selection is sketched below, after the case list):
* Tunnel Mode:
* Pkt: MAC  IP     ESP  IP    L4
* CSUM: l3_cs | l3_inner_cs | l4_inner_cs
*
* Transport Mode:
* Pkt: MAC  IP     ESP  L4
* CSUM: l3_cs [ | l4_cs (checksum partial case)]
*
* Tunnel(VXLAN TCP/UDP) over Transport Mode
* Pkt: MAC  IP     ESP  UDP  VXLAN  IP    L4
* CSUM: l3_cs | l3_inner_cs | l4_inner_cs
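
A simplified sketch of how these cases map onto the new flag selection
(flag and field names as in the diff below; only the selection logic is
shown):

	eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;		/* outer L3 csum, all modes */
	if (xo->inner_ipproto)				/* tunnel mode, or tunnel (VXLAN) over transport */
		eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM | MLX5_ETH_WQE_L4_INNER_CSUM;
	else if (skb->ip_summed == CHECKSUM_PARTIAL)	/* plain transport mode */
		eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;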

Fixes: f1267798 ("net/mlx5: Fix checksum issue of VXLAN and IPsec crypto offload")
Signed-off-by: Emeel Hakim <ehakim@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent d10457f8
@@ -213,19 +213,18 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
 	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
 }
 
-/* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet),
- * need to set L3 checksum flag for IPsec
- */
 static void
 ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 			    struct mlx5_wqe_eth_seg *eseg)
 {
+	struct xfrm_offload *xo = xfrm_offload(skb);
+
 	eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
-	if (skb->encapsulation) {
-		eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+	if (xo->inner_ipproto) {
+		eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
+	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+		eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
 		sq->stats->csum_partial_inner++;
-	} else {
-		sq->stats->csum_partial++;
 	}
 }
 
@@ -234,6 +233,11 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 			    struct mlx5e_accel_tx_state *accel,
 			    struct mlx5_wqe_eth_seg *eseg)
 {
+	if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
+		ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+		return;
+	}
+
 	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
 		if (skb->encapsulation) {
@@ -249,8 +253,6 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
 		sq->stats->csum_partial++;
 #endif
-	} else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
-		ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
 	} else
 		sq->stats->csum_none++;
 }