Commit 46a3ea98 authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5e: kTLS, Enhance TX resync flow

The kTLS TX resync function used to return a binary value, indicating
either success or failure.

However, when the TLS SKB is a retransmission of the connection
handshake, it initiates the resync flow (as the tcp seq check holds),
while regular packet handling is expected.

In this patch, we identify this case and skip the resync operation
accordingly.
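
To make the new contract concrete, here is a minimal, self-contained
sketch (compilable on its own; it is not the driver code, and
sync_info_get() with its flag parameters is a simplified stand-in for
the record lookup done in tx_sync_info_get()) of the three-state
dispatch: DONE continues the regular send path, FAIL drops the packet,
and SKIP_NO_DATA transmits the SKB without a resync.

/* Standalone sketch, NOT the driver code: the helper below and its
 * flag parameters are simplified stand-ins for
 * tls_get_record()/tls_record_is_start_marker().
 */
#include <stdio.h>

enum ktls_sync_retval {
        KTLS_SYNC_DONE,         /* resync info found, proceed as usual  */
        KTLS_SYNC_FAIL,         /* no usable record, drop the packet    */
        KTLS_SYNC_SKIP_NO_DATA, /* start-marker record: nothing to sync */
};

static enum ktls_sync_retval
sync_info_get(int record_found, int seq_before_record, int is_start_marker)
{
        if (!record_found)
                return KTLS_SYNC_FAIL;
        /* A handshake retransmission starts before the first record but
         * lands on the start marker, so there is no data to resync.
         */
        if (seq_before_record)
                return is_start_marker ? KTLS_SYNC_SKIP_NO_DATA
                                       : KTLS_SYNC_FAIL;
        return KTLS_SYNC_DONE;
}

int main(void)
{
        /* Handshake retransmission: record found, seq precedes it,
         * record is the start marker -> skip the resync.
         */
        switch (sync_info_get(1, 1, 1)) {
        case KTLS_SYNC_DONE:
                printf("DONE: post the WQE and send normally\n");
                break;
        case KTLS_SYNC_FAIL:
                printf("FAIL: drop the packet\n");
                break;
        case KTLS_SYNC_SKIP_NO_DATA:
                printf("SKIP_NO_DATA: bump counter, send without resync\n");
                break;
        }
        return 0;
}

Returning an enum rather than an skb pointer lets the caller tell
"drop" apart from "send without resync", which the old NULL/non-NULL
convention could not express.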

Counters:
- Add a counter (tls_skip_no_sync_data) to monitor this.
- Bump the dump counters up as they are used more frequently.
- Add a missing counter descriptor declaration for tls_resync_bytes
  in sq_stats_desc.
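
Note: as with the existing tls_* software counters declared via
MLX5E_DECLARE_STAT / MLX5E_DECLARE_TX_STAT, the new counter should
appear in ethtool -S output, globally as tx_tls_skip_no_sync_data and
per send queue via the sq_stats_desc entry below.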

Fixes: d2ead1f3 ("net/mlx5e: Add kTLS TX HW offload support")
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent af11a7a4
@@ -185,26 +185,33 @@ struct tx_sync_info {
 	skb_frag_t frags[MAX_SKB_FRAGS];
 };
 
-static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
-			     u32 tcp_seq, struct tx_sync_info *info)
+enum mlx5e_ktls_sync_retval {
+	MLX5E_KTLS_SYNC_DONE,
+	MLX5E_KTLS_SYNC_FAIL,
+	MLX5E_KTLS_SYNC_SKIP_NO_DATA,
+};
+
+static enum mlx5e_ktls_sync_retval
+tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
+		 u32 tcp_seq, struct tx_sync_info *info)
 {
 	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
+	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
 	struct tls_record_info *record;
 	int remaining, i = 0;
 	unsigned long flags;
-	bool ret = true;
 
 	spin_lock_irqsave(&tx_ctx->lock, flags);
 	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
 
 	if (unlikely(!record)) {
-		ret = false;
+		ret = MLX5E_KTLS_SYNC_FAIL;
 		goto out;
 	}
 
 	if (unlikely(tcp_seq < tls_record_start_seq(record))) {
-		if (!tls_record_is_start_marker(record))
-			ret = false;
+		ret = tls_record_is_start_marker(record) ?
+		      MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
 		goto out;
 	}
@@ -316,20 +323,26 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
 	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
 }
 
-static struct sk_buff *
+static enum mlx5e_ktls_sync_retval
 mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 			 struct mlx5e_txqsq *sq,
-			 struct sk_buff *skb,
+			 int datalen,
 			 u32 seq)
 {
 	struct mlx5e_sq_stats *stats = sq->stats;
 	struct mlx5_wq_cyc *wq = &sq->wq;
+	enum mlx5e_ktls_sync_retval ret;
 	struct tx_sync_info info = {};
 	u16 contig_wqebbs_room, pi;
 	u8 num_wqebbs;
 	int i = 0;
 
-	if (!tx_sync_info_get(priv_tx, seq, &info)) {
+	ret = tx_sync_info_get(priv_tx, seq, &info);
+	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
+		if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
+			stats->tls_skip_no_sync_data++;
+			return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
+		}
+
 		/* We might get here if a retransmission reaches the driver
 		 * after the relevant record is acked.
 		 * It should be safe to drop the packet in this case
@@ -339,13 +352,8 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 	}
 
 	if (unlikely(info.sync_len < 0)) {
-		u32 payload;
-		int headln;
-
-		headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
-		payload = skb->len - headln;
-		if (likely(payload <= -info.sync_len))
-			return skb;
+		if (likely(datalen <= -info.sync_len))
+			return MLX5E_KTLS_SYNC_DONE;
 
 		stats->tls_drop_bypass_req++;
 		goto err_out;
@@ -360,7 +368,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 	 */
 	if (!info.nr_frags) {
 		tx_post_fence_nop(sq);
-		return skb;
+		return MLX5E_KTLS_SYNC_DONE;
 	}
 
 	num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
@@ -397,7 +405,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 		page_ref_add(skb_frag_page(f), n - 1);
 	}
 
-	return skb;
+	return MLX5E_KTLS_SYNC_DONE;
 
 err_out:
 	for (; i < info.nr_frags; i++)
@@ -408,8 +416,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 	 */
 		put_page(skb_frag_page(&info.frags[i]));
 
-	dev_kfree_skb_any(skb);
-	return NULL;
+	return MLX5E_KTLS_SYNC_FAIL;
 }
 
 struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
@@ -445,10 +452,15 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
 	seq = ntohl(tcp_hdr(skb)->seq);
 	if (unlikely(priv_tx->expected_seq != seq)) {
-		skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
-		if (unlikely(!skb))
+		enum mlx5e_ktls_sync_retval ret =
+			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
+
+		if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+			*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
+		else if (ret == MLX5E_KTLS_SYNC_FAIL)
+			goto err_out;
+		else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
 			goto out;
-		*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
 	}
 
 	priv_tx->expected_seq = seq + datalen;
...
@@ -52,11 +52,12 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
 #endif
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
@@ -288,11 +289,12 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 			s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
 			s->tx_tls_ctx += sq_stats->tls_ctx;
 			s->tx_tls_ooo += sq_stats->tls_ooo;
+			s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
+			s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
 			s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
+			s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
 			s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
 			s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
-			s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
-			s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
 #endif
 			s->tx_cqes += sq_stats->cqes;
 		}
@@ -1472,10 +1474,12 @@ static const struct counter_desc sq_stats_desc[] = {
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
 #endif
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
...
@@ -129,11 +129,12 @@ struct mlx5e_sw_stats {
 	u64 tx_tls_encrypted_bytes;
 	u64 tx_tls_ctx;
 	u64 tx_tls_ooo;
+	u64 tx_tls_dump_packets;
+	u64 tx_tls_dump_bytes;
 	u64 tx_tls_resync_bytes;
+	u64 tx_tls_skip_no_sync_data;
 	u64 tx_tls_drop_no_sync_data;
 	u64 tx_tls_drop_bypass_req;
-	u64 tx_tls_dump_packets;
-	u64 tx_tls_dump_bytes;
 #endif
 	u64 rx_xsk_packets;
@@ -273,11 +274,12 @@ struct mlx5e_sq_stats {
 	u64 tls_encrypted_bytes;
 	u64 tls_ctx;
 	u64 tls_ooo;
+	u64 tls_dump_packets;
+	u64 tls_dump_bytes;
 	u64 tls_resync_bytes;
+	u64 tls_skip_no_sync_data;
 	u64 tls_drop_no_sync_data;
 	u64 tls_drop_bypass_req;
-	u64 tls_dump_packets;
-	u64 tls_dump_bytes;
 #endif
 	/* less likely accessed in data path */
 	u64 csum_none;
...