Commit 46a3ea98 authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5e: kTLS, Enhance TX resync flow

Until now, the kTLS TX resync function returned a binary value,
indicating only success or failure.

However, in case the TLS SKB is a retransmission of the connection
handshake, it initiates the resync flow (as the tcp seq check holds),
while regular packet handling is expected.

In this patch, we identify this case and skip the resync operation
accordingly.
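
In mlx5e_ktls_handle_tx_skb(), the verdict is now handled per value
rather than by a NULL/non-NULL skb. A condensed sketch of the resulting
control flow (variable names as in the patch; simplified from the diff
below rather than quoted verbatim):

	enum mlx5e_ktls_sync_retval ret;

	ret = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
	if (likely(ret == MLX5E_KTLS_SYNC_DONE))
		/* resync posted; fetch a WQE and transmit as usual */
		*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
	else if (ret == MLX5E_KTLS_SYNC_FAIL)
		/* resync failed; drop the skb at err_out */
		goto err_out;
	else /* MLX5E_KTLS_SYNC_SKIP_NO_DATA */
		/* handshake retransmission: skip resync, send as a regular packet */
		goto out;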

Counters:
- Add a counter (tls_skip_no_sync_data) to monitor this.
- Move the dump counters up in the stats lists, as they are used more
  frequently.
- Add a missing counter descriptor declaration for tls_resync_bytes
  in sq_stats_desc.

Fixes: d2ead1f3 ("net/mlx5e: Add kTLS TX HW offload support")
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent af11a7a4
@@ -185,26 +185,33 @@ struct tx_sync_info {
 	skb_frag_t frags[MAX_SKB_FRAGS];
 };
 
-static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
-			     u32 tcp_seq, struct tx_sync_info *info)
+enum mlx5e_ktls_sync_retval {
+	MLX5E_KTLS_SYNC_DONE,
+	MLX5E_KTLS_SYNC_FAIL,
+	MLX5E_KTLS_SYNC_SKIP_NO_DATA,
+};
+
+static enum mlx5e_ktls_sync_retval
+tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
+		 u32 tcp_seq, struct tx_sync_info *info)
 {
 	struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
+	enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
 	struct tls_record_info *record;
 	int remaining, i = 0;
 	unsigned long flags;
-	bool ret = true;
 
 	spin_lock_irqsave(&tx_ctx->lock, flags);
 	record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
 	if (unlikely(!record)) {
-		ret = false;
+		ret = MLX5E_KTLS_SYNC_FAIL;
 		goto out;
 	}
 
 	if (unlikely(tcp_seq < tls_record_start_seq(record))) {
-		if (!tls_record_is_start_marker(record))
-			ret = false;
+		ret = tls_record_is_start_marker(record) ?
+		      MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
 		goto out;
 	}
@@ -316,20 +323,26 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
 	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
 }
 
-static struct sk_buff *
+static enum mlx5e_ktls_sync_retval
 mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 			 struct mlx5e_txqsq *sq,
-			 struct sk_buff *skb,
+			 int datalen,
 			 u32 seq)
 {
 	struct mlx5e_sq_stats *stats = sq->stats;
 	struct mlx5_wq_cyc *wq = &sq->wq;
+	enum mlx5e_ktls_sync_retval ret;
 	struct tx_sync_info info = {};
 	u16 contig_wqebbs_room, pi;
 	u8 num_wqebbs;
 	int i = 0;
 
-	if (!tx_sync_info_get(priv_tx, seq, &info)) {
+	ret = tx_sync_info_get(priv_tx, seq, &info);
+	if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
+		if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
+			stats->tls_skip_no_sync_data++;
+			return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
+		}
+
 		/* We might get here if a retransmission reaches the driver
 		 * after the relevant record is acked.
 		 * It should be safe to drop the packet in this case
@@ -339,13 +352,8 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 	}
 
 	if (unlikely(info.sync_len < 0)) {
-		u32 payload;
-		int headln;
-
-		headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
-		payload = skb->len - headln;
-		if (likely(payload <= -info.sync_len))
-			return skb;
+		if (likely(datalen <= -info.sync_len))
+			return MLX5E_KTLS_SYNC_DONE;
 
 		stats->tls_drop_bypass_req++;
 		goto err_out;
@@ -360,7 +368,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 	 */
 	if (!info.nr_frags) {
 		tx_post_fence_nop(sq);
-		return skb;
+		return MLX5E_KTLS_SYNC_DONE;
 	}
 
 	num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
@@ -397,7 +405,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 		page_ref_add(skb_frag_page(f), n - 1);
 	}
 
-	return skb;
+	return MLX5E_KTLS_SYNC_DONE;
 
 err_out:
 	for (; i < info.nr_frags; i++)
@@ -408,8 +416,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 		 */
 		put_page(skb_frag_page(&info.frags[i]));
 
-	dev_kfree_skb_any(skb);
-	return NULL;
+	return MLX5E_KTLS_SYNC_FAIL;
 }
 
 struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
@@ -445,10 +452,15 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
 	seq = ntohl(tcp_hdr(skb)->seq);
 	if (unlikely(priv_tx->expected_seq != seq)) {
-		skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
-		if (unlikely(!skb))
+		enum mlx5e_ktls_sync_retval ret =
+			mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
+
+		if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+			*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
+		else if (ret == MLX5E_KTLS_SYNC_FAIL)
 			goto err_out;
-
-		*wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
+		else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
+			goto out;
 	}
 
 	priv_tx->expected_seq = seq + datalen;
@@ -52,11 +52,12 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
-	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
 #endif
 
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
@@ -288,11 +289,12 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
 			s->tx_tls_encrypted_bytes += sq_stats->tls_encrypted_bytes;
 			s->tx_tls_ctx += sq_stats->tls_ctx;
 			s->tx_tls_ooo += sq_stats->tls_ooo;
+			s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
+			s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
 			s->tx_tls_resync_bytes += sq_stats->tls_resync_bytes;
+			s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
 			s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
 			s->tx_tls_drop_bypass_req += sq_stats->tls_drop_bypass_req;
-			s->tx_tls_dump_bytes += sq_stats->tls_dump_bytes;
-			s->tx_tls_dump_packets += sq_stats->tls_dump_packets;
 #endif
 			s->tx_cqes += sq_stats->cqes;
 		}
@@ -1472,10 +1474,12 @@ static const struct counter_desc sq_stats_desc[] = {
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
-	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
 #endif
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
@@ -129,11 +129,12 @@ struct mlx5e_sw_stats {
 	u64 tx_tls_encrypted_bytes;
 	u64 tx_tls_ctx;
 	u64 tx_tls_ooo;
+	u64 tx_tls_dump_packets;
+	u64 tx_tls_dump_bytes;
 	u64 tx_tls_resync_bytes;
+	u64 tx_tls_skip_no_sync_data;
 	u64 tx_tls_drop_no_sync_data;
 	u64 tx_tls_drop_bypass_req;
-	u64 tx_tls_dump_packets;
-	u64 tx_tls_dump_bytes;
 #endif
 
 	u64 rx_xsk_packets;
@@ -273,11 +274,12 @@ struct mlx5e_sq_stats {
 	u64 tls_encrypted_bytes;
 	u64 tls_ctx;
 	u64 tls_ooo;
+	u64 tls_dump_packets;
+	u64 tls_dump_bytes;
 	u64 tls_resync_bytes;
+	u64 tls_skip_no_sync_data;
 	u64 tls_drop_no_sync_data;
 	u64 tls_drop_bypass_req;
-	u64 tls_dump_packets;
-	u64 tls_dump_bytes;
 #endif
 	/* less likely accessed in data path */
 	u64 csum_none;