Commit b3ae2459 authored by Tariq Toukan, committed by David S. Miller

net/tls: Add force_resync for driver resync

This patch adds a field to the TLS RX offload context which enables
drivers to force a resync call.

Drivers can use this field to request a resync at the next possible
TLS record. It is beneficial for hardware that provides the resync
sequence number asynchronously: in such cases, the packet that
triggered the resync does not contain the information required to
perform it. Instead, the driver requests a resync for every following
TLS record until the asynchronous notification carrying the resync
TCP sequence arrives.
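For illustration, a driver-side flow might look roughly like the sketch
below. None of this is from the patch itself: the mydrv_* names, the
struct mydrv_priv state, and the callback shape are hypothetical; only
the tls_offload_rx_* helpers come from this commit.

#include <net/tls.h>

/* Hypothetical per-connection driver state, made up for illustration. */
struct mydrv_priv {
	u32  hw_seq;		/* resync seq reported by the device */
	bool hw_seq_valid;	/* set once the async completion lands */
};

static void mydrv_program_hw(struct mydrv_priv *priv, u32 seq);	/* hypothetical HW reprogram */

/* Hardware signalled loss of sync but will deliver the resync TCP
 * sequence only later, in an asynchronous completion: arm a forced
 * resync so the stack calls back at the very next TLS record.
 */
static void mydrv_handle_resync_indication(struct sock *sk,
					   struct mydrv_priv *priv)
{
	priv->hw_seq_valid = false;
	tls_offload_rx_force_resync_request(sk);
}

/* Called per record while a request is armed (callback shape is
 * illustrative).  A fired force request is cleared by the stack, so
 * keep re-arming until the asynchronous sequence number shows up.
 */
static void mydrv_resync(struct sock *sk, u32 seq, struct mydrv_priv *priv)
{
	if (!priv->hw_seq_valid) {
		tls_offload_rx_force_resync_request(sk);
		return;
	}

	if (seq == priv->hw_seq)
		mydrv_program_hw(priv, seq);
}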

A following series adding mlx5e TLS RX offload support for
ConnectX-6 Dx will use this mechanism.
Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Reviewed-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Reviewed-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bdad7f94
include/net/tls.h:

@@ -594,12 +594,22 @@ tls_driver_ctx(const struct sock *sk, enum tls_offload_ctx_dir direction)
 #endif
 
 /* The TLS context is valid until sk_destruct is called */
+#define RESYNC_REQ (1 << 0)
+#define RESYNC_REQ_FORCE (1 << 1)
 static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
 
-	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | 1);
+	atomic64_set(&rx_ctx->resync_req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+}
+
+static inline void tls_offload_rx_force_resync_request(struct sock *sk)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+	atomic64_set(&rx_ctx->resync_req, RESYNC_REQ | RESYNC_REQ_FORCE);
 }
 
 static inline void
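Both helpers share a single atomic64, which is what makes a request
safe to post from interrupt context and cheap for the stack to consume:
the TCP sequence of the expected record start lives in the high 32 bits
and the request flags in the low bits, and the reader clears the whole
value with one atomic64_try_cmpxchg() (see the tls_device.c hunk
below). A layout sketch (the diagram itself is illustrative; the bit
assignments come from the code above):

/*
 * rx_ctx->resync_req layout (illustrative diagram):
 *
 *  63                              32 31             2  1  0
 * +----------------------------------+---------------+--+--+
 * |  TCP seq of expected record hdr  |    unused     | F| R|
 * +----------------------------------+---------------+--+--+
 *
 * R = RESYNC_REQ, F = RESYNC_REQ_FORCE.  A forced request sets only
 * R | F; the sequence field is left zero and ignored by the reader.
 */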
net/tls/tls_device.c:

@@ -694,10 +694,11 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_offload_context_rx *rx_ctx;
+	bool is_req_pending, is_force_resync;
 	u8 rcd_sn[TLS_MAX_REC_SEQ_SIZE];
-	u32 sock_data, is_req_pending;
 	struct tls_prot_info *prot;
 	s64 resync_req;
+	u32 sock_data;
 	u32 req_seq;
 
 	if (tls_ctx->rx_conf != TLS_HW)
@@ -712,9 +713,11 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq)
 		resync_req = atomic64_read(&rx_ctx->resync_req);
 		req_seq = resync_req >> 32;
 		seq += TLS_HEADER_SIZE - 1;
-		is_req_pending = resync_req;
+		is_req_pending = resync_req & RESYNC_REQ;
+		is_force_resync = resync_req & RESYNC_REQ_FORCE;
 
-		if (likely(!is_req_pending) || req_seq != seq ||
+		if (likely(!is_req_pending) ||
+		    (!is_force_resync && req_seq != seq) ||
 		    !atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0))
 			return;
 		break;
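Note the effect of the new flag in the check above: with
RESYNC_REQ_FORCE set, the req_seq != seq comparison is skipped, so the
first record header seen after the request triggers a driver resync
regardless of its TCP sequence. The atomic64_try_cmpxchg() then clears
both flags in one step, which is why a driver that still lacks the
asynchronous sequence number has to re-arm the force request each time
it fires (as in the sketch after the commit message above).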