Commit 94524d8f authored by Vakul Garg, committed by David S. Miller

net/tls: Add support for async decryption of tls records

When TLS records are decrypted using asynchronous accelerators such as
the NXP CAAM engine, the crypto APIs return -EINPROGRESS. Presently, on
getting -EINPROGRESS, tls record processing stops until the crypto
accelerator finishes and returns the result. This incurs a context
switch and is not an efficient way of using the crypto accelerators.
Crypto accelerators work most efficiently when they are queued with
multiple crypto jobs without having to wait for the previous ones to
complete.
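
For illustration, the pipelined submission pattern looks roughly as
follows. This is a minimal sketch, not code from the patch;
my_decrypt_done and my_submit_decrypt are invented names:

	/* Queue an AEAD decrypt without blocking: install a completion
	 * callback and treat -EINPROGRESS as "accepted by the accelerator".
	 */
	static void my_decrypt_done(struct crypto_async_request *req, int err)
	{
		/* Runs in the accelerator's completion path; err holds the
		 * decryption result (0 on success).
		 */
	}

	static int my_submit_decrypt(struct aead_request *aead_req, void *cb_data)
	{
		int ret;

		aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					  my_decrypt_done, cb_data);
		ret = crypto_aead_decrypt(aead_req);
		if (ret == -EINPROGRESS)
			return 0;	/* queued; callback will fire later */
		return ret;		/* completed synchronously or failed */
	}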

The patch submits multiple crypto requests without waiting for
previous ones to complete. This has been implemented for records which
are decrypted in zero-copy mode. At the end of recvmsg(), we wait for
all the asynchronous decryption requests to complete.

The references to records which have been sent for async decryption
are dropped. For cases where record decryption is not possible in
zero-copy mode, asynchronous decryption is not used and we wait for the
decryption crypto API to complete.
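
Condensed from the recv_end hunk in the diff below, the wait for
outstanding requests at the end of recvmsg() amounts to:

	if (num_async) {
		/* No more requests will be submitted from this call;
		 * pairs with READ_ONCE(ctx->async_notify) in the
		 * completion handler.
		 */
		smp_store_mb(ctx->async_notify, true);
		if (atomic_read(&ctx->decrypt_pending))
			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
		WRITE_ONCE(ctx->async_notify, false);
	}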

For crypto requests executing asynchronously, the memory for the
aead_request, scatterlists, skb etc. is freed from the decryption
completion handler. The decryption completion handler wakes up the
sleeping user context when recvmsg() flags that it has finished
submitting all the decryption requests and there are no more decryption
requests pending to be completed.
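
The per-request context needed by the completion handler is carved out
of the aead_request allocation itself: setting the transform's reqsize
reserves sizeof(struct decrypt_req_ctx) bytes directly behind every
request, so the handler can recover the socket with pointer arithmetic.
req_ctx_of below is an illustrative helper, not part of the patch:

	struct decrypt_req_ctx {
		struct sock *sk;
	};

	/* during rx setup: (*aead)->reqsize = sizeof(struct decrypt_req_ctx); */

	static inline struct decrypt_req_ctx *req_ctx_of(struct aead_request *req)
	{
		/* the context lives immediately after the request structure */
		return (struct decrypt_req_ctx *)(req + 1);
	}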

Signed-off-by: Vakul Garg <vakul.garg@nxp.com>
Reviewed-by: Dave Watson <davejwatson@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 181ab623
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -124,6 +124,12 @@ struct tls_sw_context_rx {
 	struct sk_buff *recv_pkt;
 	u8 control;
 	bool decrypted;
+	atomic_t decrypt_pending;
+	bool async_notify;
+};
+
+struct decrypt_req_ctx {
+	struct sock *sk;
 };
 
 struct tls_record_info {
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -119,12 +119,50 @@ static int skb_nsg(struct sk_buff *skb, int offset, int len)
 	return __skb_nsg(skb, offset, len, 0);
 }
 
+static void tls_decrypt_done(struct crypto_async_request *req, int err)
+{
+	struct aead_request *aead_req = (struct aead_request *)req;
+	struct decrypt_req_ctx *req_ctx =
+			(struct decrypt_req_ctx *)(aead_req + 1);
+
+	struct scatterlist *sgout = aead_req->dst;
+
+	struct tls_context *tls_ctx = tls_get_ctx(req_ctx->sk);
+	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
+	int pending = atomic_dec_return(&ctx->decrypt_pending);
+	struct scatterlist *sg;
+	unsigned int pages;
+
+	/* Propagate if there was an err */
+	if (err) {
+		ctx->async_wait.err = err;
+		tls_err_abort(req_ctx->sk, err);
+	}
+
+	/* Release the skb, pages and memory allocated for crypto req */
+	kfree_skb(req->data);
+
+	/* Skip the first S/G entry as it points to AAD */
+	for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) {
+		if (!sg)
+			break;
+		put_page(sg_page(sg));
+	}
+
+	kfree(aead_req);
+
+	if (!pending && READ_ONCE(ctx->async_notify))
+		complete(&ctx->async_wait.completion);
+}
+
 static int tls_do_decryption(struct sock *sk,
+			     struct sk_buff *skb,
 			     struct scatterlist *sgin,
 			     struct scatterlist *sgout,
 			     char *iv_recv,
 			     size_t data_len,
-			     struct aead_request *aead_req)
+			     struct aead_request *aead_req,
+			     bool async)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
@@ -135,10 +173,34 @@ static int tls_do_decryption(struct sock *sk,
 	aead_request_set_crypt(aead_req, sgin, sgout,
 			       data_len + tls_ctx->rx.tag_size,
 			       (u8 *)iv_recv);
-	aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				  crypto_req_done, &ctx->async_wait);
 
-	ret = crypto_wait_req(crypto_aead_decrypt(aead_req), &ctx->async_wait);
+	if (async) {
+		struct decrypt_req_ctx *req_ctx;
+
+		req_ctx = (struct decrypt_req_ctx *)(aead_req + 1);
+		req_ctx->sk = sk;
+
+		aead_request_set_callback(aead_req,
+					  CRYPTO_TFM_REQ_MAY_BACKLOG,
+					  tls_decrypt_done, skb);
+		atomic_inc(&ctx->decrypt_pending);
+	} else {
+		aead_request_set_callback(aead_req,
+					  CRYPTO_TFM_REQ_MAY_BACKLOG,
+					  crypto_req_done, &ctx->async_wait);
+	}
+
+	ret = crypto_aead_decrypt(aead_req);
+	if (ret == -EINPROGRESS) {
+		if (async)
+			return ret;
+
+		ret = crypto_wait_req(ret, &ctx->async_wait);
+	}
+
+	if (async)
+		atomic_dec(&ctx->decrypt_pending);
+
 	return ret;
 }
@@ -841,7 +903,10 @@ static int decrypt_internal(struct sock *sk, struct sk_buff *skb,
 	}
 
 	/* Prepare and submit AEAD request */
-	err = tls_do_decryption(sk, sgin, sgout, iv, data_len, aead_req);
+	err = tls_do_decryption(sk, skb, sgin, sgout, iv,
+				data_len, aead_req, *zc);
+	if (err == -EINPROGRESS)
+		return err;
 
 	/* Release the pages in case iov was mapped to pages */
 	for (; pages > 0; pages--)
@@ -866,8 +931,12 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
 #endif
 	if (!ctx->decrypted) {
 		err = decrypt_internal(sk, skb, dest, NULL, chunk, zc);
-		if (err < 0)
+		if (err < 0) {
+			if (err == -EINPROGRESS)
+				tls_advance_record_sn(sk, &tls_ctx->rx);
+
 			return err;
+		}
 	} else {
 		*zc = false;
 	}
@@ -895,18 +964,20 @@ static bool tls_sw_advance_skb(struct sock *sk, struct sk_buff *skb,
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
 	struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
-	struct strp_msg *rxm = strp_msg(skb);
 
-	if (len < rxm->full_len) {
-		rxm->offset += len;
-		rxm->full_len -= len;
+	if (skb) {
+		struct strp_msg *rxm = strp_msg(skb);
 
-		return false;
+		if (len < rxm->full_len) {
+			rxm->offset += len;
+			rxm->full_len -= len;
+			return false;
+		}
+		kfree_skb(skb);
 	}
 
 	/* Finished with message */
 	ctx->recv_pkt = NULL;
-	kfree_skb(skb);
 	__strp_unpause(&ctx->strp);
 
 	return true;
@@ -929,6 +1000,7 @@ int tls_sw_recvmsg(struct sock *sk,
 	int target, err = 0;
 	long timeo;
 	bool is_kvec = msg->msg_iter.type & ITER_KVEC;
+	int num_async = 0;
 
 	flags |= nonblock;
@@ -941,6 +1013,7 @@ int tls_sw_recvmsg(struct sock *sk,
 	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 	do {
 		bool zc = false;
+		bool async = false;
 		int chunk = 0;
 
 		skb = tls_wait_data(sk, flags, timeo, &err);
@@ -948,6 +1021,7 @@ int tls_sw_recvmsg(struct sock *sk,
 			goto recv_end;
 
 		rxm = strp_msg(skb);
+
 		if (!cmsg) {
 			int cerr;
@@ -974,26 +1048,39 @@ int tls_sw_recvmsg(struct sock *sk,
 
 			err = decrypt_skb_update(sk, skb, &msg->msg_iter,
 						 &chunk, &zc);
-			if (err < 0) {
+			if (err < 0 && err != -EINPROGRESS) {
 				tls_err_abort(sk, EBADMSG);
 				goto recv_end;
 			}
+
+			if (err == -EINPROGRESS) {
+				async = true;
+				num_async++;
+				goto pick_next_record;
+			}
+
 			ctx->decrypted = true;
 		}
 
 		if (!zc) {
 			chunk = min_t(unsigned int, rxm->full_len, len);
+
 			err = skb_copy_datagram_msg(skb, rxm->offset, msg,
 						    chunk);
 			if (err < 0)
 				goto recv_end;
 		}
 
+pick_next_record:
 		copied += chunk;
 		len -= chunk;
 		if (likely(!(flags & MSG_PEEK))) {
 			u8 control = ctx->control;
 
+			/* For async, drop current skb reference */
+			if (async)
+				skb = NULL;
+
 			if (tls_sw_advance_skb(sk, skb, chunk)) {
 				/* Return full control message to
 				 * userspace before trying to parse
@@ -1002,14 +1089,33 @@ int tls_sw_recvmsg(struct sock *sk,
 				msg->msg_flags |= MSG_EOR;
 				if (control != TLS_RECORD_TYPE_DATA)
 					goto recv_end;
+			} else {
+				break;
 			}
 		}
+
 		/* If we have a new message from strparser, continue now. */
 		if (copied >= target && !ctx->recv_pkt)
 			break;
 	} while (len);
 
 recv_end:
+	if (num_async) {
+		/* Wait for all previously submitted records to be decrypted */
+		smp_store_mb(ctx->async_notify, true);
+		if (atomic_read(&ctx->decrypt_pending)) {
+			err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
+			if (err) {
+				/* one of async decrypt failed */
+				tls_err_abort(sk, err);
+				copied = 0;
+			}
+		} else {
+			reinit_completion(&ctx->async_wait.completion);
+		}
+		WRITE_ONCE(ctx->async_notify, false);
+	}
+
 	release_sock(sk);
 	return copied ? : err;
 }
@@ -1349,6 +1455,8 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 		goto free_aead;
 
 	if (sw_ctx_rx) {
+		(*aead)->reqsize = sizeof(struct decrypt_req_ctx);
+
 		/* Set up strparser */
 		memset(&cb, 0, sizeof(cb));
 		cb.rcv_msg = tls_queue;