Commit 8f5afe41 authored by Jakub Kicinski

Merge branch 'tls-a-few-more-fixes-for-async-decrypt'

Sabrina Dubroca says:

====================
tls: a few more fixes for async decrypt

The previous patchset [1] took care of "full async". This adds a few
fixes for cases where only part of the crypto operations go the async
route, found by extending my previous debug patch [2] to do N
synchronous operations followed by M asynchronous ops (with N and M
configurable).

[1] https://patchwork.kernel.org/project/netdevbpf/list/?series=823784&state=*
[2] https://lore.kernel.org/all/9d664093b1bf7f47497b2c40b3a085b45f3274a2.1694021240.git.sd@queasysnail.net/
====================

Link: https://lore.kernel.org/r/cover.1709132643.git.sd@queasysnail.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 616d82c3 13114dc5
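
Before the diff: the cover letter above describes forcing N synchronous crypto operations followed by M asynchronous ones. As a rough illustration of that kind of debug hook (a hypothetical sketch in the spirit of [2], not the actual debug patch; the dbg_* names and defaults are invented):

	/* Hypothetical debug knobs: make the first N AEAD ops of each cycle
	 * complete synchronously and the next M complete asynchronously, to
	 * exercise mixed sync/async completion orderings in the TLS rx path.
	 */
	#include <linux/atomic.h>
	#include <linux/types.h>

	static unsigned int dbg_n_sync = 4;	/* N: assumed module parameter */
	static unsigned int dbg_m_async = 4;	/* M: assumed module parameter */
	static atomic_t dbg_op_count = ATOMIC_INIT(0);

	/* Returns true when the current operation should take the async path. */
	static bool dbg_force_async(void)
	{
		unsigned int n = atomic_inc_return(&dbg_op_count);
		unsigned int cycle = dbg_n_sync + dbg_m_async;

		/* positions 0..N-1 of each cycle run sync, N..N+M-1 run async */
		return ((n - 1) % cycle) >= dbg_n_sync;
	}
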
@@ -52,6 +52,7 @@ struct tls_decrypt_arg {
 	struct_group(inargs,
 	bool zc;
 	bool async;
+	bool async_done;
 	u8 tail;
 	);
@@ -274,22 +275,30 @@ static int tls_do_decryption(struct sock *sk,
 		DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1);
 		atomic_inc(&ctx->decrypt_pending);
 	} else {
+		DECLARE_CRYPTO_WAIT(wait);
+
 		aead_request_set_callback(aead_req,
 					  CRYPTO_TFM_REQ_MAY_BACKLOG,
-					  crypto_req_done, &ctx->async_wait);
+					  crypto_req_done, &wait);
+		ret = crypto_aead_decrypt(aead_req);
+		if (ret == -EINPROGRESS || ret == -EBUSY)
+			ret = crypto_wait_req(ret, &wait);
+		return ret;
 	}
 
 	ret = crypto_aead_decrypt(aead_req);
+	if (ret == -EINPROGRESS)
+		return 0;
+
 	if (ret == -EBUSY) {
 		ret = tls_decrypt_async_wait(ctx);
-		ret = ret ?: -EINPROGRESS;
+		darg->async_done = true;
+		/* all completions have run, we're not doing async anymore */
+		darg->async = false;
+		return ret;
 	}
-	if (ret == -EINPROGRESS) {
-		if (darg->async)
-			return 0;
-
-		ret = crypto_wait_req(ret, &ctx->async_wait);
-	}
+
+	atomic_dec(&ctx->decrypt_pending);
 	darg->async = false;
 
 	return ret;
@@ -1588,8 +1597,11 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
 	/* Prepare and submit AEAD request */
 	err = tls_do_decryption(sk, sgin, sgout, dctx->iv,
 				data_len + prot->tail_size, aead_req, darg);
-	if (err)
+	if (err) {
+		if (darg->async_done)
+			goto exit_free_skb;
 		goto exit_free_pages;
+	}
 
 	darg->skb = clear_skb ?: tls_strp_msg(ctx);
 	clear_skb = NULL;
@@ -1601,6 +1613,9 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov,
 		return err;
 	}
 
+	if (unlikely(darg->async_done))
+		return 0;
+
 	if (prot->tail_size)
 		darg->tail = dctx->tail;
 
@@ -1948,6 +1963,7 @@ int tls_sw_recvmsg(struct sock *sk,
 	struct strp_msg *rxm;
 	struct tls_msg *tlm;
 	ssize_t copied = 0;
+	ssize_t peeked = 0;
 	bool async = false;
 	int target, err;
 	bool is_kvec = iov_iter_is_kvec(&msg->msg_iter);
@@ -2095,8 +2111,10 @@ int tls_sw_recvmsg(struct sock *sk,
 			if (err < 0)
 				goto put_on_rx_list_err;
 
-			if (is_peek)
+			if (is_peek) {
+				peeked += chunk;
 				goto put_on_rx_list;
+			}
 
 			if (partially_consumed) {
 				rxm->offset += chunk;
@@ -2135,8 +2153,8 @@ int tls_sw_recvmsg(struct sock *sk,
 	/* Drain records from the rx_list & copy if required */
 	if (is_peek || is_kvec)
-		err = process_rx_list(ctx, msg, &control, copied,
-				      decrypted, is_peek, NULL);
+		err = process_rx_list(ctx, msg, &control, copied + peeked,
+				      decrypted - peeked, is_peek, NULL);
 	else
 		err = process_rx_list(ctx, msg, &control, 0,
 				      async_copy_bytes, is_peek, NULL);
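
The tls_do_decryption() hunk above moves the synchronous path onto an on-stack completion (DECLARE_CRYPTO_WAIT) instead of the shared ctx->async_wait. For reference, a minimal, self-contained sketch of that standard kernel crypto-API wait pattern (the demo_* name and parameter list are illustrative, not from the patch):

	#include <crypto/aead.h>
	#include <linux/crypto.h>
	#include <linux/scatterlist.h>

	static int demo_aead_decrypt_sync(struct crypto_aead *tfm,
					  struct scatterlist *src,
					  struct scatterlist *dst,
					  unsigned int cryptlen, u8 *iv)
	{
		DECLARE_CRYPTO_WAIT(wait);	/* on-stack completion */
		struct aead_request *req;
		int err;

		req = aead_request_alloc(tfm, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		/* crypto_req_done() will complete &wait from the driver callback */
		aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					  crypto_req_done, &wait);
		aead_request_set_crypt(req, src, dst, cryptlen, iv);

		/* turns -EINPROGRESS/-EBUSY into a blocking wait for completion */
		err = crypto_wait_req(crypto_aead_decrypt(req), &wait);

		aead_request_free(req);
		return err;
	}

Using a local wait object keeps purely synchronous requests from touching the context-wide async machinery, which is what lets the async bookkeeping (decrypt_pending, async_done) stay consistent when only some operations go async.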