Commit e7b159a4 authored by Jakub Kicinski's avatar Jakub Kicinski Committed by David S. Miller

net/tls: remove the record tail optimization

For TLS device offload the tag/message authentication code are
filled in by the device. The kernel merely reserves space for
them. Because the device overwrites it, the contents of the tag
do not matter. Current code tries to save space by reusing the
header as the tag. This, however, leads to an additional frag
being created and defeats buffer coalescing (which trickles
all the way down to the drivers).

Remove this optimization, and try to allocate the space for
the tag in the usual way, leave the memory uninitialized.
If memory allocation fails rewind the record pointer so that
we use the already copied user data as tag.

Note that the optimization was actually buggy, as the tag
for TLS 1.2 is 16 bytes, but the header is just 13, so the reuse
may have looked past the end of the page.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Dirk van der Merwe <dirk.vandermerwe@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d4774ac0
...@@ -256,29 +256,13 @@ static int tls_push_record(struct sock *sk, ...@@ -256,29 +256,13 @@ static int tls_push_record(struct sock *sk,
struct tls_context *ctx, struct tls_context *ctx,
struct tls_offload_context_tx *offload_ctx, struct tls_offload_context_tx *offload_ctx,
struct tls_record_info *record, struct tls_record_info *record,
struct page_frag *pfrag, int flags)
int flags,
unsigned char record_type)
{ {
struct tls_prot_info *prot = &ctx->prot_info; struct tls_prot_info *prot = &ctx->prot_info;
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct page_frag dummy_tag_frag;
skb_frag_t *frag; skb_frag_t *frag;
int i; int i;
/* fill prepend */
frag = &record->frags[0];
tls_fill_prepend(ctx,
skb_frag_address(frag),
record->len - prot->prepend_size,
record_type,
prot->version);
/* HW doesn't care about the data in the tag, because it fills it. */
dummy_tag_frag.page = skb_frag_page(frag);
dummy_tag_frag.offset = 0;
tls_append_frag(record, &dummy_tag_frag, prot->tag_size);
record->end_seq = tp->write_seq + record->len; record->end_seq = tp->write_seq + record->len;
list_add_tail_rcu(&record->list, &offload_ctx->records_list); list_add_tail_rcu(&record->list, &offload_ctx->records_list);
offload_ctx->open_record = NULL; offload_ctx->open_record = NULL;
...@@ -302,6 +286,38 @@ static int tls_push_record(struct sock *sk, ...@@ -302,6 +286,38 @@ static int tls_push_record(struct sock *sk,
return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags); return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags);
} }
/* Close an open offload TX record: reserve room for the authentication
 * tag and write the TLS record header into the first frag.
 *
 * The device fills in the actual tag contents, so the kernel only needs
 * placeholder space.  Fresh socket memory is preferred because reusing a
 * single page_frag improves frag coalescing down the stack.
 *
 * Returns 0 on success, the number of payload bytes reclaimed as the tag
 * placeholder when allocation failed (caller must re-send that many
 * bytes), or -ENOMEM when the record is too short to steal from.
 */
static int tls_device_record_close(struct sock *sk,
				   struct tls_context *ctx,
				   struct tls_record_info *record,
				   struct page_frag *pfrag,
				   unsigned char record_type)
{
	struct tls_prot_info *prot = &ctx->prot_info;
	int ret = 0;

	if (unlikely(!skb_page_frag_refill(prot->tag_size, pfrag,
					   sk->sk_allocation))) {
		/* No memory for a placeholder: steal the trailing
		 * tag_size bytes of already-copied user data instead.
		 */
		if (record->len <= prot->overhead_size)
			return -ENOMEM;
		ret = prot->tag_size;
	} else {
		tls_append_frag(record, pfrag, prot->tag_size);
	}

	/* fill prepend */
	tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
			 record->len - prot->overhead_size,
			 record_type, prot->version);
	return ret;
}
static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx, static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
struct page_frag *pfrag, struct page_frag *pfrag,
size_t prepend_size) size_t prepend_size)
...@@ -452,13 +468,24 @@ static int tls_push_data(struct sock *sk, ...@@ -452,13 +468,24 @@ static int tls_push_data(struct sock *sk,
if (done || record->len >= max_open_record_len || if (done || record->len >= max_open_record_len ||
(record->num_frags >= MAX_SKB_FRAGS - 1)) { (record->num_frags >= MAX_SKB_FRAGS - 1)) {
rc = tls_device_record_close(sk, tls_ctx, record,
pfrag, record_type);
if (rc) {
if (rc > 0) {
size += rc;
} else {
size = orig_size;
destroy_record(record);
ctx->open_record = NULL;
break;
}
}
rc = tls_push_record(sk, rc = tls_push_record(sk,
tls_ctx, tls_ctx,
ctx, ctx,
record, record,
pfrag, tls_push_record_flags);
tls_push_record_flags,
record_type);
if (rc < 0) if (rc < 0)
break; break;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment