Commit 44f5e048 authored by David S. Miller

Merge branch 'tls-leaks'

Jakub Kicinski says:

====================
net: tls: fix memory leaks and freeing skbs

This series fixes two memory issues and a stack overflow.
The first two patches fix fairly simple memory leaks.  The third
patch partially reverts an optimization made to the strparser
which causes skb->frag_list->skb->frag_list... chains of
hundreds of skbs to be created, leading to recursive kfree_skb()
calls filling up the kernel stack (a minimal illustration of the
recursion follows below).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f4a58857 4a9c2e37
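
For readers unfamiliar with the failure mode, here is a minimal, hypothetical userspace sketch (not part of this series; struct fake_skb, free_recursive() and free_iterative() are invented names) of why recursively freeing a long frag_list-style chain is dangerous: each chained element costs one stack frame, and kernel stacks are small (typically 8-16 KB), so chains of hundreds of skbs can exhaust them. That is why the strparser change below stops such chains from being built in the first place.

/* Hypothetical userspace-only sketch, not kernel code. */
#include <stdlib.h>

struct fake_skb {
	struct fake_skb *frag_list;	/* next element, chained like skb->frag_list */
	char payload[256];
};

/* One stack frame per chained element: a long chain can exhaust the stack.
 * In the kernel, where stacks are only 8-16 KB, a few hundred frames
 * are already enough.
 */
void free_recursive(struct fake_skb *skb)
{
	if (!skb)
		return;
	free_recursive(skb->frag_list);
	free(skb);
}

/* Constant stack usage no matter how long the chain is. */
void free_iterative(struct fake_skb *skb)
{
	while (skb) {
		struct fake_skb *next = skb->frag_list;

		free(skb);
		skb = next;
	}
}

int main(void)
{
	struct fake_skb *head = NULL;
	int i;

	/* Build a long skb->frag_list->skb->frag_list... style chain. */
	for (i = 0; i < 100000; i++) {
		struct fake_skb *skb = calloc(1, sizeof(*skb));

		if (!skb)
			break;
		skb->frag_list = head;
		head = skb;
	}

	/* free_recursive(head) would risk overflowing the stack here;
	 * the iterative walk does not.
	 */
	free_iterative(head);
	return 0;
}

An iterative walk keeps stack usage constant regardless of chain length; the series instead avoids building the deep chains at all, since kfree_skb() handling of frag_list recurses as described in the cover letter.
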
@@ -307,6 +307,7 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 int tls_device_sendpage(struct sock *sk, struct page *page,
			 int offset, size_t size, int flags);
 void tls_device_sk_destruct(struct sock *sk);
+void tls_device_free_resources_tx(struct sock *sk);
 void tls_device_init(void);
 void tls_device_cleanup(void);
 int tls_tx_records(struct sock *sk, int flags);
@@ -330,6 +331,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx,
		 int flags);
 int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			     int flags);
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
 
 static inline struct tls_msg *tls_msg(struct sk_buff *skb)
 {
...
@@ -140,14 +140,12 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
			/* We are going to append to the frags_list of head.
			 * Need to unshare the frag_list.
			 */
-			if (skb_has_frag_list(head)) {
			err = skb_unclone(head, GFP_ATOMIC);
			if (err) {
				STRP_STATS_INCR(strp->stats.mem_fail);
				desc->error = err;
				return 0;
			}
-			}
 
			if (unlikely(skb_shinfo(head)->frag_list)) {
				/* We can't append to an sk_buff that already
...
@@ -52,8 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock);
 
 static void tls_device_free_ctx(struct tls_context *ctx)
 {
-	if (ctx->tx_conf == TLS_HW)
+	if (ctx->tx_conf == TLS_HW) {
 		kfree(tls_offload_ctx_tx(ctx));
+		kfree(ctx->tx.rec_seq);
+		kfree(ctx->tx.iv);
+	}
 
 	if (ctx->rx_conf == TLS_HW)
 		kfree(tls_offload_ctx_rx(ctx));
@@ -216,6 +219,13 @@ void tls_device_sk_destruct(struct sock *sk)
 }
 EXPORT_SYMBOL(tls_device_sk_destruct);
 
+void tls_device_free_resources_tx(struct sock *sk)
+{
+	struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+	tls_free_partial_record(sk, tls_ctx);
+}
+
 static void tls_append_frag(struct tls_record_info *record,
			     struct page_frag *pfrag,
			     int size)
...
@@ -208,6 +208,26 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
 	return tls_push_sg(sk, ctx, sg, offset, flags);
 }
 
+bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
+{
+	struct scatterlist *sg;
+
+	sg = ctx->partially_sent_record;
+	if (!sg)
+		return false;
+
+	while (1) {
+		put_page(sg_page(sg));
+		sk_mem_uncharge(sk, sg->length);
+
+		if (sg_is_last(sg))
+			break;
+		sg++;
+	}
+	ctx->partially_sent_record = NULL;
+	return true;
+}
+
 static void tls_write_space(struct sock *sk)
 {
 	struct tls_context *ctx = tls_get_ctx(sk);
@@ -267,6 +287,8 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
 		kfree(ctx->tx.rec_seq);
 		kfree(ctx->tx.iv);
 		tls_sw_free_resources_tx(sk);
+	} else if (ctx->tx_conf == TLS_HW) {
+		tls_device_free_resources_tx(sk);
 	}
 
 	if (ctx->rx_conf == TLS_SW) {
...
@@ -2052,20 +2052,7 @@ void tls_sw_free_resources_tx(struct sock *sk)
 	/* Free up un-sent records in tx_list. First, free
 	 * the partially sent record if any at head of tx_list.
 	 */
-	if (tls_ctx->partially_sent_record) {
-		struct scatterlist *sg = tls_ctx->partially_sent_record;
-
-		while (1) {
-			put_page(sg_page(sg));
-			sk_mem_uncharge(sk, sg->length);
-
-			if (sg_is_last(sg))
-				break;
-			sg++;
-		}
-
-		tls_ctx->partially_sent_record = NULL;
-
+	if (tls_free_partial_record(sk, tls_ctx)) {
 		rec = list_first_entry(&ctx->tx_list,
				       struct tls_rec, list);
 		list_del(&rec->list);
...