Commit baaf0c65 authored by Herbert Xu, committed by Greg Kroah-Hartman

netlink: Do not schedule work from sk_destruct

[ Upstream commit ed5d7788 ]

It is wrong to schedule work from sk_destruct using the socket
as the memory reserve, because the socket will be freed immediately
after sk_destruct returns.

Instead, we should do the deferral prior to sk_free.

This patch does just that.

Fixes: 707693c8 ("netlink: Call cb->done from a worker thread")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d1ed9c1d
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -924,11 +924,13 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 	sk_mem_charge(sk, skb->truesize);
 }
 
-static void __netlink_sock_destruct(struct sock *sk)
+static void netlink_sock_destruct(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 
 	if (nlk->cb_running) {
+		if (nlk->cb.done)
+			nlk->cb.done(&nlk->cb);
 		module_put(nlk->cb.module);
 		kfree_skb(nlk->cb.skb);
 	}
@@ -962,21 +964,7 @@ static void netlink_sock_destruct_work(struct work_struct *work)
 	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
 						work);
 
-	nlk->cb.done(&nlk->cb);
-	__netlink_sock_destruct(&nlk->sk);
-}
-
-static void netlink_sock_destruct(struct sock *sk)
-{
-	struct netlink_sock *nlk = nlk_sk(sk);
-
-	if (nlk->cb_running && nlk->cb.done) {
-		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
-		schedule_work(&nlk->work);
-		return;
-	}
-
-	__netlink_sock_destruct(sk);
+	sk_free(&nlk->sk);
 }
 
 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
@@ -1284,8 +1272,18 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
 static void deferred_put_nlk_sk(struct rcu_head *head)
 {
 	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
+	struct sock *sk = &nlk->sk;
+
+	if (!atomic_dec_and_test(&sk->sk_refcnt))
+		return;
+
+	if (nlk->cb_running && nlk->cb.done) {
+		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
+		schedule_work(&nlk->work);
+		return;
+	}
 
-	sock_put(&nlk->sk);
+	sk_free(sk);
 }
 
 static int netlink_release(struct socket *sock)
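For readers outside the kernel, the lifetime rule this patch enforces (hand the memory to the deferred worker instead of freeing it; never schedule the deferral from inside the destructor, since the caller frees the object as soon as the destructor returns) can be sketched in plain userspace C. The snippet below is a minimal illustration under stated assumptions, not kernel code: all names (sock_like, release, destruct_work) are invented, and a detached pthread stands in for the kernel workqueue. Build with `cc -pthread`.

/*
 * Minimal userspace sketch (NOT kernel code) of the rule the patch
 * enforces: the holder of the last reference must hand the memory to
 * the deferred worker *instead of* freeing it.  All names here are
 * invented for illustration.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sock_like {
	atomic_int refcnt;
	int needs_deferred_cleanup;	/* analogue of nlk->cb_running */
};

/* Plays the role of netlink_sock_destruct_work(): it now owns sk. */
static void *destruct_work(void *arg)
{
	struct sock_like *sk = arg;

	puts("deferred cleanup running");
	free(sk);
	return NULL;
}

/* Plays the role of deferred_put_nlk_sk() after the patch. */
static void release(struct sock_like *sk)
{
	if (atomic_fetch_sub(&sk->refcnt, 1) != 1)
		return;		/* not the last reference */

	if (sk->needs_deferred_cleanup) {
		/* Defer *before* the free: ownership moves to the worker. */
		pthread_t worker;

		pthread_create(&worker, NULL, destruct_work, sk);
		pthread_detach(worker);
		return;		/* do NOT free sk here */
	}

	free(sk);		/* nothing to defer: free immediately */
}

int main(void)
{
	struct sock_like *sk = calloc(1, sizeof(*sk));

	atomic_init(&sk->refcnt, 1);
	sk->needs_deferred_cleanup = 1;
	release(sk);
	pthread_exit(NULL);	/* let the detached worker run to completion */
}

Here release() mirrors the patched deferred_put_nlk_sk(): once the last reference is dropped, it either frees the object immediately or returns after handing it to the worker, which is exactly the ordering the patch restores in place of scheduling work from the destructor.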