Commit a0eea5f1 authored by Paolo Abeni, committed by David S. Miller

mptcp: fix memory leak on address flush

The endpoint cleanup path is prone to a memory leak, as reported
by syzkaller:

 BUG: memory leak
 unreferenced object 0xffff88810680ea00 (size 64):
   comm "syz-executor.6", pid 6191, jiffies 4295756280 (age 24.138s)
   hex dump (first 32 bytes):
     58 75 7d 3c 80 88 ff ff 22 01 00 00 00 00 ad de  Xu}<....".......
     01 00 02 00 00 00 00 00 ac 1e 00 07 00 00 00 00  ................
   backtrace:
     [<0000000072a9f72a>] kmalloc include/linux/slab.h:591 [inline]
     [<0000000072a9f72a>] mptcp_nl_cmd_add_addr+0x287/0x9f0 net/mptcp/pm_netlink.c:1170
     [<00000000f6e931bf>] genl_family_rcv_msg_doit.isra.0+0x225/0x340 net/netlink/genetlink.c:731
     [<00000000f1504a2c>] genl_family_rcv_msg net/netlink/genetlink.c:775 [inline]
     [<00000000f1504a2c>] genl_rcv_msg+0x341/0x5b0 net/netlink/genetlink.c:792
     [<0000000097e76f6a>] netlink_rcv_skb+0x148/0x430 net/netlink/af_netlink.c:2504
     [<00000000ceefa2b8>] genl_rcv+0x24/0x40 net/netlink/genetlink.c:803
     [<000000008ff91aec>] netlink_unicast_kernel net/netlink/af_netlink.c:1314 [inline]
     [<000000008ff91aec>] netlink_unicast+0x537/0x750 net/netlink/af_netlink.c:1340
     [<0000000041682c35>] netlink_sendmsg+0x846/0xd80 net/netlink/af_netlink.c:1929
     [<00000000df3aa8e7>] sock_sendmsg_nosec net/socket.c:704 [inline]
     [<00000000df3aa8e7>] sock_sendmsg+0x14e/0x190 net/socket.c:724
     [<000000002154c54c>] ____sys_sendmsg+0x709/0x870 net/socket.c:2403
     [<000000001aab01d7>] ___sys_sendmsg+0xff/0x170 net/socket.c:2457
     [<00000000fa3b1446>] __sys_sendmsg+0xe5/0x1b0 net/socket.c:2486
     [<00000000db2ee9c7>] do_syscall_x64 arch/x86/entry/common.c:50 [inline]
     [<00000000db2ee9c7>] do_syscall_64+0x38/0x90 arch/x86/entry/common.c:80
     [<000000005873517d>] entry_SYSCALL_64_after_hwframe+0x44/0xae

We should not require an allocation to clean things up: in the current code, if the GFP_ATOMIC allocation of the release work fails, the entry is silently dropped and leaked.

Rework the code a bit so that the additional RCU work is no longer needed: the callers now wait for the RCU grace period themselves and release the entry directly.
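
A minimal sketch of the pattern change, using a hypothetical foo_entry type in place of mptcp_pm_addr_entry (illustration only, not a drop-in for pm_netlink.c):

#include <linux/list.h>
#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_entry {
	struct list_head list;	/* published to readers under RCU */
	struct socket *lsk;
};

/* Old pattern: freeing needs a GFP_ATOMIC allocation just to defer the
 * release past the RCU grace period; when that kmalloc() fails, nothing
 * ever frees @entry and it leaks.
 */
struct foo_release_work {
	struct rcu_work rwork;
	struct foo_entry *entry;
};

static void foo_release_work_fn(struct work_struct *work)
{
	struct foo_release_work *w;

	w = container_of(to_rcu_work(work), struct foo_release_work, rwork);
	if (w->entry->lsk)
		sock_release(w->entry->lsk);
	kfree(w->entry);
	kfree(w);
}

static void foo_free_entry_old(struct foo_entry *entry)
{
	struct foo_release_work *w = kmalloc(sizeof(*w), GFP_ATOMIC);

	if (w) {
		INIT_RCU_WORK(&w->rwork, foo_release_work_fn);
		w->entry = entry;
		queue_rcu_work(system_wq, &w->rwork);
	}
	/* on allocation failure @entry is lost: the reported leak */
}

/* New pattern: no allocation on the cleanup path; the caller waits for
 * the grace period (or knows one has already elapsed) and releases the
 * entry directly.
 */
static void __foo_release_entry(struct foo_entry *entry)
{
	if (entry->lsk)
		sock_release(entry->lsk);
	kfree(entry);
}

static void foo_free_entry_new(struct foo_entry *entry)
{
	synchronize_rcu();	/* sleeps, so process context only */
	__foo_release_entry(entry);
}

synchronize_rcu() can sleep, which is fine in the netlink doit handlers changed below (they run in process context); the netns exit path keeps calling __flush_addrs() without it because, as the updated comment notes, the netns core has already waited for an RCU grace period.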

Fixes: 1729cf18 ("mptcp: create the listening socket for new port")
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fb4b1373
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -1135,36 +1135,12 @@ static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
 	return 0;
 }
 
-struct addr_entry_release_work {
-	struct rcu_work rwork;
-	struct mptcp_pm_addr_entry *entry;
-};
-
-static void mptcp_pm_release_addr_entry(struct work_struct *work)
+/* caller must ensure the RCU grace period is already elapsed */
+static void __mptcp_pm_release_addr_entry(struct mptcp_pm_addr_entry *entry)
 {
-	struct addr_entry_release_work *w;
-	struct mptcp_pm_addr_entry *entry;
-
-	w = container_of(to_rcu_work(work), struct addr_entry_release_work, rwork);
-	entry = w->entry;
-	if (entry) {
-		if (entry->lsk)
-			sock_release(entry->lsk);
-		kfree(entry);
-	}
-	kfree(w);
-}
-
-static void mptcp_pm_free_addr_entry(struct mptcp_pm_addr_entry *entry)
-{
-	struct addr_entry_release_work *w;
-
-	w = kmalloc(sizeof(*w), GFP_ATOMIC);
-	if (w) {
-		INIT_RCU_WORK(&w->rwork, mptcp_pm_release_addr_entry);
-		w->entry = entry;
-		queue_rcu_work(system_wq, &w->rwork);
-	}
+	if (entry->lsk)
+		sock_release(entry->lsk);
+	kfree(entry);
 }
 
 static int mptcp_nl_remove_id_zero_address(struct net *net,
@@ -1244,7 +1220,8 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
 	spin_unlock_bh(&pernet->lock);
 
 	mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), &entry->addr);
-	mptcp_pm_free_addr_entry(entry);
+	synchronize_rcu();
+	__mptcp_pm_release_addr_entry(entry);
 
 	return ret;
 }
@@ -1297,6 +1274,7 @@ static void mptcp_nl_remove_addrs_list(struct net *net,
 	}
 }
 
+/* caller must ensure the RCU grace period is already elapsed */
 static void __flush_addrs(struct list_head *list)
 {
 	while (!list_empty(list)) {
@@ -1305,7 +1283,7 @@ static void __flush_addrs(struct list_head *list)
 		cur = list_entry(list->next,
 				 struct mptcp_pm_addr_entry, list);
 		list_del_rcu(&cur->list);
-		mptcp_pm_free_addr_entry(cur);
+		__mptcp_pm_release_addr_entry(cur);
 	}
 }
 
@@ -1329,6 +1307,7 @@ static int mptcp_nl_cmd_flush_addrs(struct sk_buff *skb, struct genl_info *info)
 	bitmap_zero(pernet->id_bitmap, MAX_ADDR_ID + 1);
 	spin_unlock_bh(&pernet->lock);
 	mptcp_nl_remove_addrs_list(sock_net(skb->sk), &free_list);
+	synchronize_rcu();
 	__flush_addrs(&free_list);
 	return 0;
 }
@@ -1939,7 +1918,8 @@ static void __net_exit pm_nl_exit_net(struct list_head *net_list)
 		struct pm_nl_pernet *pernet = net_generic(net, pm_nl_pernet_id);
 
 		/* net is removed from namespace list, can't race with
-		 * other modifiers
+		 * other modifiers, also netns core already waited for a
+		 * RCU grace period.
 		 */
 		__flush_addrs(&pernet->local_addr_list);
 	}