Commit 67072c31 authored by Jakub Kicinski

Merge branch 'tcp-rds-fix-use-after-free-around-kernel-tcp-reqsk'

Kuniyuki Iwashima says:

====================
tcp/rds: Fix use-after-free around kernel TCP reqsk.

syzkaller reported a warning from the netns ref tracker for an RDS TCP
listener, which commit 740ea3c4 ("tcp: Clean up kernel listener's reqsk in
inet_twsk_purge()") fixed for the per-netns ehash.

This series fixes a bug in that partial fix and also fixes the reported
bug for the global ehash.

v4: https://lore.kernel.org/netdev/20240307232151.55963-1-kuniyu@amazon.com/
v3: https://lore.kernel.org/netdev/20240307224423.53315-1-kuniyu@amazon.com/
v2: https://lore.kernel.org/netdev/20240227011041.97375-1-kuniyu@amazon.com/
v1: https://lore.kernel.org/netdev/20240223172448.94084-1-kuniyu@amazon.com/
====================

Link: https://lore.kernel.org/r/20240308200122.64357-1-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 9187210e 2a750d6a
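For context on the root cause: RDS's TCP transport creates its listener
socket with sock_create_kern(), and kernel sockets do not hold a refcount
on their netns, so a kernel reqsk hashed under such a listener can have its
timer fire after the netns has been freed. Below is a minimal sketch of
that setup; kernel_listener_init() is a hypothetical helper loosely modeled
on rds_tcp_listen_init(), with binding, listen(), and callback wiring
omitted.

#include <linux/err.h>
#include <linux/net.h>
#include <net/sock.h>

/* Sketch only: a kernel TCP listener that does not pin its netns.
 * sock_create_kern() leaves sk->sk_net_refcnt == 0, so @net can be
 * freed while a reqsk timer for this listener is still pending --
 * the use-after-free this series addresses.
 */
static struct socket *kernel_listener_init(struct net *net)
{
	struct socket *sock;
	int err;

	err = sock_create_kern(net, PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0)
		return ERR_PTR(err);

	/* Unlike a userspace socket, this one takes no reference on @net. */
	return sock;
}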
net/ipv4/inet_timewait_sock.c
@@ -263,12 +263,12 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
 
+/* Remove all non full sockets (TIME_WAIT and NEW_SYN_RECV) for dead netns */
 void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
 {
-	struct inet_timewait_sock *tw;
-	struct sock *sk;
 	struct hlist_nulls_node *node;
 	unsigned int slot;
+	struct sock *sk;
 
 	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
 		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
@@ -277,38 +277,35 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
 		rcu_read_lock();
 restart:
 		sk_nulls_for_each_rcu(sk, node, &head->chain) {
-			if (sk->sk_state != TCP_TIME_WAIT) {
-				/* A kernel listener socket might not hold refcnt for net,
-				 * so reqsk_timer_handler() could be fired after net is
-				 * freed. Userspace listener and reqsk never exist here.
-				 */
-				if (unlikely(sk->sk_state == TCP_NEW_SYN_RECV &&
-					     hashinfo->pernet)) {
-					struct request_sock *req = inet_reqsk(sk);
-
-					inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
-				}
+			int state = inet_sk_state_load(sk);
 
+			if ((1 << state) & ~(TCPF_TIME_WAIT |
+					     TCPF_NEW_SYN_RECV))
 				continue;
-			}
 
-			tw = inet_twsk(sk);
-			if ((tw->tw_family != family) ||
-				refcount_read(&twsk_net(tw)->ns.count))
+			if (sk->sk_family != family ||
+			    refcount_read(&sock_net(sk)->ns.count))
 				continue;
 
-			if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
+			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
 				continue;
 
-			if (unlikely((tw->tw_family != family) ||
-				     refcount_read(&twsk_net(tw)->ns.count))) {
-				inet_twsk_put(tw);
+			if (unlikely(sk->sk_family != family ||
+				     refcount_read(&sock_net(sk)->ns.count))) {
+				sock_gen_put(sk);
 				goto restart;
 			}
 
 			rcu_read_unlock();
 			local_bh_disable();
-			inet_twsk_deschedule_put(tw);
+			if (state == TCP_TIME_WAIT) {
+				inet_twsk_deschedule_put(inet_twsk(sk));
+			} else {
+				struct request_sock *req = inet_reqsk(sk);
+
+				inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
+								  req);
+			}
 			local_bh_enable();
 			goto restart_rcu;
 		}
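Two details of the rewritten loop are worth spelling out. First, the state
is read once with inet_sk_state_load() (a READ_ONCE() wrapper), so TIME_WAIT
and NEW_SYN_RECV sockets are classified from a single lockless snapshot
instead of re-reading sk->sk_state. Second, a reference is now taken with
refcount_inc_not_zero() before either socket type is acted on, closing the
window where a reqsk could be freed by reqsk_timer_handler() underneath the
purge. The set-membership test itself is a plain bitmask trick; here is a
standalone userspace sketch (not kernel code; the state values mirror
include/net/tcp_states.h):

#include <stdio.h>

/* Values copied from include/net/tcp_states.h. */
enum { TCP_ESTABLISHED = 1, TCP_TIME_WAIT = 6, TCP_NEW_SYN_RECV = 12 };

#define TCPF_TIME_WAIT		(1 << TCP_TIME_WAIT)
#define TCPF_NEW_SYN_RECV	(1 << TCP_NEW_SYN_RECV)

int main(void)
{
	int states[] = { TCP_ESTABLISHED, TCP_TIME_WAIT, TCP_NEW_SYN_RECV };

	for (int i = 0; i < 3; i++) {
		int state = states[i];

		/* Non-zero iff state is outside the allowed set, i.e. the
		 * socket is skipped -- the same test as in the loop above.
		 */
		printf("state %2d -> %s\n", state,
		       ((1 << state) & ~(TCPF_TIME_WAIT | TCPF_NEW_SYN_RECV)) ?
		       "skip" : "purge");
	}
	return 0;
}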
net/ipv4/tcp_minisocks.c
@@ -398,10 +398,6 @@ void tcp_twsk_purge(struct list_head *net_exit_list, int family)
 			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
 			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
 		} else if (!purged_once) {
-			/* The last refcount is decremented in tcp_sk_exit_batch() */
-			if (refcount_read(&net->ipv4.tcp_death_row.tw_refcount) == 1)
-				continue;
-
 			inet_twsk_purge(&tcp_hashinfo, family);
 			purged_once = true;
 		}
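The shortcut removed here skipped the global-ehash purge whenever
tw_refcount was 1, i.e. when no TIME_WAIT sockets remained. But kernel
reqsks do not contribute to tw_refcount, so the early continue could leave
a NEW_SYN_RECV socket behind in the global ehash with its timer armed,
which is exactly the reported use-after-free. For reference, a sketch of
how tcp_twsk_purge() reads after this hunk (reconstructed from the context
lines above, assuming no other changes in the function):

/* Reconstruction, not copied verbatim from the tree. */
void tcp_twsk_purge(struct list_head *net_exit_list, int family)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo, family);
		} else if (!purged_once) {
			inet_twsk_purge(&tcp_hashinfo, family);
			purged_once = true;
		}
	}
}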