Commit 05dbc7b5 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

tcp/dccp: remove twchain

TCP listener refactoring, part 3 :

Our goal is to hash SYN_RECV sockets into main ehash for fast lookup,
and parallel SYN processing.

The current inet_ehash_bucket contains two chains: one for ESTABLISHED (and
related states) sockets, and another for TIME_WAIT sockets only.

As the hash table is sized to get at most one socket per bucket, it
makes little sense to have separate twchain, as it makes the lookup
slightly more complicated, and doubles hash table memory usage.

If we make sure all socket types have the lookup keys at the same
offsets, we can use a generic and faster lookup. It turns out TIME_WAIT
and ESTABLISHED sockets already have common lookup fields for IPv4.

[ INET_TW_MATCH() is no longer needed ]

I'll provide a follow-up to factorize IPv6 lookup as well, to remove
INET6_TW_MATCH()

This way, SYN_RECV pseudo sockets will be supported the same way.

A new sock_gen_put() helper is added, doing either a sock_put() or
inet_twsk_put() [ and will support SYN_RECV later ].

Note this helper should only be called in real slow path, when rcu
lookup found a socket that was moved to another identity (freed/reused
immediately), but could eventually be used in other contexts, like
sock_edemux()

Before patch :

dmesg | grep "TCP established"

TCP established hash table entries: 524288 (order: 11, 8388608 bytes)

After patch :

TCP established hash table entries: 524288 (order: 10, 4194304 bytes)
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 53af53ae
...@@ -37,12 +37,11 @@ ...@@ -37,12 +37,11 @@
#include <asm/byteorder.h> #include <asm/byteorder.h>
/* This is for all connections with a full identity, no wildcards. /* This is for all connections with a full identity, no wildcards.
* One chain is dedicated to TIME_WAIT sockets. * The 'e' prefix stands for Establish, but we really put all sockets
* I'll experiment with dynamic table growth later. * but LISTEN ones.
*/ */
struct inet_ehash_bucket { struct inet_ehash_bucket {
struct hlist_nulls_head chain; struct hlist_nulls_head chain;
struct hlist_nulls_head twchain;
}; };
/* There are a few simple rules, which allow for local port reuse by /* There are a few simple rules, which allow for local port reuse by
...@@ -123,7 +122,6 @@ struct inet_hashinfo { ...@@ -123,7 +122,6 @@ struct inet_hashinfo {
* *
* TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
* *
* TIME_WAIT sockets use a separate chain (twchain).
*/ */
struct inet_ehash_bucket *ehash; struct inet_ehash_bucket *ehash;
spinlock_t *ehash_locks; spinlock_t *ehash_locks;
...@@ -318,9 +316,6 @@ static inline struct sock *inet_lookup_listener(struct net *net, ...@@ -318,9 +316,6 @@ static inline struct sock *inet_lookup_listener(struct net *net,
net_eq(sock_net(__sk), (__net))) net_eq(sock_net(__sk), (__net)))
#endif /* 64-bit arch */ #endif /* 64-bit arch */
#define INET_TW_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)\
INET_MATCH(__sk, __net, __cookie, __saddr, __daddr, __ports, __dif)
/* /*
* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so we need
* not check it for lookups anymore, thanks Alexey. -DaveM * not check it for lookups anymore, thanks Alexey. -DaveM
......
...@@ -141,18 +141,6 @@ struct inet_timewait_sock { ...@@ -141,18 +141,6 @@ struct inet_timewait_sock {
}; };
#define tw_tclass tw_tos #define tw_tclass tw_tos
static inline void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&tw->tw_node, list);
}
static inline void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
struct hlist_head *list)
{
hlist_add_head(&tw->tw_bind_node, list);
}
static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw) static inline int inet_twsk_dead_hashed(const struct inet_timewait_sock *tw)
{ {
return !hlist_unhashed(&tw->tw_death_node); return !hlist_unhashed(&tw->tw_death_node);
...@@ -192,6 +180,7 @@ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk) ...@@ -192,6 +180,7 @@ static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
return (struct inet_timewait_sock *)sk; return (struct inet_timewait_sock *)sk;
} }
void inet_twsk_free(struct inet_timewait_sock *tw);
void inet_twsk_put(struct inet_timewait_sock *tw); void inet_twsk_put(struct inet_timewait_sock *tw);
int inet_twsk_unhash(struct inet_timewait_sock *tw); int inet_twsk_unhash(struct inet_timewait_sock *tw);
......
...@@ -156,7 +156,7 @@ typedef __u64 __bitwise __addrpair; ...@@ -156,7 +156,7 @@ typedef __u64 __bitwise __addrpair;
*/ */
struct sock_common { struct sock_common {
/* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned /* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
* address on 64bit arches : cf INET_MATCH() and INET_TW_MATCH() * address on 64bit arches : cf INET_MATCH()
*/ */
union { union {
__addrpair skc_addrpair; __addrpair skc_addrpair;
...@@ -301,6 +301,8 @@ struct sock { ...@@ -301,6 +301,8 @@ struct sock {
#define sk_dontcopy_end __sk_common.skc_dontcopy_end #define sk_dontcopy_end __sk_common.skc_dontcopy_end
#define sk_hash __sk_common.skc_hash #define sk_hash __sk_common.skc_hash
#define sk_portpair __sk_common.skc_portpair #define sk_portpair __sk_common.skc_portpair
#define sk_num __sk_common.skc_num
#define sk_dport __sk_common.skc_dport
#define sk_addrpair __sk_common.skc_addrpair #define sk_addrpair __sk_common.skc_addrpair
#define sk_daddr __sk_common.skc_daddr #define sk_daddr __sk_common.skc_daddr
#define sk_rcv_saddr __sk_common.skc_rcv_saddr #define sk_rcv_saddr __sk_common.skc_rcv_saddr
...@@ -1653,6 +1655,10 @@ static inline void sock_put(struct sock *sk) ...@@ -1653,6 +1655,10 @@ static inline void sock_put(struct sock *sk)
if (atomic_dec_and_test(&sk->sk_refcnt)) if (atomic_dec_and_test(&sk->sk_refcnt))
sk_free(sk); sk_free(sk);
} }
/* Generic version of sock_put(), dealing with all sockets
* (TCP_TIMEWAIT, ESTABLISHED...)
*/
void sock_gen_put(struct sock *sk);
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested); int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested);
......
...@@ -1519,7 +1519,6 @@ enum tcp_seq_states { ...@@ -1519,7 +1519,6 @@ enum tcp_seq_states {
TCP_SEQ_STATE_LISTENING, TCP_SEQ_STATE_LISTENING,
TCP_SEQ_STATE_OPENREQ, TCP_SEQ_STATE_OPENREQ,
TCP_SEQ_STATE_ESTABLISHED, TCP_SEQ_STATE_ESTABLISHED,
TCP_SEQ_STATE_TIME_WAIT,
}; };
int tcp_seq_open(struct inode *inode, struct file *file); int tcp_seq_open(struct inode *inode, struct file *file);
......
...@@ -1158,10 +1158,8 @@ static int __init dccp_init(void) ...@@ -1158,10 +1158,8 @@ static int __init dccp_init(void)
goto out_free_bind_bucket_cachep; goto out_free_bind_bucket_cachep;
} }
for (i = 0; i <= dccp_hashinfo.ehash_mask; i++) { for (i = 0; i <= dccp_hashinfo.ehash_mask; i++)
INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i); INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].chain, i);
INIT_HLIST_NULLS_HEAD(&dccp_hashinfo.ehash[i].twchain, i);
}
if (inet_ehash_locks_alloc(&dccp_hashinfo)) if (inet_ehash_locks_alloc(&dccp_hashinfo))
goto out_free_dccp_ehash; goto out_free_dccp_ehash;
......
...@@ -635,12 +635,14 @@ static int inet_csk_diag_dump(struct sock *sk, ...@@ -635,12 +635,14 @@ static int inet_csk_diag_dump(struct sock *sk,
cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh); cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->nlh);
} }
static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, static int inet_twsk_diag_dump(struct sock *sk,
struct sk_buff *skb, struct sk_buff *skb,
struct netlink_callback *cb, struct netlink_callback *cb,
struct inet_diag_req_v2 *r, struct inet_diag_req_v2 *r,
const struct nlattr *bc) const struct nlattr *bc)
{ {
struct inet_timewait_sock *tw = inet_twsk(sk);
if (bc != NULL) { if (bc != NULL) {
struct inet_diag_entry entry; struct inet_diag_entry entry;
...@@ -911,8 +913,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, ...@@ -911,8 +913,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
num = 0; num = 0;
if (hlist_nulls_empty(&head->chain) && if (hlist_nulls_empty(&head->chain))
hlist_nulls_empty(&head->twchain))
continue; continue;
if (i > s_i) if (i > s_i)
...@@ -920,7 +921,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, ...@@ -920,7 +921,7 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
spin_lock_bh(lock); spin_lock_bh(lock);
sk_nulls_for_each(sk, node, &head->chain) { sk_nulls_for_each(sk, node, &head->chain) {
struct inet_sock *inet = inet_sk(sk); int res;
if (!net_eq(sock_net(sk), net)) if (!net_eq(sock_net(sk), net))
continue; continue;
...@@ -929,15 +930,19 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, ...@@ -929,15 +930,19 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
if (!(r->idiag_states & (1 << sk->sk_state))) if (!(r->idiag_states & (1 << sk->sk_state)))
goto next_normal; goto next_normal;
if (r->sdiag_family != AF_UNSPEC && if (r->sdiag_family != AF_UNSPEC &&
sk->sk_family != r->sdiag_family) sk->sk_family != r->sdiag_family)
goto next_normal; goto next_normal;
if (r->id.idiag_sport != inet->inet_sport && if (r->id.idiag_sport != htons(sk->sk_num) &&
r->id.idiag_sport) r->id.idiag_sport)
goto next_normal; goto next_normal;
if (r->id.idiag_dport != inet->inet_dport && if (r->id.idiag_dport != sk->sk_dport &&
r->id.idiag_dport) r->id.idiag_dport)
goto next_normal; goto next_normal;
if (inet_csk_diag_dump(sk, skb, cb, r, bc) < 0) { if (sk->sk_state == TCP_TIME_WAIT)
res = inet_twsk_diag_dump(sk, skb, cb, r, bc);
else
res = inet_csk_diag_dump(sk, skb, cb, r, bc);
if (res < 0) {
spin_unlock_bh(lock); spin_unlock_bh(lock);
goto done; goto done;
} }
...@@ -945,33 +950,6 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb, ...@@ -945,33 +950,6 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
++num; ++num;
} }
if (r->idiag_states & TCPF_TIME_WAIT) {
struct inet_timewait_sock *tw;
inet_twsk_for_each(tw, node,
&head->twchain) {
if (!net_eq(twsk_net(tw), net))
continue;
if (num < s_num)
goto next_dying;
if (r->sdiag_family != AF_UNSPEC &&
tw->tw_family != r->sdiag_family)
goto next_dying;
if (r->id.idiag_sport != tw->tw_sport &&
r->id.idiag_sport)
goto next_dying;
if (r->id.idiag_dport != tw->tw_dport &&
r->id.idiag_dport)
goto next_dying;
if (inet_twsk_diag_dump(tw, skb, cb, r, bc) < 0) {
spin_unlock_bh(lock);
goto done;
}
next_dying:
++num;
}
}
spin_unlock_bh(lock); spin_unlock_bh(lock);
} }
......
...@@ -230,6 +230,19 @@ struct sock *__inet_lookup_listener(struct net *net, ...@@ -230,6 +230,19 @@ struct sock *__inet_lookup_listener(struct net *net,
} }
EXPORT_SYMBOL_GPL(__inet_lookup_listener); EXPORT_SYMBOL_GPL(__inet_lookup_listener);
/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
if (!atomic_dec_and_test(&sk->sk_refcnt))
return;
if (sk->sk_state == TCP_TIME_WAIT)
inet_twsk_free(inet_twsk(sk));
else
sk_free(sk);
}
EXPORT_SYMBOL_GPL(sock_gen_put);
struct sock *__inet_lookup_established(struct net *net, struct sock *__inet_lookup_established(struct net *net,
struct inet_hashinfo *hashinfo, struct inet_hashinfo *hashinfo,
const __be32 saddr, const __be16 sport, const __be32 saddr, const __be16 sport,
...@@ -255,13 +268,13 @@ struct sock *__inet_lookup_established(struct net *net, ...@@ -255,13 +268,13 @@ struct sock *__inet_lookup_established(struct net *net,
if (likely(INET_MATCH(sk, net, acookie, if (likely(INET_MATCH(sk, net, acookie,
saddr, daddr, ports, dif))) { saddr, daddr, ports, dif))) {
if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
goto begintw; goto out;
if (unlikely(!INET_MATCH(sk, net, acookie, if (unlikely(!INET_MATCH(sk, net, acookie,
saddr, daddr, ports, dif))) { saddr, daddr, ports, dif))) {
sock_put(sk); sock_gen_put(sk);
goto begin; goto begin;
} }
goto out; goto found;
} }
} }
/* /*
...@@ -271,37 +284,9 @@ struct sock *__inet_lookup_established(struct net *net, ...@@ -271,37 +284,9 @@ struct sock *__inet_lookup_established(struct net *net,
*/ */
if (get_nulls_value(node) != slot) if (get_nulls_value(node) != slot)
goto begin; goto begin;
begintw:
/* Must check for a TIME_WAIT'er before going to listener hash. */
sk_nulls_for_each_rcu(sk, node, &head->twchain) {
if (sk->sk_hash != hash)
continue;
if (likely(INET_TW_MATCH(sk, net, acookie,
saddr, daddr, ports,
dif))) {
if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
sk = NULL;
goto out;
}
if (unlikely(!INET_TW_MATCH(sk, net, acookie,
saddr, daddr, ports,
dif))) {
inet_twsk_put(inet_twsk(sk));
goto begintw;
}
goto out;
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot)
goto begintw;
sk = NULL;
out: out:
sk = NULL;
found:
rcu_read_unlock(); rcu_read_unlock();
return sk; return sk;
} }
...@@ -326,39 +311,29 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row, ...@@ -326,39 +311,29 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
spinlock_t *lock = inet_ehash_lockp(hinfo, hash); spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
struct sock *sk2; struct sock *sk2;
const struct hlist_nulls_node *node; const struct hlist_nulls_node *node;
struct inet_timewait_sock *tw; struct inet_timewait_sock *tw = NULL;
int twrefcnt = 0; int twrefcnt = 0;
spin_lock(lock); spin_lock(lock);
/* Check TIME-WAIT sockets first. */
sk_nulls_for_each(sk2, node, &head->twchain) {
if (sk2->sk_hash != hash)
continue;
if (likely(INET_TW_MATCH(sk2, net, acookie,
saddr, daddr, ports, dif))) {
tw = inet_twsk(sk2);
if (twsk_unique(sk, sk2, twp))
goto unique;
else
goto not_unique;
}
}
tw = NULL;
/* And established part... */
sk_nulls_for_each(sk2, node, &head->chain) { sk_nulls_for_each(sk2, node, &head->chain) {
if (sk2->sk_hash != hash) if (sk2->sk_hash != hash)
continue; continue;
if (likely(INET_MATCH(sk2, net, acookie, if (likely(INET_MATCH(sk2, net, acookie,
saddr, daddr, ports, dif))) saddr, daddr, ports, dif))) {
if (sk2->sk_state == TCP_TIME_WAIT) {
tw = inet_twsk(sk2);
if (twsk_unique(sk, sk2, twp))
break;
}
goto not_unique; goto not_unique;
}
} }
unique:
/* Must record num and sport now. Otherwise we will see /* Must record num and sport now. Otherwise we will see
* in hash table socket with a funny identity. */ * in hash table socket with a funny identity.
*/
inet->inet_num = lport; inet->inet_num = lport;
inet->inet_sport = htons(lport); inet->inet_sport = htons(lport);
sk->sk_hash = hash; sk->sk_hash = hash;
......
...@@ -87,19 +87,11 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw, ...@@ -87,19 +87,11 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
refcnt += inet_twsk_bind_unhash(tw, hashinfo); refcnt += inet_twsk_bind_unhash(tw, hashinfo);
spin_unlock(&bhead->lock); spin_unlock(&bhead->lock);
#ifdef SOCK_REFCNT_DEBUG BUG_ON(refcnt >= atomic_read(&tw->tw_refcnt));
if (atomic_read(&tw->tw_refcnt) != 1) { atomic_sub(refcnt, &tw->tw_refcnt);
pr_debug("%s timewait_sock %p refcnt=%d\n",
tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
}
#endif
while (refcnt) {
inet_twsk_put(tw);
refcnt--;
}
} }
static noinline void inet_twsk_free(struct inet_timewait_sock *tw) void inet_twsk_free(struct inet_timewait_sock *tw)
{ {
struct module *owner = tw->tw_prot->owner; struct module *owner = tw->tw_prot->owner;
twsk_destructor((struct sock *)tw); twsk_destructor((struct sock *)tw);
...@@ -118,6 +110,18 @@ void inet_twsk_put(struct inet_timewait_sock *tw) ...@@ -118,6 +110,18 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
} }
EXPORT_SYMBOL_GPL(inet_twsk_put); EXPORT_SYMBOL_GPL(inet_twsk_put);
static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&tw->tw_node, list);
}
static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
struct hlist_head *list)
{
hlist_add_head(&tw->tw_bind_node, list);
}
/* /*
* Enter the time wait state. This is called with locally disabled BH. * Enter the time wait state. This is called with locally disabled BH.
* Essentially we whip up a timewait bucket, copy the relevant info into it * Essentially we whip up a timewait bucket, copy the relevant info into it
...@@ -146,26 +150,21 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, ...@@ -146,26 +150,21 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
spin_lock(lock); spin_lock(lock);
/* /*
* Step 2: Hash TW into TIMEWAIT chain. * Step 2: Hash TW into tcp ehash chain.
* Should be done before removing sk from established chain * Notes :
* because readers are lockless and search established first. * - tw_refcnt is set to 3 because :
* - We have one reference from bhash chain.
* - We have one reference from ehash chain.
* We can use atomic_set() because prior spin_lock()/spin_unlock()
* committed into memory all tw fields.
*/ */
inet_twsk_add_node_rcu(tw, &ehead->twchain); atomic_set(&tw->tw_refcnt, 1 + 1 + 1);
inet_twsk_add_node_rcu(tw, &ehead->chain);
/* Step 3: Remove SK from established hash. */ /* Step 3: Remove SK from hash chain */
if (__sk_nulls_del_node_init_rcu(sk)) if (__sk_nulls_del_node_init_rcu(sk))
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
/*
* Notes :
* - We initially set tw_refcnt to 0 in inet_twsk_alloc()
* - We add one reference for the bhash link
* - We add one reference for the ehash link
* - We want this refcnt update done before allowing other
* threads to find this tw in ehash chain.
*/
atomic_add(1 + 1 + 1, &tw->tw_refcnt);
spin_unlock(lock); spin_unlock(lock);
} }
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance); EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
...@@ -490,7 +489,9 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo, ...@@ -490,7 +489,9 @@ void inet_twsk_purge(struct inet_hashinfo *hashinfo,
restart_rcu: restart_rcu:
rcu_read_lock(); rcu_read_lock();
restart: restart:
sk_nulls_for_each_rcu(sk, node, &head->twchain) { sk_nulls_for_each_rcu(sk, node, &head->chain) {
if (sk->sk_state != TCP_TIME_WAIT)
continue;
tw = inet_twsk(sk); tw = inet_twsk(sk);
if ((tw->tw_family != family) || if ((tw->tw_family != family) ||
atomic_read(&twsk_net(tw)->count)) atomic_read(&twsk_net(tw)->count))
......
...@@ -3137,10 +3137,9 @@ void __init tcp_init(void) ...@@ -3137,10 +3137,9 @@ void __init tcp_init(void)
&tcp_hashinfo.ehash_mask, &tcp_hashinfo.ehash_mask,
0, 0,
thash_entries ? 0 : 512 * 1024); thash_entries ? 0 : 512 * 1024);
for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) { for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
}
if (inet_ehash_locks_alloc(&tcp_hashinfo)) if (inet_ehash_locks_alloc(&tcp_hashinfo))
panic("TCP: failed to alloc ehash_locks"); panic("TCP: failed to alloc ehash_locks");
tcp_hashinfo.bhash = tcp_hashinfo.bhash =
......
...@@ -2194,18 +2194,6 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock); ...@@ -2194,18 +2194,6 @@ EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */ /* Proc filesystem TCP sock list dumping. */
static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
{
return hlist_nulls_empty(head) ? NULL :
list_entry(head->first, struct inet_timewait_sock, tw_node);
}
static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
{
return !is_a_nulls(tw->tw_node.next) ?
hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
}
/* /*
* Get next listener socket follow cur. If cur is NULL, get first socket * Get next listener socket follow cur. If cur is NULL, get first socket
* starting from bucket given in st->bucket; when st->bucket is zero the * starting from bucket given in st->bucket; when st->bucket is zero the
...@@ -2309,10 +2297,9 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos) ...@@ -2309,10 +2297,9 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
return rc; return rc;
} }
static inline bool empty_bucket(struct tcp_iter_state *st) static inline bool empty_bucket(const struct tcp_iter_state *st)
{ {
return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) && return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
} }
/* /*
...@@ -2329,7 +2316,6 @@ static void *established_get_first(struct seq_file *seq) ...@@ -2329,7 +2316,6 @@ static void *established_get_first(struct seq_file *seq)
for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) { for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
struct sock *sk; struct sock *sk;
struct hlist_nulls_node *node; struct hlist_nulls_node *node;
struct inet_timewait_sock *tw;
spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket); spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
/* Lockless fast path for the common case of empty buckets */ /* Lockless fast path for the common case of empty buckets */
...@@ -2345,18 +2331,7 @@ static void *established_get_first(struct seq_file *seq) ...@@ -2345,18 +2331,7 @@ static void *established_get_first(struct seq_file *seq)
rc = sk; rc = sk;
goto out; goto out;
} }
st->state = TCP_SEQ_STATE_TIME_WAIT;
inet_twsk_for_each(tw, node,
&tcp_hashinfo.ehash[st->bucket].twchain) {
if (tw->tw_family != st->family ||
!net_eq(twsk_net(tw), net)) {
continue;
}
rc = tw;
goto out;
}
spin_unlock_bh(lock); spin_unlock_bh(lock);
st->state = TCP_SEQ_STATE_ESTABLISHED;
} }
out: out:
return rc; return rc;
...@@ -2365,7 +2340,6 @@ static void *established_get_first(struct seq_file *seq) ...@@ -2365,7 +2340,6 @@ static void *established_get_first(struct seq_file *seq)
static void *established_get_next(struct seq_file *seq, void *cur) static void *established_get_next(struct seq_file *seq, void *cur)
{ {
struct sock *sk = cur; struct sock *sk = cur;
struct inet_timewait_sock *tw;
struct hlist_nulls_node *node; struct hlist_nulls_node *node;
struct tcp_iter_state *st = seq->private; struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq); struct net *net = seq_file_net(seq);
...@@ -2373,45 +2347,16 @@ static void *established_get_next(struct seq_file *seq, void *cur) ...@@ -2373,45 +2347,16 @@ static void *established_get_next(struct seq_file *seq, void *cur)
++st->num; ++st->num;
++st->offset; ++st->offset;
if (st->state == TCP_SEQ_STATE_TIME_WAIT) { sk = sk_nulls_next(sk);
tw = cur;
tw = tw_next(tw);
get_tw:
while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
tw = tw_next(tw);
}
if (tw) {
cur = tw;
goto out;
}
spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
st->state = TCP_SEQ_STATE_ESTABLISHED;
/* Look for next non empty bucket */
st->offset = 0;
while (++st->bucket <= tcp_hashinfo.ehash_mask &&
empty_bucket(st))
;
if (st->bucket > tcp_hashinfo.ehash_mask)
return NULL;
spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
} else
sk = sk_nulls_next(sk);
sk_nulls_for_each_from(sk, node) { sk_nulls_for_each_from(sk, node) {
if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
goto found; return sk;
} }
st->state = TCP_SEQ_STATE_TIME_WAIT; spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain); ++st->bucket;
goto get_tw; return established_get_first(seq);
found:
cur = sk;
out:
return cur;
} }
static void *established_get_idx(struct seq_file *seq, loff_t pos) static void *established_get_idx(struct seq_file *seq, loff_t pos)
...@@ -2464,10 +2409,9 @@ static void *tcp_seek_last_pos(struct seq_file *seq) ...@@ -2464,10 +2409,9 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
if (rc) if (rc)
break; break;
st->bucket = 0; st->bucket = 0;
st->state = TCP_SEQ_STATE_ESTABLISHED;
/* Fallthrough */ /* Fallthrough */
case TCP_SEQ_STATE_ESTABLISHED: case TCP_SEQ_STATE_ESTABLISHED:
case TCP_SEQ_STATE_TIME_WAIT:
st->state = TCP_SEQ_STATE_ESTABLISHED;
if (st->bucket > tcp_hashinfo.ehash_mask) if (st->bucket > tcp_hashinfo.ehash_mask)
break; break;
rc = established_get_first(seq); rc = established_get_first(seq);
...@@ -2524,7 +2468,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos) ...@@ -2524,7 +2468,6 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
} }
break; break;
case TCP_SEQ_STATE_ESTABLISHED: case TCP_SEQ_STATE_ESTABLISHED:
case TCP_SEQ_STATE_TIME_WAIT:
rc = established_get_next(seq, v); rc = established_get_next(seq, v);
break; break;
} }
...@@ -2548,7 +2491,6 @@ static void tcp_seq_stop(struct seq_file *seq, void *v) ...@@ -2548,7 +2491,6 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
if (v != SEQ_START_TOKEN) if (v != SEQ_START_TOKEN)
spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock); spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
break; break;
case TCP_SEQ_STATE_TIME_WAIT:
case TCP_SEQ_STATE_ESTABLISHED: case TCP_SEQ_STATE_ESTABLISHED:
if (v) if (v)
spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket)); spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
...@@ -2707,6 +2649,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw, ...@@ -2707,6 +2649,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
static int tcp4_seq_show(struct seq_file *seq, void *v) static int tcp4_seq_show(struct seq_file *seq, void *v)
{ {
struct tcp_iter_state *st; struct tcp_iter_state *st;
struct sock *sk = v;
int len; int len;
if (v == SEQ_START_TOKEN) { if (v == SEQ_START_TOKEN) {
...@@ -2721,14 +2664,14 @@ static int tcp4_seq_show(struct seq_file *seq, void *v) ...@@ -2721,14 +2664,14 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
switch (st->state) { switch (st->state) {
case TCP_SEQ_STATE_LISTENING: case TCP_SEQ_STATE_LISTENING:
case TCP_SEQ_STATE_ESTABLISHED: case TCP_SEQ_STATE_ESTABLISHED:
get_tcp4_sock(v, seq, st->num, &len); if (sk->sk_state == TCP_TIME_WAIT)
get_timewait4_sock(v, seq, st->num, &len);
else
get_tcp4_sock(v, seq, st->num, &len);
break; break;
case TCP_SEQ_STATE_OPENREQ: case TCP_SEQ_STATE_OPENREQ:
get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len); get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
break; break;
case TCP_SEQ_STATE_TIME_WAIT:
get_timewait4_sock(v, seq, st->num, &len);
break;
} }
seq_printf(seq, "%*s\n", TMPSZ - 1 - len, ""); seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out: out:
......
...@@ -89,43 +89,36 @@ struct sock *__inet6_lookup_established(struct net *net, ...@@ -89,43 +89,36 @@ struct sock *__inet6_lookup_established(struct net *net,
sk_nulls_for_each_rcu(sk, node, &head->chain) { sk_nulls_for_each_rcu(sk, node, &head->chain) {
if (sk->sk_hash != hash) if (sk->sk_hash != hash)
continue; continue;
if (likely(INET6_MATCH(sk, net, saddr, daddr, ports, dif))) { if (sk->sk_state == TCP_TIME_WAIT) {
if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) if (!INET6_TW_MATCH(sk, net, saddr, daddr, ports, dif))
goto begintw; continue;
} else {
if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif))
continue;
}
if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
goto out;
if (sk->sk_state == TCP_TIME_WAIT) {
if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr,
ports, dif))) {
sock_gen_put(sk);
goto begin;
}
} else {
if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, if (unlikely(!INET6_MATCH(sk, net, saddr, daddr,
ports, dif))) { ports, dif))) {
sock_put(sk); sock_put(sk);
goto begin; goto begin;
} }
goto out; goto found;
} }
} }
if (get_nulls_value(node) != slot) if (get_nulls_value(node) != slot)
goto begin; goto begin;
begintw:
/* Must check for a TIME_WAIT'er before going to listener hash. */
sk_nulls_for_each_rcu(sk, node, &head->twchain) {
if (sk->sk_hash != hash)
continue;
if (likely(INET6_TW_MATCH(sk, net, saddr, daddr,
ports, dif))) {
if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
sk = NULL;
goto out;
}
if (unlikely(!INET6_TW_MATCH(sk, net, saddr, daddr,
ports, dif))) {
inet_twsk_put(inet_twsk(sk));
goto begintw;
}
goto out;
}
}
if (get_nulls_value(node) != slot)
goto begintw;
sk = NULL;
out: out:
sk = NULL;
found:
rcu_read_unlock(); rcu_read_unlock();
return sk; return sk;
} }
...@@ -248,31 +241,25 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row, ...@@ -248,31 +241,25 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
spinlock_t *lock = inet_ehash_lockp(hinfo, hash); spinlock_t *lock = inet_ehash_lockp(hinfo, hash);
struct sock *sk2; struct sock *sk2;
const struct hlist_nulls_node *node; const struct hlist_nulls_node *node;
struct inet_timewait_sock *tw; struct inet_timewait_sock *tw = NULL;
int twrefcnt = 0; int twrefcnt = 0;
spin_lock(lock); spin_lock(lock);
/* Check TIME-WAIT sockets first. */ sk_nulls_for_each(sk2, node, &head->chain) {
sk_nulls_for_each(sk2, node, &head->twchain) {
if (sk2->sk_hash != hash) if (sk2->sk_hash != hash)
continue; continue;
if (likely(INET6_TW_MATCH(sk2, net, saddr, daddr, if (sk2->sk_state == TCP_TIME_WAIT) {
ports, dif))) { if (likely(INET6_TW_MATCH(sk2, net, saddr, daddr,
tw = inet_twsk(sk2); ports, dif))) {
if (twsk_unique(sk, sk2, twp)) tw = inet_twsk(sk2);
goto unique; if (twsk_unique(sk, sk2, twp))
else goto unique;
goto not_unique; else
goto not_unique;
}
} }
}
tw = NULL;
/* And established part... */
sk_nulls_for_each(sk2, node, &head->chain) {
if (sk2->sk_hash != hash)
continue;
if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, dif))) if (likely(INET6_MATCH(sk2, net, saddr, daddr, ports, dif)))
goto not_unique; goto not_unique;
} }
......
...@@ -1834,6 +1834,7 @@ static void get_timewait6_sock(struct seq_file *seq, ...@@ -1834,6 +1834,7 @@ static void get_timewait6_sock(struct seq_file *seq,
static int tcp6_seq_show(struct seq_file *seq, void *v) static int tcp6_seq_show(struct seq_file *seq, void *v)
{ {
struct tcp_iter_state *st; struct tcp_iter_state *st;
struct sock *sk = v;
if (v == SEQ_START_TOKEN) { if (v == SEQ_START_TOKEN) {
seq_puts(seq, seq_puts(seq,
...@@ -1849,14 +1850,14 @@ static int tcp6_seq_show(struct seq_file *seq, void *v) ...@@ -1849,14 +1850,14 @@ static int tcp6_seq_show(struct seq_file *seq, void *v)
switch (st->state) { switch (st->state) {
case TCP_SEQ_STATE_LISTENING: case TCP_SEQ_STATE_LISTENING:
case TCP_SEQ_STATE_ESTABLISHED: case TCP_SEQ_STATE_ESTABLISHED:
get_tcp6_sock(seq, v, st->num); if (sk->sk_state == TCP_TIME_WAIT)
get_timewait6_sock(seq, v, st->num);
else
get_tcp6_sock(seq, v, st->num);
break; break;
case TCP_SEQ_STATE_OPENREQ: case TCP_SEQ_STATE_OPENREQ:
get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid); get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
break; break;
case TCP_SEQ_STATE_TIME_WAIT:
get_timewait6_sock(seq, v, st->num);
break;
} }
out: out:
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment