Commit 1397af5b authored by Florian Westphal, committed by Pablo Neira Ayuso

netfilter: conntrack: remove the percpu dying list

It's no longer needed. Entries that need event redelivery are placed
on the new pernet dying list.

The advantage is that there is no need to take an additional spinlock on
conntrack removal unless event redelivery failed or the conntrack entry
was never added to the table in the first place (confirmed bit not set).

The IPS_CONFIRMED bit now needs to be set as soon as the entry has been
unlinked from the unconfirmed list, else the destroy function may
attempt to unlink it a second time.
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 0d3cc504
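
The ordering constraint in the last paragraph of the message can be sketched in
plain C. This is a hedged userspace model only: struct entry, the confirmed flag
and the single global list are simplified stand-ins for nf_conn, IPS_CONFIRMED
and the percpu unconfirmed list, not kernel API. It shows why setting the bit
immediately after the unlink lets the destroy path decide from the bit alone
whether an unlink (and, in the kernel, the pcpu spinlock) is still needed,
without ever unlinking the same entry twice.

/* Userspace model of the invariant; not kernel code. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next, **pprev;	/* intrusive hlist-style linkage */
	bool confirmed;			/* models IPS_CONFIRMED */
};

static struct entry *unconfirmed;	/* models the percpu unconfirmed list */

static void add_unconfirmed(struct entry *e)
{
	e->next = unconfirmed;
	e->pprev = &unconfirmed;
	if (unconfirmed)
		unconfirmed->pprev = &e->next;
	unconfirmed = e;
}

static void del_from_unconfirmed(struct entry *e)
{
	*e->pprev = e->next;
	if (e->next)
		e->next->pprev = e->pprev;
	e->next = NULL;
	e->pprev = NULL;
}

/* Confirm path: the bit is set right after the unlink, so a later
 * destroy sees confirmed == true and skips the (second) unlink. */
static void confirm(struct entry *e)
{
	del_from_unconfirmed(e);
	e->confirmed = true;
}

/* Destroy path: only never-confirmed entries still sit on the list,
 * so only they need the unlink (and, in the kernel, the pcpu lock). */
static void destroy(struct entry *e)
{
	if (!e->confirmed)
		del_from_unconfirmed(e);
	free(e);
}

int main(void)
{
	struct entry *a = calloc(1, sizeof(*a));
	struct entry *b = calloc(1, sizeof(*b));

	add_unconfirmed(a);
	add_unconfirmed(b);

	confirm(a);	/* unlinked once, bit set */
	destroy(a);	/* bit set: no second unlink */
	destroy(b);	/* never confirmed: unlinked here */

	assert(unconfirmed == NULL);
	printf("ok\n");
	return 0;
}

Running the model prints "ok": the confirmed entry is unlinked exactly once on
the confirm path, the never-confirmed one exactly once on the destroy path.
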
@@ -96,7 +96,6 @@ struct nf_ip_net {
 struct ct_pcpu {
 	spinlock_t lock;
 	struct hlist_nulls_head unconfirmed;
-	struct hlist_nulls_head dying;
 };
 
 struct netns_ct {
@@ -525,21 +525,6 @@ clean_from_lists(struct nf_conn *ct)
 	nf_ct_remove_expectations(ct);
 }
 
-/* must be called with local_bh_disable */
-static void nf_ct_add_to_dying_list(struct nf_conn *ct)
-{
-	struct ct_pcpu *pcpu;
-
-	/* add this conntrack to the (per cpu) dying list */
-	ct->cpu = smp_processor_id();
-	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
-
-	spin_lock(&pcpu->lock);
-	hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
-			     &pcpu->dying);
-	spin_unlock(&pcpu->lock);
-}
-
 /* must be called with local_bh_disable */
 static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
 {
@@ -556,11 +541,11 @@ static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
 }
 
 /* must be called with local_bh_disable */
-static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
+static void nf_ct_del_from_unconfirmed_list(struct nf_conn *ct)
 {
 	struct ct_pcpu *pcpu;
 
-	/* We overload first tuple to link into unconfirmed or dying list.*/
+	/* We overload first tuple to link into unconfirmed list.*/
 	pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);
 
 	spin_lock(&pcpu->lock);
@@ -648,7 +633,8 @@ void nf_ct_destroy(struct nf_conntrack *nfct)
 	 */
 	nf_ct_remove_expectations(ct);
 
-	nf_ct_del_from_dying_or_unconfirmed_list(ct);
+	if (unlikely(!nf_ct_is_confirmed(ct)))
+		nf_ct_del_from_unconfirmed_list(ct);
 
 	local_bh_enable();
@@ -686,7 +672,6 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
 	local_bh_disable();
 
 	__nf_ct_delete_from_lists(ct);
-	nf_ct_add_to_dying_list(ct);
 
 	local_bh_enable();
 }
@@ -700,8 +685,6 @@ static void nf_ct_add_to_ecache_list(struct nf_conn *ct)
 	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
 				 &cnet->ecache.dying_list);
 	spin_unlock(&cnet->ecache.dying_lock);
-#else
-	nf_ct_add_to_dying_list(ct);
 #endif
 }
@@ -995,7 +978,6 @@ static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
 	struct nf_conn_tstamp *tstamp;
 
 	refcount_inc(&ct->ct_general.use);
-	ct->status |= IPS_CONFIRMED;
 
 	/* set conntrack timestamp, if enabled. */
 	tstamp = nf_conn_tstamp_find(ct);
@@ -1024,7 +1006,6 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
 		nf_conntrack_get(&ct->ct_general);
 
 		nf_ct_acct_merge(ct, ctinfo, loser_ct);
-		nf_ct_add_to_dying_list(loser_ct);
 		nf_ct_put(loser_ct);
 		nf_ct_set(skb, ct, ctinfo);
@@ -1157,7 +1138,6 @@ nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
 	return ret;
 
 drop:
-	nf_ct_add_to_dying_list(loser_ct);
 	NF_CT_STAT_INC(net, drop);
 	NF_CT_STAT_INC(net, insert_failed);
 	return NF_DROP;
@@ -1224,10 +1204,10 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	 * user context, else we insert an already 'dead' hash, blocking
 	 * further use of that particular connection -JM.
 	 */
-	nf_ct_del_from_dying_or_unconfirmed_list(ct);
+	nf_ct_del_from_unconfirmed_list(ct);
+	ct->status |= IPS_CONFIRMED;
 
 	if (unlikely(nf_ct_is_dying(ct))) {
-		nf_ct_add_to_dying_list(ct);
 		NF_CT_STAT_INC(net, insert_failed);
 		goto dying;
 	}
@@ -1251,7 +1231,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 			goto out;
 		if (chainlen++ > max_chainlen) {
 chaintoolong:
-			nf_ct_add_to_dying_list(ct);
 			NF_CT_STAT_INC(net, chaintoolong);
 			NF_CT_STAT_INC(net, insert_failed);
 			ret = NF_DROP;
@@ -2800,7 +2779,6 @@ void nf_conntrack_init_end(void)
  * We need to use special "null" values, not used in hash table
  */
 #define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
-#define DYING_NULLS_VAL		((1<<30)+1)
 
 int nf_conntrack_init_net(struct net *net)
 {
@@ -2821,7 +2799,6 @@ int nf_conntrack_init_net(struct net *net)
 		spin_lock_init(&pcpu->lock);
 		INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
-		INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
 	}
 
 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
@@ -94,7 +94,6 @@ static enum retry_state ecache_work_evict_list(struct nf_conntrack_net *cnet)
 	hlist_nulls_for_each_entry_safe(h, n, &evicted_list, hnnode) {
 		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
 
-		hlist_nulls_add_fake(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 		hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
 		nf_ct_put(ct);
@@ -62,7 +62,6 @@ struct ctnetlink_list_dump_ctx {
 	struct nf_conn *last;
 	unsigned int cpu;
 	bool done;
-	bool retrans_done;
 };
 
 static int ctnetlink_dump_tuples_proto(struct sk_buff *skb,
@@ -1751,13 +1750,12 @@ static int ctnetlink_dump_one_entry(struct sk_buff *skb,
 }
 
 static int
-ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
+ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct ctnetlink_list_dump_ctx *ctx = (void *)cb->ctx;
 	struct nf_conn *ct, *last;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
-	struct hlist_nulls_head *list;
 	struct net *net = sock_net(skb->sk);
 	int res, cpu;
@@ -1774,12 +1772,11 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
 		pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
 
 		spin_lock_bh(&pcpu->lock);
-		list = dying ? &pcpu->dying : &pcpu->unconfirmed;
 restart:
-		hlist_nulls_for_each_entry(h, n, list, hnnode) {
+		hlist_nulls_for_each_entry(h, n, &pcpu->unconfirmed, hnnode) {
 			ct = nf_ct_tuplehash_to_ctrack(h);
 
-			res = ctnetlink_dump_one_entry(skb, cb, ct, dying);
+			res = ctnetlink_dump_one_entry(skb, cb, ct, false);
 			if (res < 0) {
 				ctx->cpu = cpu;
 				spin_unlock_bh(&pcpu->lock);
@@ -1812,8 +1809,8 @@ ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
 	struct hlist_nulls_node *n;
 #endif
 
-	if (ctx->retrans_done)
-		return ctnetlink_dump_list(skb, cb, true);
+	if (ctx->done)
+		return 0;
 
 	ctx->last = NULL;
@@ -1842,10 +1839,10 @@ ctnetlink_dump_dying(struct sk_buff *skb, struct netlink_callback *cb)
 	spin_unlock_bh(&ecache_net->dying_lock);
 #endif
 
+	ctx->done = true;
 	nf_ct_put(last);
-	ctx->retrans_done = true;
 
-	return ctnetlink_dump_list(skb, cb, true);
+	return skb->len;
 }
 
 static int ctnetlink_get_ct_dying(struct sk_buff *skb,
@@ -1863,12 +1860,6 @@ static int ctnetlink_get_ct_dying(struct sk_buff *skb,
 	return -EOPNOTSUPP;
 }
 
-static int
-ctnetlink_dump_unconfirmed(struct sk_buff *skb, struct netlink_callback *cb)
-{
-	return ctnetlink_dump_list(skb, cb, false);
-}
-
 static int ctnetlink_get_ct_unconfirmed(struct sk_buff *skb,
 					const struct nfnl_info *info,
 					const struct nlattr * const cda[])