Commit 5251e2d2 authored by Pablo Neira Ayuso, committed by David S. Miller

[NETFILTER]: conntrack: fix race condition in early_drop

In SMP environments the maximum number of conntracks can be exceeded
under heavy load due to an existing race condition:

        CPU A                   CPU B
     atomic_read()               ...
     early_drop()                ...
        ...                  atomic_read()
   allocate conntrack      allocate conntrack
     atomic_inc()             atomic_inc()

This patch moves the counter increment to before the early drop stage.
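
The key idea is to reserve a slot before checking the limit: with the
increment first, two CPUs racing through the check each see a count that
already includes the other's reservation, so the table can no longer
overshoot the maximum. Because the count now includes the entry being
allocated, the comparison changes from >= to >, and every failure path
(early_drop failing, allocation failing) must decrement the counter again.
A minimal userspace sketch of this reserve-then-check pattern with C11
atomics follows; ct_count, ct_max and try_early_drop are illustrative
names, not the kernel identifiers.

    /* Sketch of the reserve-then-check pattern; illustrative names only. */
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int ct_count;      /* analogous to ip_conntrack_count */
    static int ct_max = 1000;        /* analogous to ip_conntrack_max */

    static bool try_early_drop(void)
    {
        return false;                /* stub: no entry could be evicted */
    }

    /* Returns true if the caller may allocate a new entry. */
    static bool ct_reserve(void)
    {
        /* Reserve first: the increment is atomic, so CPUs racing
         * through the check below each see a count that already
         * includes the other's reservation. */
        atomic_fetch_add(&ct_count, 1);
        /* '>' rather than '>=': the count includes this entry. */
        if (ct_max && atomic_load(&ct_count) > ct_max
            && !try_early_drop()) {
            atomic_fetch_sub(&ct_count, 1);  /* back out on failure */
            return false;                    /* table full: drop */
        }
        return true;
    }

    int main(void)
    {
        ct_max = 1;
        bool first = ct_reserve();   /* count 0 -> 1, within the limit */
        bool second = ct_reserve();  /* 1 -> 2 > max, backed out, fails */
        return (first && !second) ? 0 : 1;
    }

The patch below applies exactly this reordering, using atomic_inc() and
atomic_dec() on ip_conntrack_count and nf_conntrack_count.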
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 01f34848
@@ -622,11 +622,15 @@ struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
 		ip_conntrack_hash_rnd_initted = 1;
 	}
 
+	/* We don't want any race condition at early drop stage */
+	atomic_inc(&ip_conntrack_count);
+
 	if (ip_conntrack_max
-	    && atomic_read(&ip_conntrack_count) >= ip_conntrack_max) {
+	    && atomic_read(&ip_conntrack_count) > ip_conntrack_max) {
 		unsigned int hash = hash_conntrack(orig);
 		/* Try dropping from this hash chain. */
 		if (!early_drop(&ip_conntrack_hash[hash])) {
+			atomic_dec(&ip_conntrack_count);
 			if (net_ratelimit())
 				printk(KERN_WARNING
 				       "ip_conntrack: table full, dropping"
@@ -638,6 +642,7 @@ struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
 	conntrack = kmem_cache_alloc(ip_conntrack_cachep, GFP_ATOMIC);
 	if (!conntrack) {
 		DEBUGP("Can't allocate conntrack.\n");
+		atomic_dec(&ip_conntrack_count);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -651,8 +656,6 @@ struct ip_conntrack *ip_conntrack_alloc(struct ip_conntrack_tuple *orig,
 	conntrack->timeout.data = (unsigned long)conntrack;
 	conntrack->timeout.function = death_by_timeout;
 
-	atomic_inc(&ip_conntrack_count);
-
 	return conntrack;
 }
@@ -848,11 +848,15 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 		nf_conntrack_hash_rnd_initted = 1;
 	}
 
+	/* We don't want any race condition at early drop stage */
+	atomic_inc(&nf_conntrack_count);
+
 	if (nf_conntrack_max
-	    && atomic_read(&nf_conntrack_count) >= nf_conntrack_max) {
+	    && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
 		unsigned int hash = hash_conntrack(orig);
 		/* Try dropping from this hash chain. */
 		if (!early_drop(&nf_conntrack_hash[hash])) {
+			atomic_dec(&nf_conntrack_count);
 			if (net_ratelimit())
 				printk(KERN_WARNING
 				       "nf_conntrack: table full, dropping"
@@ -903,10 +907,12 @@ __nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 	init_timer(&conntrack->timeout);
 	conntrack->timeout.data = (unsigned long)conntrack;
 	conntrack->timeout.function = death_by_timeout;
+	read_unlock_bh(&nf_ct_cache_lock);
 
-	atomic_inc(&nf_conntrack_count);
+	return conntrack;
 out:
 	read_unlock_bh(&nf_ct_cache_lock);
+	atomic_dec(&nf_conntrack_count);
 
 	return conntrack;
 }