Commit 4c4d11b9 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains two Netfilter fixes for your net tree;
they are:

1) Fix NAT compilation with UP, from Geert Uytterhoeven.

2) Fix incorrect number of entries when dumping a set, from
   Vishwanath Pai.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 02388bf8 7f4f7dd4
@@ -1041,12 +1041,24 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
 static int
 mtype_head(struct ip_set *set, struct sk_buff *skb)
 {
-	const struct htype *h = set->data;
+	struct htype *h = set->data;
 	const struct htable *t;
 	struct nlattr *nested;
 	size_t memsize;
 	u8 htable_bits;
 
+	/* If any members have expired, set->elements will be wrong;
+	 * the mtype_expire function will update it with the right count.
+	 * We do not hold set->lock here, so grab it first.
+	 * set->elements can still be incorrect in the case of a huge set,
+	 * because elements might time out during the listing.
+	 */
+	if (SET_WITH_TIMEOUT(set)) {
+		spin_lock_bh(&set->lock);
+		mtype_expire(set, h);
+		spin_unlock_bh(&set->lock);
+	}
+
 	rcu_read_lock_bh();
 	t = rcu_dereference_bh_nfnl(h->table);
 	memsize = mtype_ahash_memsize(h, t) + set->ext_size;
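The comment added in the ipset hunk above states the reasoning behind the fix: set->elements is only corrected when expired members are actually purged, so the header/listing path now purges first, under set->lock. Below is a standalone C sketch of the same lazy-expire-before-report pattern; it is illustrative only, and toy_set, toy_expire and toy_report are invented names, not ipset code.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct toy_set {
	pthread_mutex_t lock;		/* plays the role of set->lock */
	unsigned int elements;		/* may overcount until expire runs */
	time_t expiry[8];		/* per-entry expiry times */
	unsigned int nr;		/* slots in use */
};

/* Recount live entries and fix up the counter (the real mtype_expire
 * also frees the expired entries while it is at it). */
static void toy_expire(struct toy_set *s)
{
	time_t now = time(NULL);
	unsigned int live = 0;

	for (unsigned int i = 0; i < s->nr; i++)
		if (s->expiry[i] > now)
			live++;
	s->elements = live;
}

/* Report the element count; purge under the lock first so the number
 * reflects only live entries, as the listing path now does. */
static unsigned int toy_report(struct toy_set *s)
{
	unsigned int n;

	pthread_mutex_lock(&s->lock);
	toy_expire(s);
	n = s->elements;
	pthread_mutex_unlock(&s->lock);
	return n;	/* can still drift slightly for a huge set */
}

int main(void)
{
	struct toy_set s = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.elements = 3,
		.expiry = { time(NULL) + 60, time(NULL) - 1, time(NULL) + 60 },
		.nr = 3,
	};

	printf("live entries: %u\n", toy_report(&s));	/* prints 2, not 3 */
	return 0;
}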
@@ -429,7 +429,7 @@ nf_nat_setup_info(struct nf_conn *ct,
 	srchash = hash_by_src(net,
 			      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-	lock = &nf_nat_locks[srchash % ARRAY_SIZE(nf_nat_locks)];
+	lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
 	spin_lock_bh(lock);
 	hlist_add_head_rcu(&ct->nat_bysource,
 			   &nf_nat_bysource[srchash]);
@@ -532,9 +532,9 @@ static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
 	unsigned int h;
 
 	h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-	spin_lock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]);
+	spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
 	hlist_del_rcu(&ct->nat_bysource);
-	spin_unlock_bh(&nf_nat_locks[h % ARRAY_SIZE(nf_nat_locks)]);
+	spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
 }
 
 static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
@@ -807,8 +807,8 @@ static int __init nf_nat_init(void)
 	/* Leave them the same for the moment. */
 	nf_nat_htable_size = nf_conntrack_htable_size;
-	if (nf_nat_htable_size < ARRAY_SIZE(nf_nat_locks))
-		nf_nat_htable_size = ARRAY_SIZE(nf_nat_locks);
+	if (nf_nat_htable_size < CONNTRACK_LOCKS)
+		nf_nat_htable_size = CONNTRACK_LOCKS;
 	nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
 	if (!nf_nat_bysource)
@@ -821,7 +821,7 @@ static int __init nf_nat_init(void)
 		return ret;
 	}
 
-	for (i = 0; i < ARRAY_SIZE(nf_nat_locks); i++)
+	for (i = 0; i < CONNTRACK_LOCKS; i++)
 		spin_lock_init(&nf_nat_locks[i]);
 	nf_ct_helper_expectfn_register(&follow_master_nat);
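The NAT hunks above all make the same substitution, ARRAY_SIZE(nf_nat_locks) to CONNTRACK_LOCKS, which is what fixes the UP build mentioned in the pull request: on a uniprocessor kernel without lock debugging, spinlock_t can compile down to an empty, zero-byte struct, so the sizeof-based ARRAY_SIZE() macro turns into a division by zero. The following userspace sketch reproduces that failure mode under those assumptions; up_spinlock is a stand-in type, not the kernel's spinlock_t.

#include <stdio.h>

/* Stand-in for what spinlock_t can degenerate to on a UP, non-debug build:
 * an empty struct, which GCC gives size zero. */
struct up_spinlock { };

#define CONNTRACK_LOCKS	1024
#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct up_spinlock locks[CONNTRACK_LOCKS];

int main(void)
{
	unsigned int srchash = 12345;

	printf("sizeof(locks[0]) = %zu\n", sizeof(locks[0]));	/* 0 with GCC */

	/* ARRAY_SIZE(locks) expands to 0 / 0 -- a compile-time division by
	 * zero -- so an expression like
	 *
	 *	unsigned int bad = srchash % ARRAY_SIZE(locks);
	 *
	 * does not build, which is the breakage the fix avoids by using the
	 * named constant instead.
	 */
	printf("bucket = %u\n", srchash % CONNTRACK_LOCKS);
	return 0;
}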