Commit 4e29e9ec authored by Patrick McHardy, committed by David S. Miller

[NETFILTER]: nf_conntrack: fix smp_processor_id() in preemptible code warning

Since we're using RCU for the conntrack hash now, we need to avoid
getting preempted or interrupted by BHs while changing the stats.
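
For context, a minimal sketch of the per-CPU stats pattern the warning is about (sketch_stat and SKETCH_STAT_INC below are stand-in names, not the kernel's identifiers): __get_cpu_var() resolves the current CPU via smp_processor_id(), which is only stable while preemption is disabled, and with preemptible RCU an rcu_read_lock() section alone no longer guarantees that.

#include <linux/percpu.h>
#include <linux/smp.h>

/* Stand-in for the conntrack per-CPU stats structure. */
struct sketch_stat {
	unsigned int found;
	unsigned int searched;
};
static DEFINE_PER_CPU(struct sketch_stat, sketch_stat);

/*
 * Safe only while preemption is off (e.g. under local_bh_disable()):
 * __get_cpu_var() indexes the per-CPU area with smp_processor_id(),
 * which CONFIG_DEBUG_PREEMPT flags when called in preemptible context.
 */
#define SKETCH_STAT_INC(count) (__get_cpu_var(sketch_stat).count++)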

Fixes warning reported by Tilman Schmidt <tilman@imap.cc> when using
preemptible RCU:

[   48.180297] BUG: using smp_processor_id() in preemptible [00000000] code: ntpdate/3562
[   48.180297] caller is __nf_conntrack_find+0x9b/0xeb [nf_conntrack]
[   48.180297] Pid: 3562, comm: ntpdate Not tainted 2.6.25-rc2-mm1-testing #1
[   48.180297]  [<c02015b9>] debug_smp_processor_id+0x99/0xb0
[   48.180297]  [<fac643a7>] __nf_conntrack_find+0x9b/0xeb [nf_conntrack]
Tested-by: Tilman Schmidt <tilman@imap.cc>
Tested-by: Christian Casteyde <casteyde.christian@free.fr> [Bugzilla #10097]
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3bdfe7ec
@@ -256,13 +256,19 @@ __nf_conntrack_find(const struct nf_conntrack_tuple *tuple)
 	struct hlist_node *n;
 	unsigned int hash = hash_conntrack(tuple);
 
+	/* Disable BHs the entire time since we normally need to disable them
+	 * at least once for the stats anyway.
+	 */
+	local_bh_disable();
 	hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
 		if (nf_ct_tuple_equal(tuple, &h->tuple)) {
 			NF_CT_STAT_INC(found);
+			local_bh_enable();
 			return h;
 		}
 		NF_CT_STAT_INC(searched);
 	}
+	local_bh_enable();
 
 	return NULL;
 }
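
The asymmetry with the second hunk below is deliberate: __nf_conntrack_find() only disables BHs because its callers already open an RCU read-side critical section. A simplified sketch modeled on the era's nf_conntrack_find_get() (details may differ from the exact source):

struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple)
{
	struct nf_conntrack_tuple_hash *h;

	rcu_read_lock();		/* protects the hash walk */
	h = __nf_conntrack_find(tuple);	/* now disables BHs for the stats */
	if (h && !atomic_inc_not_zero(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use))
		h = NULL;
	rcu_read_unlock();

	return h;
}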
@@ -400,17 +406,20 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
 	struct hlist_node *n;
 	unsigned int hash = hash_conntrack(tuple);
 
-	rcu_read_lock();
+	/* Disable BHs the entire time since we need to disable them at
+	 * least once for the stats anyway.
+	 */
+	rcu_read_lock_bh();
 	hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) {
 		if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack &&
 		    nf_ct_tuple_equal(tuple, &h->tuple)) {
 			NF_CT_STAT_INC(found);
-			rcu_read_unlock();
+			rcu_read_unlock_bh();
 			return 1;
 		}
 		NF_CT_STAT_INC(searched);
 	}
-	rcu_read_unlock();
+	rcu_read_unlock_bh();
 
 	return 0;
 }
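
nf_conntrack_tuple_taken() takes the read lock itself, so a single substitution covers both needs: under the preemptible-RCU implementation of that era, rcu_read_lock_bh() expands to roughly the open-coded pair shown in the sketch below (the helper name is hypothetical; the exact expansion lives in the RCU headers).

/* Hypothetical helper, for illustration only. */
static bool lookup_with_stats_sketch(void)
{
	rcu_read_lock();	/* RCU read-side critical section */
	local_bh_disable();	/* BHs off => preemption off => CPU pinned */

	/* ... hash walk; SKETCH_STAT_INC()-style updates are safe here ... */

	local_bh_enable();
	rcu_read_unlock();
	return false;
}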