Commit 76eb9460 authored by Patrick McHardy, committed by David S. Miller

[NETFILTER]: nf_conntrack: don't inline early_drop()

early_drop() is only called *very* rarely; unfortunately, gcc inlines it
into the hot path because there is only a single caller. Explicitly mark
it noinline.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ffaa9c10
@@ -420,7 +420,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_tuple_taken);
 
 /* There's a small race here where we may free a just-assured
    connection.  Too bad: we're in trouble anyway. */
-static int early_drop(unsigned int hash)
+static noinline int early_drop(unsigned int hash)
 {
 	/* Use oldest entry, which is roughly LRU */
 	struct nf_conntrack_tuple_hash *h;
@@ -472,8 +472,8 @@ struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
 	/* We don't want any race condition at early drop stage */
 	atomic_inc(&nf_conntrack_count);
 
-	if (nf_conntrack_max
-	    && atomic_read(&nf_conntrack_count) > nf_conntrack_max) {
+	if (nf_conntrack_max &&
+	    unlikely(atomic_read(&nf_conntrack_count) > nf_conntrack_max)) {
 		unsigned int hash = hash_conntrack(orig);
 		if (!early_drop(hash)) {
 			atomic_dec(&nf_conntrack_count);
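
For readers unfamiliar with the two annotations this patch relies on, here is a minimal standalone C sketch, not kernel code: the names evict_oldest(), add_entry() and table_max are invented for illustration, and the noinline/unlikely() macros are simplified stand-ins for the kernel's definitions in <linux/compiler.h>. It shows how the annotations reduce to gcc's __attribute__((noinline)) and __builtin_expect(), keeping a rarely taken eviction path out of the caller's hot instruction stream.

/* Minimal sketch (not kernel code): how noinline and unlikely()
 * map onto gcc primitives.  The kernel's own macros live in
 * <linux/compiler.h>; these are simplified stand-ins. */
#include <stdio.h>

#define noinline     __attribute__((noinline))
#define unlikely(x)  __builtin_expect(!!(x), 0)

static int table_entries;
static const int table_max = 4;	/* hypothetical limit for the demo */

/* Rarely needed slow path: forbid inlining so it stays out of the
 * caller's hot path, even though it has only a single caller. */
static noinline int evict_oldest(void)
{
	if (table_entries == 0)
		return 0;
	table_entries--;
	return 1;
}

static int add_entry(void)
{
	table_entries++;
	/* unlikely() hints gcc to lay out the eviction branch off the
	 * fast path, mirroring the check changed in this patch. */
	if (table_max && unlikely(table_entries > table_max)) {
		if (!evict_oldest()) {
			table_entries--;
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("add_entry() = %d\n", add_entry());
	return 0;
}

Compiling this with -O2 and inspecting the assembly should show evict_oldest() emitted as a separate out-of-line function rather than folded into add_entry(), which is the effect the patch wants for early_drop().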