Commit fabf3a85 authored by Eric Dumazet, committed by Patrick McHardy

netfilter: xt_statistic: remove nth_lock spinlock

Use atomic_cmpxchg() to avoid dirtying a shared location.

xt_statistic_priv is made SMP cacheline-aligned to avoid sharing a cache line
with other data.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
parent a8b56389
@@ -18,8 +18,8 @@
 #include <linux/netfilter/x_tables.h>
 
 struct xt_statistic_priv {
-	uint32_t count;
-};
+	atomic_t count;
+} ____cacheline_aligned_in_smp;
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -27,13 +27,12 @@ MODULE_DESCRIPTION("Xtables: statistics-based matching (\"Nth\", random)");
 MODULE_ALIAS("ipt_statistic");
 MODULE_ALIAS("ip6t_statistic");
 
-static DEFINE_SPINLOCK(nth_lock);
-
 static bool
 statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	const struct xt_statistic_info *info = par->matchinfo;
 	bool ret = info->flags & XT_STATISTIC_INVERT;
+	int nval, oval;
 
 	switch (info->mode) {
 	case XT_STATISTIC_MODE_RANDOM:
@@ -41,12 +40,12 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
 			ret = !ret;
 		break;
 	case XT_STATISTIC_MODE_NTH:
-		spin_lock_bh(&nth_lock);
-		if (info->master->count++ == info->u.nth.every) {
-			info->master->count = 0;
+		do {
+			oval = atomic_read(&info->master->count);
+			nval = (oval == info->u.nth.every) ? 0 : oval + 1;
+		} while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
+		if (nval == 0)
 			ret = !ret;
-		}
-		spin_unlock_bh(&nth_lock);
 		break;
 	}
 
@@ -64,7 +63,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
 	info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
 	if (info->master == NULL)
 		return -ENOMEM;
-	info->master->count = info->u.nth.count;
+	atomic_set(&info->master->count, info->u.nth.count);
 
 	return 0;
 }
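The core of the change is the lock-free update of the per-rule "every Nth packet" counter: instead of taking nth_lock, statistic_mt() now reads the counter, computes the wrapped-or-incremented value, and retries atomic_cmpxchg() until the swap succeeds. Below is a minimal user-space sketch of that pattern using C11 atomics; the names (nth_state, nth_match) and the use of <stdatomic.h> are illustrative substitutes for the kernel's atomic_t API, not part of the patch itself.

/*
 * Sketch of the lockless "match every Nth" counter, assuming C11 atomics
 * in place of the kernel's atomic_read()/atomic_cmpxchg(). Hypothetical
 * names, for illustration only.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct nth_state {
	atomic_uint count;   /* shared packet counter */
	unsigned int every;  /* match once every (every + 1) packets */
};

static bool nth_match(struct nth_state *st)
{
	unsigned int oval, nval;

	do {
		oval = atomic_load(&st->count);
		/* wrap to 0 at the threshold, otherwise increment */
		nval = (oval == st->every) ? 0 : oval + 1;
		/*
		 * Retry if another CPU changed the counter between the load
		 * and the compare-and-swap; this mirrors the atomic_cmpxchg()
		 * loop in statistic_mt().
		 */
	} while (!atomic_compare_exchange_weak(&st->count, &oval, nval));

	return nval == 0;	/* counter wrapped: this packet matches */
}

int main(void)
{
	struct nth_state st = { .count = 0, .every = 3 };

	for (int i = 1; i <= 8; i++)
		printf("packet %d: %s\n", i, nth_match(&st) ? "match" : "skip");
	return 0;
}

The second half of the commit, marking xt_statistic_priv ____cacheline_aligned_in_smp, keeps this heavily written counter from false-sharing a cache line with unrelated data; the sketch omits that detail.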