Commit af510ebd authored by Pablo Neira Ayuso

Revert "netfilter: xt_quota: fix the behavior of xt_quota module"

This reverts commit e9837e55.

When talking to Maze and Chenbo, we agreed to hold this back for now
due to problems in the ruleset listing path on 32-bit arches.
Signed-off-by: Maciej Żenczykowski <maze@google.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
parent 468c041c
--- a/include/uapi/linux/netfilter/xt_quota.h
+++ b/include/uapi/linux/netfilter/xt_quota.h
@@ -15,11 +15,9 @@ struct xt_quota_info {
 	__u32 flags;
 	__u32 pad;
 	__aligned_u64 quota;
-#ifdef __KERNEL__
-	atomic64_t counter;	/* Used internally by the kernel */
-#else
-	__aligned_u64 remain;
-#endif
+
+	/* Used internally by the kernel */
+	struct xt_quota_priv	*master;
 };
 
 #endif /* _XT_QUOTA_H */
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -11,6 +11,11 @@
 #include <linux/netfilter/xt_quota.h>
 #include <linux/module.h>
 
+struct xt_quota_priv {
+	spinlock_t	lock;
+	uint64_t	quota;
+};
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
 MODULE_DESCRIPTION("Xtables: countdown quota match");
@@ -21,48 +26,54 @@ static bool
 quota_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
 	struct xt_quota_info *q = (void *)par->matchinfo;
-	u64 current_count = atomic64_read(&q->counter);
+	struct xt_quota_priv *priv = q->master;
 	bool ret = q->flags & XT_QUOTA_INVERT;
-	u64 old_count, new_count;
-
-	do {
-		if (current_count == 1)
-			return ret;
-		if (current_count <= skb->len) {
-			atomic64_set(&q->counter, 1);
-			return ret;
-		}
-		old_count = current_count;
-		new_count = current_count - skb->len;
-		current_count = atomic64_cmpxchg(&q->counter, old_count,
-						 new_count);
-	} while (current_count != old_count);
-	return !ret;
+
+	spin_lock_bh(&priv->lock);
+	if (priv->quota >= skb->len) {
+		priv->quota -= skb->len;
+		ret = !ret;
+	} else {
+		/* we do not allow even small packets from now on */
+		priv->quota = 0;
+	}
+	spin_unlock_bh(&priv->lock);
+
+	return ret;
 }
 
 static int quota_mt_check(const struct xt_mtchk_param *par)
 {
 	struct xt_quota_info *q = par->matchinfo;
 
-	BUILD_BUG_ON(sizeof(atomic64_t) != sizeof(__u64));
-
 	if (q->flags & ~XT_QUOTA_MASK)
 		return -EINVAL;
-	if (atomic64_read(&q->counter) > q->quota + 1)
-		return -ERANGE;
-	if (atomic64_read(&q->counter) == 0)
-		atomic64_set(&q->counter, q->quota + 1);
+
+	q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
+	if (q->master == NULL)
+		return -ENOMEM;
+
+	spin_lock_init(&q->master->lock);
+	q->master->quota = q->quota;
 	return 0;
 }
 
+static void quota_mt_destroy(const struct xt_mtdtor_param *par)
+{
+	const struct xt_quota_info *q = par->matchinfo;
+
+	kfree(q->master);
+}
+
 static struct xt_match quota_mt_reg __read_mostly = {
 	.name       = "quota",
 	.revision   = 0,
 	.family     = NFPROTO_UNSPEC,
 	.match      = quota_mt,
 	.checkentry = quota_mt_check,
+	.destroy    = quota_mt_destroy,
 	.matchsize  = sizeof(struct xt_quota_info),
+	.usersize   = offsetof(struct xt_quota_info, master),
 	.me         = THIS_MODULE,
 };
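For readers skimming the revert, here is a minimal sketch of the .usersize idea that comes back with this patch: when a ruleset is listed, only the bytes of xt_quota_info up to the kernel-private master pointer are copied back to userspace and the remainder is zeroed, so kernel state never shows up in the listing. This is an illustrative userspace program, not the kernel's actual copy path; the *_sketch names are invented for the example.

/*
 * Sketch only: mimics the effect of .usersize < .matchsize when a rule
 * is copied back to userspace. Not kernel code; names are illustrative.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct xt_quota_info_sketch {
	uint32_t flags;
	uint32_t pad;
	uint64_t quota;
	void *master;	/* kernel-private; hidden by usersize */
};

static void copy_to_user_sketch(void *dst, const void *src,
				size_t matchsize, size_t usersize)
{
	memcpy(dst, src, usersize);	/* user-visible part only */
	memset((char *)dst + usersize, 0, matchsize - usersize);	/* zero the rest */
}

int main(void)
{
	struct xt_quota_info_sketch kernel_copy = {
		.flags = 0, .pad = 0, .quota = 1048576,
		.master = &kernel_copy,	/* stands in for kernel-private state */
	};
	struct xt_quota_info_sketch listed;

	copy_to_user_sketch(&listed, &kernel_copy, sizeof(kernel_copy),
			    offsetof(struct xt_quota_info_sketch, master));
	printf("listed quota=%llu master=%p\n",
	       (unsigned long long)listed.quota, listed.master);
	return 0;
}

Run as-is, the sketch reports the configured quota unchanged and master as a null pointer, which is the behaviour the .usersize = offsetof(struct xt_quota_info, master) line restored by this revert relies on.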