Commit d896b312 authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains accumulated Netfilter fixes for your
net tree:

1) Ensure quota dump and reset happens iff we can deliver numbers to
   userspace.

2) Silence splat on incorrect use of smp_processor_id() from nft_queue.

3) Fix an out-of-bound access reported by KASAN in
   nf_tables_rule_destroy(), patch from Florian Westphal.

4) Fix layer 4 checksum mangling in the nf_tables payload expression
   with IPv6.

5) Fix a race in the CLUSTERIP target from control plane path when two
   threads run to add a new configuration object. Serialize invocations
   of clusterip_config_init() using spin_lock. From Xin Long.

6) Pass br_nf_pre_routing_finish_bridge() instead of br_nf_pre_routing_finish()
   as the continuation to br_nf_hook_thresh(), so packets are handed back to
   the bridge once the hook is done. From Artur Molchanov.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9b60047a 14221cc4
net/bridge/br_netfilter_hooks.c
@@ -399,7 +399,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
 			br_nf_hook_thresh(NF_BR_PRE_ROUTING,
 					  net, sk, skb, skb->dev,
 					  NULL,
-					  br_nf_pre_routing_finish);
+					  br_nf_pre_routing_finish_bridge);
 			return 0;
 		}
 		ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
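The one-line change above swaps the continuation (okfn) handed to br_nf_hook_thresh(), which decides where an accepted packet goes next. A minimal userspace sketch of that continuation-passing pattern, with entirely hypothetical names (this is not the kernel API):

/* A hook only decides accept/drop; where an accepted packet ends up is
 * determined by the continuation it was given. Passing the wrong one
 * silently reroutes traffic, which is the class of bug fixed above. */
#include <stdio.h>

struct pkt { const char *data; };

typedef int (*okfn_t)(struct pkt *);

static int finish_local(struct pkt *p)
{
	printf("%s: delivered locally\n", p->data);
	return 0;
}

static int finish_bridge(struct pkt *p)
{
	printf("%s: handed back to the bridge\n", p->data);
	return 0;
}

static int run_hook(struct pkt *p, okfn_t okfn)
{
	return okfn(p);	/* accept: invoke the continuation */
}

int main(void)
{
	struct pkt p = { "pkt0" };

	run_hook(&p, finish_local);
	run_hook(&p, finish_bridge);	/* the bridged case fixed above */
	return 0;
}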
net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -144,7 +144,7 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
 	rcu_read_lock_bh();
 	c = __clusterip_config_find(net, clusterip);
 	if (c) {
-		if (unlikely(!atomic_inc_not_zero(&c->refcount)))
+		if (!c->pde || unlikely(!atomic_inc_not_zero(&c->refcount)))
 			c = NULL;
 		else if (entry)
 			atomic_inc(&c->entries);
@@ -168,12 +168,13 @@ static struct clusterip_config *
 clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
 		      struct net_device *dev)
 {
+	struct net *net = dev_net(dev);
 	struct clusterip_config *c;
-	struct clusterip_net *cn = net_generic(dev_net(dev), clusterip_net_id);
+	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
 
 	c = kzalloc(sizeof(*c), GFP_ATOMIC);
 	if (!c)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	c->dev = dev;
 	c->clusterip = ip;
@@ -185,6 +186,17 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
 	atomic_set(&c->refcount, 1);
 	atomic_set(&c->entries, 1);
 
+	spin_lock_bh(&cn->lock);
+	if (__clusterip_config_find(net, ip)) {
+		spin_unlock_bh(&cn->lock);
+		kfree(c);
+
+		return ERR_PTR(-EBUSY);
+	}
+
+	list_add_rcu(&c->list, &cn->configs);
+	spin_unlock_bh(&cn->lock);
+
 #ifdef CONFIG_PROC_FS
 	{
 		char buffer[16];
@@ -195,16 +207,16 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
 					  cn->procdir,
 					  &clusterip_proc_fops, c);
 		if (!c->pde) {
+			spin_lock_bh(&cn->lock);
+			list_del_rcu(&c->list);
+			spin_unlock_bh(&cn->lock);
+
 			kfree(c);
-			return NULL;
+
+			return ERR_PTR(-ENOMEM);
 		}
 	}
 #endif
 
-	spin_lock_bh(&cn->lock);
-	list_add_rcu(&c->list, &cn->configs);
-	spin_unlock_bh(&cn->lock);
-
 	return c;
 }
@@ -410,9 +422,9 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
 
 			config = clusterip_config_init(cipinfo,
 						       e->ip.dst.s_addr, dev);
-			if (!config) {
+			if (IS_ERR(config)) {
 				dev_put(dev);
-				return -ENOMEM;
+				return PTR_ERR(config);
 			}
 			dev_mc_add(config->dev, config->clustermac);
 		}
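The patch closes a check-then-insert race: two threads could both miss an unlocked lookup and both register a config for the same clusterip. The fix performs the lookup and the list insertion under the same cn->lock, and returns ERR_PTR(-EBUSY) to the loser (the extra !c->pde test in the lookup path keeps readers from using an entry whose /proc file is not created yet). A minimal pthreads sketch of that serialization pattern, with hypothetical names, not the kernel code:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct config {
	unsigned int ip;
	struct config *next;
};

static struct config *configs;
static pthread_mutex_t configs_lock = PTHREAD_MUTEX_INITIALIZER;

static struct config *config_find(unsigned int ip)
{
	struct config *c;

	for (c = configs; c; c = c->next)
		if (c->ip == ip)
			return c;
	return NULL;
}

/* find + insert as a single critical section, the same shape as the
 * patched clusterip_config_init() */
static int config_init(unsigned int ip)
{
	struct config *c = malloc(sizeof(*c));

	if (!c)
		return -ENOMEM;
	c->ip = ip;

	pthread_mutex_lock(&configs_lock);
	if (config_find(ip)) {
		pthread_mutex_unlock(&configs_lock);
		free(c);
		return -EBUSY;	/* the other thread won the race */
	}
	c->next = configs;
	configs = c;
	pthread_mutex_unlock(&configs_lock);
	return 0;
}

static void *worker(void *arg)
{
	printf("config_init: %d\n", config_init(0x0100007f));
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;	/* one thread prints 0, the other -16 (EBUSY) */
}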
net/netfilter/nf_tables_api.c
@@ -2115,7 +2115,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
 	 * is called on error from nf_tables_newrule().
 	 */
 	expr = nft_expr_first(rule);
-	while (expr->ops && expr != nft_expr_last(rule)) {
+	while (expr != nft_expr_last(rule) && expr->ops) {
 		nf_tables_expr_destroy(ctx, expr);
 		expr = nft_expr_next(expr);
 	}
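The out-of-bound access KASAN reported came from evaluating expr->ops when expr had already stepped one past the last expression; swapping the operands makes && short-circuit on the bounds test before the dereference. A small self-contained C illustration of why the operand order matters (hypothetical names, not the nf_tables structures):

/* With `p->ops && p != last` the dereference happens even when p is
 * already one past the end of the array; `p != last && p->ops` never
 * touches out-of-bounds memory because && evaluates left to right and
 * stops at the first false operand. */
#include <stdio.h>

struct expr { int ops; };

int main(void)
{
	struct expr rule[4] = { {1}, {1}, {1}, {1} };
	struct expr *p = rule;
	struct expr *last = rule + 4;	/* one past the final element */

	while (p != last && p->ops) {	/* bounds check first: safe */
		printf("destroy expr %td\n", p - rule);
		p++;
	}
	return 0;
}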
net/netfilter/nft_payload.c
@@ -250,6 +250,22 @@ static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
 	return 0;
 }
 
+static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
+				 __wsum fsum, __wsum tsum, int csum_offset)
+{
+	__sum16 sum;
+
+	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+		return -1;
+
+	nft_csum_replace(&sum, fsum, tsum);
+	if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
+	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+		return -1;
+
+	return 0;
+}
+
 static void nft_payload_set_eval(const struct nft_expr *expr,
 				 struct nft_regs *regs,
 				 const struct nft_pktinfo *pkt)
@@ -259,7 +275,6 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
 	const u32 *src = &regs->data[priv->sreg];
 	int offset, csum_offset;
 	__wsum fsum, tsum;
-	__sum16 sum;
 
 	switch (priv->base) {
 	case NFT_PAYLOAD_LL_HEADER:
@@ -282,18 +297,14 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
 	csum_offset = offset + priv->csum_offset;
 	offset += priv->offset;
 
-	if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
+	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
 	    (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
 	     skb->ip_summed != CHECKSUM_PARTIAL)) {
-		if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
-			goto err;
-
 		fsum = skb_checksum(skb, offset, priv->len, 0);
 		tsum = csum_partial(src, priv->len, 0);
-		nft_csum_replace(&sum, fsum, tsum);
 
-		if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
-		    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
+		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
 			goto err;
 
 		if (priv->csum_flags &&
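The restructuring does two things: csum_flags alone (the pseudo-header updates needed for IPv6) now reaches this branch, and the 16-bit internet-checksum rewrite is confined to NFT_PAYLOAD_CSUM_INET inside the new nft_payload_csum_inet() helper, so IPv6 mangling no longer stores a bogus inet checksum. For reference, a userspace sketch of the incremental update that a csum_replace-style helper performs (RFC 1624 eqn. 3, HC' = ~(~HC + ~m + m')); helper names here are hypothetical:

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Full one's-complement checksum over 16-bit words, for verification. */
static uint16_t csum_full(const uint16_t *w, int n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *w++;
	return (uint16_t)~csum_fold(sum);
}

/* Incremental update: patch the stored checksum when one word changes,
 * without re-summing the whole packet. */
static uint16_t csum_replace(uint16_t check, uint16_t old, uint16_t new)
{
	uint32_t sum = (uint16_t)~check;	/* ~HC */

	sum += (uint16_t)~old;			/* + ~m  */
	sum += new;				/* + m'  */
	return (uint16_t)~csum_fold(sum);	/* HC'   */
}

int main(void)
{
	uint16_t pkt[4] = { 0x1234, 0xabcd, 0x0001, 0xffff };
	uint16_t check = csum_full(pkt, 4);

	pkt[2] = 0xbeef;	/* mangle one payload word */
	check = csum_replace(check, 0x0001, 0xbeef);

	printf("incremental: %04x, full recompute: %04x\n",
	       check, csum_full(pkt, 4));	/* the two must match */
	return 0;
}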
net/netfilter/nft_queue.c
@@ -38,7 +38,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
 
 	if (priv->queues_total > 1) {
 		if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
-			int cpu = smp_processor_id();
+			int cpu = raw_smp_processor_id();
 
 			queue = priv->queuenum + cpu % priv->queues_total;
 		} else {
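smp_processor_id() warns when called from preemptible context, because the task may migrate right after the read. Here the CPU number only spreads packets across a queue range, so a momentarily stale value is harmless and raw_smp_processor_id() silences the splat without changing behavior. A userspace analog using sched_getcpu(), which has the same "may be stale by the time you use it" property (queue numbers are made-up for illustration):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	unsigned int queuenum = 100;	/* first queue in the range */
	unsigned int queues_total = 4;	/* fanout width */
	int cpu = sched_getcpu();	/* advisory: stale values are fine
					 * for load spreading, they never
					 * index per-CPU data here */
	if (cpu < 0)
		return 1;

	printf("cpu %d -> queue %u\n", cpu,
	       queuenum + cpu % queues_total);
	return 0;
}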
net/netfilter/nft_quota.c
@@ -110,30 +110,32 @@ static int nft_quota_obj_init(const struct nlattr * const tb[],
 static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
 			     bool reset)
 {
+	u64 consumed, consumed_cap;
 	u32 flags = priv->flags;
-	u64 consumed;
-
-	if (reset) {
-		consumed = atomic64_xchg(&priv->consumed, 0);
-		if (test_and_clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
-			flags |= NFT_QUOTA_F_DEPLETED;
-	} else {
-		consumed = atomic64_read(&priv->consumed);
-	}
 
 	/* Since we inconditionally increment consumed quota for each packet
 	 * that we see, don't go over the quota boundary in what we send to
 	 * userspace.
 	 */
-	if (consumed > priv->quota)
-		consumed = priv->quota;
+	consumed = atomic64_read(&priv->consumed);
+	if (consumed >= priv->quota) {
+		consumed_cap = priv->quota;
+		flags |= NFT_QUOTA_F_DEPLETED;
+	} else {
+		consumed_cap = consumed;
+	}
 
 	if (nla_put_be64(skb, NFTA_QUOTA_BYTES, cpu_to_be64(priv->quota),
 			 NFTA_QUOTA_PAD) ||
-	    nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed),
+	    nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed_cap),
 			 NFTA_QUOTA_PAD) ||
 	    nla_put_be32(skb, NFTA_QUOTA_FLAGS, htonl(flags)))
 		goto nla_put_failure;
 
+	if (reset) {
+		atomic64_sub(consumed, &priv->consumed);
+		clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags);
+	}
+
 	return 0;
 
 nla_put_failure:
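This makes dump-and-reset transactional, matching item 1 of the pull message: read the counter, try to emit the netlink attributes, and only after that succeeds subtract the value that was actually reported. The old code zeroed the counter with atomic64_xchg() up front, losing the consumed bytes if the dump later failed; the subtraction also preserves bytes accounted between the read and the reset. A C11 stdatomic sketch of that ordering, with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t consumed_bytes;

/* Stand-in for building/sending the netlink message; it may fail. */
static bool deliver(uint64_t value)
{
	printf("reporting %llu bytes\n", (unsigned long long)value);
	return true;
}

static int do_dump(bool reset)
{
	uint64_t consumed = atomic_load(&consumed_bytes);

	if (!deliver(consumed))
		return -1;	/* nothing reported, so nothing reset */

	if (reset)		/* subtract only what userspace saw */
		atomic_fetch_sub(&consumed_bytes, consumed);
	return 0;
}

int main(void)
{
	atomic_store(&consumed_bytes, 1500);
	do_dump(true);
	printf("left after reset: %llu\n",
	       (unsigned long long)atomic_load(&consumed_bytes));
	return 0;
}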