Commit 30a0f49d authored by Jakub Kicinski

Merge tag 'nf-23-05-17' of https://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf

Florian Westphal says:

====================
Netfilter fixes for net

1. Silence warning about unused variable when CONFIG_NF_NAT=n, from Tom Rix.
2. nftables: Fix possible out-of-bounds access, from myself.
3. nftables: fix null deref+UAF during element insertion into rbtree,
   also from myself.

* tag 'nf-23-05-17' of https://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf:
  netfilter: nft_set_rbtree: fix null deref on element insertion
  netfilter: nf_tables: fix nft_trans type confusion
  netfilter: conntrack: define variables exp_nat_nla_policy and any_addr with CONFIG_NF_NAT
====================

Link: https://lore.kernel.org/r/20230517123756.7353-1-fw@strlen.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents c259ad11 61ae320a
@@ -2976,7 +2976,9 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
 	return -1;
 }
 
+#if IS_ENABLED(CONFIG_NF_NAT)
 static const union nf_inet_addr any_addr;
+#endif
 
 static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
 {
@@ -3460,10 +3462,12 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_NF_NAT)
 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
 	[CTA_EXPECT_NAT_DIR]	= { .type = NLA_U32 },
 	[CTA_EXPECT_NAT_TUPLE]	= { .type = NLA_NESTED },
 };
+#endif
 
 static int
 ctnetlink_parse_expect_nat(const struct nlattr *attr,
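The two hunks above compile out any_addr and exp_nat_nla_policy together with their only users, which is what silences the unused-variable warning when CONFIG_NF_NAT=n. Below is a minimal userspace sketch of the same guard pattern, not part of the commit; the ENABLE_NAT macro is a hypothetical stand-in for IS_ENABLED(CONFIG_NF_NAT).

/*
 * Sketch: when the only users of a file-scope constant are compiled out,
 * the definition must be guarded too, or the compiler warns about an
 * unused variable.
 */
#include <stdio.h>

#define ENABLE_NAT 0	/* flip to 1 to compile the NAT-only paths */

#if ENABLE_NAT
static const int nat_default_port = 4500;	/* only referenced below */

static void print_nat_default(void)
{
	printf("NAT default port: %d\n", nat_default_port);
}
#endif

int main(void)
{
#if ENABLE_NAT
	print_nat_default();
#endif
	printf("done\n");
	return 0;
}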
@@ -3865,12 +3865,10 @@ static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
 	struct nft_trans *trans;
 
 	list_for_each_entry(trans, &nft_net->commit_list, list) {
-		struct nft_rule *rule = nft_trans_rule(trans);
-
 		if (trans->msg_type == NFT_MSG_NEWRULE &&
 		    trans->ctx.chain == chain &&
 		    id == nft_trans_rule_id(trans))
-			return rule;
+			return nft_trans_rule(trans);
 	}
 	return ERR_PTR(-ENOENT);
 }
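The hunk above stops reading the transaction's type-specific payload before the NFT_MSG_NEWRULE check; on a transaction allocated for a different, smaller payload that early read can go out of bounds. Below is a rough userspace sketch of the same check-the-tag-first shape, not part of the commit; struct trans, MSG_NEWRULE, trans_rule_id() and friends are hypothetical names, not the kernel's definitions.

/*
 * Sketch: a transaction header followed by a type-specific payload.
 * Reading the payload before checking the type tag can touch memory that
 * was never allocated for this transaction type; check the tag first.
 */
#include <stdio.h>
#include <stdlib.h>

enum msg_type { MSG_NEWRULE, MSG_NEWSET };

struct trans {
	enum msg_type type;
	char data[];				/* payload sized per type */
};

struct trans_rule { int rule_id; };		/* payload for MSG_NEWRULE */
struct trans_set  { int set_id; };		/* payload for MSG_NEWSET  */

#define trans_rule_id(t) (((struct trans_rule *)(t)->data)->rule_id)

/* Returns the matching rule id, or -1 if this transaction does not match. */
static int lookup_rule_id(struct trans *trans, int id)
{
	/* Check the type tag before touching the type-specific payload. */
	if (trans->type == MSG_NEWRULE && trans_rule_id(trans) == id)
		return trans_rule_id(trans);
	return -1;
}

int main(void)
{
	struct trans *t = malloc(sizeof(*t) + sizeof(struct trans_rule));

	t->type = MSG_NEWRULE;
	((struct trans_rule *)t->data)->rule_id = 7;

	printf("lookup 7 -> %d\n", lookup_rule_id(t, 7));
	printf("lookup 9 -> %d\n", lookup_rule_id(t, 9));

	free(t);
	return 0;
}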
@@ -221,7 +221,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
 {
 	struct nft_set *set = (struct nft_set *)__set;
 	struct rb_node *prev = rb_prev(&rbe->node);
-	struct nft_rbtree_elem *rbe_prev;
+	struct nft_rbtree_elem *rbe_prev = NULL;
 	struct nft_set_gc_batch *gcb;
 
 	gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
@@ -229,17 +229,21 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
 		return -ENOMEM;
 
 	/* search for expired end interval coming before this element. */
-	do {
+	while (prev) {
 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
 		if (nft_rbtree_interval_end(rbe_prev))
			break;
 
 		prev = rb_prev(prev);
-	} while (prev != NULL);
+	}
+
+	if (rbe_prev) {
+		rb_erase(&rbe_prev->node, &priv->root);
+		atomic_dec(&set->nelems);
+	}
 
-	rb_erase(&rbe_prev->node, &priv->root);
 	rb_erase(&rbe->node, &priv->root);
-	atomic_sub(2, &set->nelems);
+	atomic_dec(&set->nelems);
 
 	nft_set_gc_batch_add(gcb, rbe);
 	nft_set_gc_batch_complete(gcb);
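With rbe_prev initialised to NULL and the do/while turned into a plain while loop, the code no longer dereferences a nonexistent predecessor when the element has none, and the predecessor erase only runs when something was actually visited. Below is a small illustrative sketch of that initialise-to-NULL-and-guard shape, not part of the commit; the node and list names are hypothetical, not the rbtree code itself.

/*
 * Sketch: walk backwards for an expired predecessor, but only act on it
 * if the walk actually found one.
 */
#include <stdio.h>

struct node {
	int value;
	int expired;
	struct node *prev;
};

static void gc_expired_predecessor(struct node *cur, int *nelems)
{
	struct node *prev = cur->prev;
	struct node *found = NULL;	/* NULL until the walk hits a match */

	while (prev) {
		if (prev->expired) {
			found = prev;
			break;
		}
		prev = prev->prev;
	}

	if (found) {			/* guard: a predecessor may not exist */
		/* unlink/free of 'found' would happen here */
		(*nelems)--;
		printf("collected expired predecessor %d\n", found->value);
	}

	/* the current node is always collected */
	(*nelems)--;
	printf("collected node %d\n", cur->value);
}

int main(void)
{
	struct node a = { .value = 1, .expired = 0, .prev = NULL };
	struct node b = { .value = 2, .expired = 0, .prev = &a };
	int nelems = 2;

	gc_expired_predecessor(&b, &nelems);
	printf("remaining: %d\n", nelems);
	return 0;
}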
@@ -268,7 +272,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 			       struct nft_set_ext **ext)
 {
 	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
-	struct rb_node *node, *parent, **p, *first = NULL;
+	struct rb_node *node, *next, *parent, **p, *first = NULL;
 	struct nft_rbtree *priv = nft_set_priv(set);
 	u8 genmask = nft_genmask_next(net);
 	int d, err;
@@ -307,7 +311,9 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 	 * Values stored in the tree are in reversed order, starting from
 	 * highest to lowest value.
 	 */
-	for (node = first; node != NULL; node = rb_next(node)) {
+	for (node = first; node != NULL; node = next) {
+		next = rb_next(node);
+
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
 		if (!nft_set_elem_active(&rbe->ext, genmask))
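The last two hunks cache rb_next(node) before the loop body runs, because the gc call inside the loop may erase the very node the iterator is standing on. Below is the same save-the-successor-first pattern in a standalone sketch, not part of the commit; it uses a hypothetical singly linked list instead of an rbtree.

/*
 * Sketch: when a traversal may free the node it is standing on, fetch the
 * successor before doing anything that might invalidate the current node.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int value;
	int expired;
	struct node *next;
};

static void prune_expired(struct node **head)
{
	struct node **link = head;
	struct node *node, *next;

	for (node = *head; node != NULL; node = next) {
		next = node->next;	/* grab the successor first */

		if (node->expired) {
			*link = next;	/* unlink ... */
			free(node);	/* ... then free; 'next' stays valid */
			continue;
		}
		link = &node->next;
	}
}

static struct node *push(struct node *head, int value, int expired)
{
	struct node *n = malloc(sizeof(*n));

	n->value = value;
	n->expired = expired;
	n->next = head;
	return n;
}

int main(void)
{
	struct node *head = NULL, *n;

	head = push(head, 3, 1);
	head = push(head, 2, 0);
	head = push(head, 1, 1);

	prune_expired(&head);
	for (n = head; n; n = n->next)
		printf("kept %d\n", n->value);

	while (head) {
		n = head->next;
		free(head);
		head = n;
	}
	return 0;
}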