Commit 2fb91ddb authored by Pablo Neira Ayuso

netfilter: nft_rbtree: fix data handling of end interval elements

This patch fixes several things related to the handling of
end interval elements:

* Chain use underflow with intervals and map: If you add a rule
  using intervals+map that introduces a loop, the error path of the
  rbtree set decrements the chain refcount for each side of the
  interval, leading to a chain use counter underflow.

* Don't copy the data part of the end interval element since this
  area is uninitialized, and this confuses the loop detection code.

* Don't allocate room for the data part of end interval elements
  since this is unused.

So, after this patch the idea is that end interval elements don't
have a data part.
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Acked-by: Patrick McHardy <kaber@trash.net>
parent bd7fc645
@@ -69,8 +69,10 @@ static void nft_rbtree_elem_destroy(const struct nft_set *set,
 				    struct nft_rbtree_elem *rbe)
 {
 	nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
-	if (set->flags & NFT_SET_MAP)
+	if (set->flags & NFT_SET_MAP &&
+	    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 		nft_data_uninit(rbe->data, set->dtype);
 	kfree(rbe);
 }
@@ -108,7 +110,8 @@ static int nft_rbtree_insert(const struct nft_set *set,
 	int err;

 	size = sizeof(*rbe);
-	if (set->flags & NFT_SET_MAP)
+	if (set->flags & NFT_SET_MAP &&
+	    !(elem->flags & NFT_SET_ELEM_INTERVAL_END))
 		size += sizeof(rbe->data[0]);

 	rbe = kzalloc(size, GFP_KERNEL);
@@ -117,7 +120,8 @@ static int nft_rbtree_insert(const struct nft_set *set,
 	rbe->flags = elem->flags;
 	nft_data_copy(&rbe->key, &elem->key);
-	if (set->flags & NFT_SET_MAP)
+	if (set->flags & NFT_SET_MAP &&
+	    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 		nft_data_copy(rbe->data, &elem->data);

 	err = __nft_rbtree_insert(set, rbe);
@@ -153,7 +157,8 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
 			parent = parent->rb_right;
 		else {
 			elem->cookie = rbe;
-			if (set->flags & NFT_SET_MAP)
+			if (set->flags & NFT_SET_MAP &&
+			    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 				nft_data_copy(&elem->data, rbe->data);
 			elem->flags = rbe->flags;
 			return 0;
@@ -177,7 +182,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);

 		nft_data_copy(&elem.key, &rbe->key);
-		if (set->flags & NFT_SET_MAP)
+		if (set->flags & NFT_SET_MAP &&
+		    !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
 			nft_data_copy(&elem.data, rbe->data);
 		elem.flags = rbe->flags;
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment