Commit ceb20a3c authored by Paolo Abeni

Merge tag 'nf-23-07-06' of git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for net:

1) Fix missing overflow checks on use refcounts in nf_tables.

2) Do not set IPS_ASSURED for IPS_NAT_CLASH entries in the GRE tracker,
   from Florian Westphal.

3) Bail out if nf_ct_helper_hash is NULL before registering a helper,
   from Florent Revest.

4) Use siphash() instead of siphash_4u64() to fix a performance regression,
   also from Florian.

5) Do not allow adding rules to removed chains via ID,
   from Thadeu Lima de Souza Cascardo.

6) Fix out-of-bounds read access in the byteorder expression, also from Thadeu.

netfilter pull request 23-07-06

====================

Link: https://lore.kernel.org/r/20230705230406.52201-1-pablo@netfilter.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 21327f81 caf3ef74
@@ -67,6 +67,9 @@ struct nf_conntrack_tuple {
 		/* The protocol. */
 		u_int8_t protonum;

+		/* The direction must be ignored for the tuplehash */
+		struct { } __nfct_hash_offsetend;
+
 		/* The direction (for tuplehash) */
 		u_int8_t dir;
 	} dst;
......
@@ -1211,6 +1211,29 @@ int __nft_release_basechain(struct nft_ctx *ctx);
 unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);

+static inline bool nft_use_inc(u32 *use)
+{
+	if (*use == UINT_MAX)
+		return false;
+
+	(*use)++;
+
+	return true;
+}
+
+static inline void nft_use_dec(u32 *use)
+{
+	WARN_ON_ONCE((*use)-- == 0);
+}
+
+/* For error and abort path: restore use counter to previous state. */
+static inline void nft_use_inc_restore(u32 *use)
+{
+	WARN_ON_ONCE(!nft_use_inc(use));
+}
+
+#define nft_use_dec_restore	nft_use_dec
+
 /**
  * struct nft_table - nf_tables table
  *
@@ -1296,8 +1319,8 @@ struct nft_object {
 	struct list_head		list;
 	struct rhlist_head		rhlhead;
 	struct nft_object_hash_key	key;
-	u32				genmask:2,
-					use:30;
+	u32				genmask:2;
+	u32				use;
 	u64				handle;
 	u16				udlen;
 	u8				*udata;
@@ -1399,8 +1422,8 @@ struct nft_flowtable {
 	char				*name;
 	int				hooknum;
 	int				ops_len;
-	u32				genmask:2,
-					use:30;
+	u32				genmask:2;
+	u32				use;
 	u64				handle;
 	/* runtime data below here */
 	struct list_head		hook_list ____cacheline_aligned;
......
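The nft_use_inc()/nft_use_dec() helpers above replace the old 30-bit use:30 bitfield with a full u32 plus an explicit saturation check. A minimal userspace sketch of the same pattern follows (standalone, illustrative names; note the kernel's nft_use_dec() decrements unconditionally and only WARNs on underflow):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Saturating increment: refuse to wrap at UINT_MAX so a reference can
 * never be leaked by overflow; callers report failure (e.g. -EMFILE, as
 * nft_flow_offload_init() and nft_objref_init() do further below).
 */
static bool use_inc(unsigned int *use)
{
	if (*use == UINT_MAX)
		return false;
	(*use)++;
	return true;
}

static void use_dec(unsigned int *use)
{
	/* the kernel uses WARN_ON_ONCE((*use)-- == 0) here */
	if (*use == 0)
		fprintf(stderr, "use counter underflow\n");
	else
		(*use)--;
}

int main(void)
{
	unsigned int use = UINT_MAX - 1;

	printf("%d\n", use_inc(&use));	/* 1: counter reaches UINT_MAX */
	printf("%d\n", use_inc(&use));	/* 0: saturated, caller must bail */
	use_dec(&use);
	printf("%d\n", use == UINT_MAX - 1);	/* 1 */
	return 0;
}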
@@ -211,24 +211,18 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
 			      unsigned int zoneid,
 			      const struct net *net)
 {
-	u64 a, b, c, d;
+	siphash_key_t key;

 	get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));

-	/* The direction must be ignored, handle usable tuplehash members manually */
-	a = (u64)tuple->src.u3.all[0] << 32 | tuple->src.u3.all[3];
-	b = (u64)tuple->dst.u3.all[0] << 32 | tuple->dst.u3.all[3];
+	key = nf_conntrack_hash_rnd;

-	c = (__force u64)tuple->src.u.all << 32 | (__force u64)tuple->dst.u.all << 16;
-	c |= tuple->dst.protonum;
+	key.key[0] ^= zoneid;
+	key.key[1] ^= net_hash_mix(net);

-	d = (u64)zoneid << 32 | net_hash_mix(net);
-
-	/* IPv4: u3.all[1,2,3] == 0 */
-	c ^= (u64)tuple->src.u3.all[1] << 32 | tuple->src.u3.all[2];
-	d += (u64)tuple->dst.u3.all[1] << 32 | tuple->dst.u3.all[2];
-
-	return (u32)siphash_4u64(a, b, c, d, &nf_conntrack_hash_rnd);
+	return siphash((void *)tuple,
+		       offsetofend(struct nf_conntrack_tuple, dst.__nfct_hash_offsetend),
+		       &key);
 }

 static u32 scale_hash(u32 hash)
......
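The hash now runs siphash() over the tuple as one flat byte range, stopping at the zero-size __nfct_hash_offsetend marker so dst.dir never influences the result. A rough standalone illustration of the offsetofend() trick follows, with a simplified, hypothetical tuple layout (the zero-size struct member is a GNU C extension, as in the kernel); this is a sketch, not the kernel's actual layout:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct tuple {
	uint32_t src_addr[4];
	uint32_t dst_addr[4];
	uint16_t src_port, dst_port;
	uint8_t  protonum;
	struct { } hash_end;	/* zero-size marker, like __nfct_hash_offsetend */
	uint8_t  dir;		/* must not affect the hash */
};

int main(void)
{
	/* Everything up to (but excluding) 'dir' is hashed; the direction
	 * byte and any padding after the marker are left out.
	 */
	printf("hash %zu of %zu bytes\n",
	       offsetofend(struct tuple, hash_end), sizeof(struct tuple));
	return 0;
}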
@@ -360,6 +360,9 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
 	BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES);
 	BUG_ON(strlen(me->name) > NF_CT_HELPER_NAME_LEN - 1);

+	if (!nf_ct_helper_hash)
+		return -ENOENT;
+
 	if (me->expect_policy->max_expected > NF_CT_EXPECT_MAX_CNT)
 		return -EINVAL;
@@ -515,4 +518,5 @@ int nf_conntrack_helper_init(void)
 void nf_conntrack_helper_fini(void)
 {
 	kvfree(nf_ct_helper_hash);
+	nf_ct_helper_hash = NULL;
 }
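Taken together, the two hunks above follow the usual free-and-NULL teardown discipline: nf_conntrack_helper_fini() clears the pointer after kvfree(), and nf_conntrack_helper_register() refuses to touch a hash table that is gone. A tiny userspace sketch of the same idea (illustrative names only, not the kernel API):

#include <errno.h>
#include <stdlib.h>

static int *helper_hash;	/* stand-in for nf_ct_helper_hash */

static int helper_register(void)
{
	if (!helper_hash)
		return -ENOENT;	/* hash freed or never allocated */
	/* ... insert the helper into the hash in the real code ... */
	return 0;
}

static void helper_fini(void)
{
	free(helper_hash);
	helper_hash = NULL;	/* later use becomes -ENOENT, not a use-after-free */
}

int main(void)
{
	helper_fini();				/* safe: free(NULL) is a no-op */
	return helper_register() == -ENOENT ? 0 : 1;
}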
@@ -205,6 +205,8 @@ int nf_conntrack_gre_packet(struct nf_conn *ct,
 			    enum ip_conntrack_info ctinfo,
 			    const struct nf_hook_state *state)
 {
+	unsigned long status;
+
 	if (!nf_ct_is_confirmed(ct)) {
 		unsigned int *timeouts = nf_ct_timeout_lookup(ct);
@@ -217,11 +219,17 @@ int nf_conntrack_gre_packet(struct nf_conn *ct,
 		ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
 	}

+	status = READ_ONCE(ct->status);
 	/* If we've seen traffic both ways, this is a GRE connection.
 	 * Extend timeout. */
-	if (ct->status & IPS_SEEN_REPLY) {
+	if (status & IPS_SEEN_REPLY) {
 		nf_ct_refresh_acct(ct, ctinfo, skb,
 				   ct->proto.gre.stream_timeout);
+
+		/* never set ASSURED for IPS_NAT_CLASH, they time out soon */
+		if (unlikely((status & IPS_NAT_CLASH)))
+			return NF_ACCEPT;
+
 		/* Also, more likely to be important, and not a probe. */
 		if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
 			nf_conntrack_event_cache(IPCT_ASSURED, ct);
......
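Two details in the hunk above are worth noting: ct->status is sampled once with READ_ONCE() so both tests act on the same snapshot, and IPS_NAT_CLASH entries are accepted without ever being promoted to ASSURED, since clash entries are expected to time out quickly. A sketch of the snapshot-and-test pattern, using C11 atomics in place of the kernel's READ_ONCE() (bit values are illustrative, not the kernel's):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define IPS_SEEN_REPLY	(1UL << 1)	/* illustrative bit positions */
#define IPS_NAT_CLASH	(1UL << 2)

static _Atomic unsigned long ct_status;

static bool gre_should_assure(void)
{
	/* one snapshot; a concurrent writer cannot make the two tests
	 * below disagree about what was read
	 */
	unsigned long status = atomic_load_explicit(&ct_status,
						    memory_order_relaxed);

	if (!(status & IPS_SEEN_REPLY))
		return false;
	if (status & IPS_NAT_CLASH)	/* clash entries expire soon */
		return false;
	return true;			/* safe to set the ASSURED bit */
}

int main(void)
{
	atomic_store(&ct_status, IPS_SEEN_REPLY | IPS_NAT_CLASH);
	printf("%d\n", gre_should_assure());	/* 0 */
	atomic_store(&ct_status, IPS_SEEN_REPLY);
	printf("%d\n", gre_should_assure());	/* 1 */
	return 0;
}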
(collapsed diff not shown)
@@ -30,11 +30,11 @@ void nft_byteorder_eval(const struct nft_expr *expr,
 	const struct nft_byteorder *priv = nft_expr_priv(expr);
 	u32 *src = &regs->data[priv->sreg];
 	u32 *dst = &regs->data[priv->dreg];
-	union { u32 u32; u16 u16; } *s, *d;
+	u16 *s16, *d16;
 	unsigned int i;

-	s = (void *)src;
-	d = (void *)dst;
+	s16 = (void *)src;
+	d16 = (void *)dst;

 	switch (priv->size) {
 	case 8: {
@@ -62,11 +62,11 @@ void nft_byteorder_eval(const struct nft_expr *expr,
 		switch (priv->op) {
 		case NFT_BYTEORDER_NTOH:
 			for (i = 0; i < priv->len / 4; i++)
-				d[i].u32 = ntohl((__force __be32)s[i].u32);
+				dst[i] = ntohl((__force __be32)src[i]);
 			break;
 		case NFT_BYTEORDER_HTON:
 			for (i = 0; i < priv->len / 4; i++)
-				d[i].u32 = (__force __u32)htonl(s[i].u32);
+				dst[i] = (__force __u32)htonl(src[i]);
 			break;
 		}
 		break;
@@ -74,11 +74,11 @@ void nft_byteorder_eval(const struct nft_expr *expr,
 		switch (priv->op) {
 		case NFT_BYTEORDER_NTOH:
 			for (i = 0; i < priv->len / 2; i++)
-				d[i].u16 = ntohs((__force __be16)s[i].u16);
+				d16[i] = ntohs((__force __be16)s16[i]);
 			break;
 		case NFT_BYTEORDER_HTON:
 			for (i = 0; i < priv->len / 2; i++)
-				d[i].u16 = (__force __u16)htons(s[i].u16);
+				d16[i] = (__force __u16)htons(s16[i]);
 			break;
 		}
 		break;
......
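The root cause fixed above is pointer stride: union { u32 u32; u16 u16; } has size 4, so indexing an array of it at u16 granularity advances 4 bytes per element, and the 16-bit loop walked twice the intended range of the register file, hence the out-of-bounds read. A small standalone demonstration of the stride difference (illustrative buffer, not the nft register layout):

#include <stdint.h>
#include <stdio.h>

union reg { uint32_t u32; uint16_t u16; };	/* sizeof == 4 */

int main(void)
{
	uint32_t data[4] = { 0 };		/* 16 bytes of "registers" */
	union reg *s = (union reg *)data;
	uint16_t *s16 = (uint16_t *)data;
	unsigned int len = 8, i;		/* swap 8 bytes => 4 u16 items */

	for (i = 0; i < len / 2; i++)
		printf("i=%u  union offset=%td  u16 offset=%td\n", i,
		       (char *)&s[i].u16 - (char *)data,
		       (char *)&s16[i] - (char *)data);
	/* union offsets 0,4,8,12 overshoot the 8 wanted bytes;
	 * u16 offsets 0,2,4,6 stay inside, matching the fix
	 */
	return 0;
}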
@@ -408,8 +408,10 @@ static int nft_flow_offload_init(const struct nft_ctx *ctx,
 	if (IS_ERR(flowtable))
 		return PTR_ERR(flowtable);

+	if (!nft_use_inc(&flowtable->use))
+		return -EMFILE;
+
 	priv->flowtable = flowtable;
-	flowtable->use++;

 	return nf_ct_netns_get(ctx->net, ctx->family);
 }
@@ -428,7 +430,7 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx,
 {
 	struct nft_flow_offload *priv = nft_expr_priv(expr);

-	priv->flowtable->use++;
+	nft_use_inc_restore(&priv->flowtable->use);
 }

 static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
......
@@ -159,7 +159,7 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx,
 	default:
 		nft_chain_del(chain);
 		chain->bound = false;
-		chain->table->use--;
+		nft_use_dec(&chain->table->use);
 		break;
 	}
 	break;
@@ -198,7 +198,7 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
 	 * let the transaction records release this chain and its rules.
 	 */
 	if (chain->bound) {
-		chain->use--;
+		nft_use_dec(&chain->use);
 		break;
 	}
@@ -206,9 +206,9 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
 	chain_ctx = *ctx;
 	chain_ctx.chain = chain;

-	chain->use--;
+	nft_use_dec(&chain->use);
 	list_for_each_entry_safe(rule, n, &chain->rules, list) {
-		chain->use--;
+		nft_use_dec(&chain->use);
 		list_del(&rule->list);
 		nf_tables_rule_destroy(&chain_ctx, rule);
 	}
......
@@ -41,8 +41,10 @@ static int nft_objref_init(const struct nft_ctx *ctx,
 	if (IS_ERR(obj))
 		return -ENOENT;

+	if (!nft_use_inc(&obj->use))
+		return -EMFILE;
+
 	nft_objref_priv(expr) = obj;
-	obj->use++;

 	return 0;
 }
@@ -72,7 +74,7 @@ static void nft_objref_deactivate(const struct nft_ctx *ctx,
 	if (phase == NFT_TRANS_COMMIT)
 		return;

-	obj->use--;
+	nft_use_dec(&obj->use);
 }

 static void nft_objref_activate(const struct nft_ctx *ctx,
@@ -80,7 +82,7 @@ static void nft_objref_activate(const struct nft_ctx *ctx,
 {
 	struct nft_object *obj = nft_objref_priv(expr);

-	obj->use++;
+	nft_use_inc_restore(&obj->use);
 }

 static const struct nft_expr_ops nft_objref_ops = {
......