Commit d68d7d20 authored by Jakub Kicinski

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

1) Check for interval validity in all concatenation fields in
   nft_set_pipapo, from Stefano Brivio.

2) Missing preemption disabled in conntrack and flowtable stat
   updates, from Xin Long.

3) Fix compilation warning when CONFIG_NF_CONNTRACK_MARK=n.

Except for 3), which was a bug introduced by a recent fix in the 6.1-rc
cycle, everything else has been broken for several releases.

* git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf:
  netfilter: ctnetlink: fix compilation warning after data race fixes in ct mark
  netfilter: conntrack: fix using __this_cpu_add in preemptible
  netfilter: flowtable_offload: fix using __this_cpu_add in preemptible
  netfilter: nft_set_pipapo: Actually validate intervals in fields after the first one
====================

Link: https://lore.kernel.org/r/20221130121934.1125-1-pablo@netfilter.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 6c681f89 1feeae07
...@@ -891,7 +891,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct) ...@@ -891,7 +891,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
zone = nf_ct_zone(ct); zone = nf_ct_zone(ct);
if (!nf_ct_ext_valid_pre(ct->ext)) { if (!nf_ct_ext_valid_pre(ct->ext)) {
NF_CT_STAT_INC(net, insert_failed); NF_CT_STAT_INC_ATOMIC(net, insert_failed);
return -ETIMEDOUT; return -ETIMEDOUT;
} }
...@@ -938,7 +938,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct) ...@@ -938,7 +938,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
if (!nf_ct_ext_valid_post(ct->ext)) { if (!nf_ct_ext_valid_post(ct->ext)) {
nf_ct_kill(ct); nf_ct_kill(ct);
NF_CT_STAT_INC(net, drop); NF_CT_STAT_INC_ATOMIC(net, drop);
return -ETIMEDOUT; return -ETIMEDOUT;
} }
...@@ -1275,7 +1275,7 @@ __nf_conntrack_confirm(struct sk_buff *skb) ...@@ -1275,7 +1275,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
*/ */
if (!nf_ct_ext_valid_post(ct->ext)) { if (!nf_ct_ext_valid_post(ct->ext)) {
nf_ct_kill(ct); nf_ct_kill(ct);
NF_CT_STAT_INC(net, drop); NF_CT_STAT_INC_ATOMIC(net, drop);
return NF_DROP; return NF_DROP;
} }
......
...@@ -328,8 +328,13 @@ ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct) ...@@ -328,8 +328,13 @@ ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
} }
#ifdef CONFIG_NF_CONNTRACK_MARK #ifdef CONFIG_NF_CONNTRACK_MARK
static int ctnetlink_dump_mark(struct sk_buff *skb, u32 mark) static int ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
{ {
u32 mark = READ_ONCE(ct->mark);
if (!mark)
return 0;
if (nla_put_be32(skb, CTA_MARK, htonl(mark))) if (nla_put_be32(skb, CTA_MARK, htonl(mark)))
goto nla_put_failure; goto nla_put_failure;
return 0; return 0;
...@@ -543,7 +548,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb, ...@@ -543,7 +548,7 @@ static int ctnetlink_dump_extinfo(struct sk_buff *skb,
static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct) static int ctnetlink_dump_info(struct sk_buff *skb, struct nf_conn *ct)
{ {
if (ctnetlink_dump_status(skb, ct) < 0 || if (ctnetlink_dump_status(skb, ct) < 0 ||
ctnetlink_dump_mark(skb, READ_ONCE(ct->mark)) < 0 || ctnetlink_dump_mark(skb, ct) < 0 ||
ctnetlink_dump_secctx(skb, ct) < 0 || ctnetlink_dump_secctx(skb, ct) < 0 ||
ctnetlink_dump_id(skb, ct) < 0 || ctnetlink_dump_id(skb, ct) < 0 ||
ctnetlink_dump_use(skb, ct) < 0 || ctnetlink_dump_use(skb, ct) < 0 ||
...@@ -722,7 +727,6 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item) ...@@ -722,7 +727,6 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
struct sk_buff *skb; struct sk_buff *skb;
unsigned int type; unsigned int type;
unsigned int flags = 0, group; unsigned int flags = 0, group;
u32 mark;
int err; int err;
if (events & (1 << IPCT_DESTROY)) { if (events & (1 << IPCT_DESTROY)) {
...@@ -827,9 +831,8 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item) ...@@ -827,9 +831,8 @@ ctnetlink_conntrack_event(unsigned int events, const struct nf_ct_event *item)
} }
#ifdef CONFIG_NF_CONNTRACK_MARK #ifdef CONFIG_NF_CONNTRACK_MARK
mark = READ_ONCE(ct->mark); if (events & (1 << IPCT_MARK) &&
if ((events & (1 << IPCT_MARK) || mark) && ctnetlink_dump_mark(skb, ct) < 0)
ctnetlink_dump_mark(skb, mark) < 0)
goto nla_put_failure; goto nla_put_failure;
#endif #endif
nlmsg_end(skb, nlh); nlmsg_end(skb, nlh);
...@@ -2671,7 +2674,6 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) ...@@ -2671,7 +2674,6 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
{ {
const struct nf_conntrack_zone *zone; const struct nf_conntrack_zone *zone;
struct nlattr *nest_parms; struct nlattr *nest_parms;
u32 mark;
zone = nf_ct_zone(ct); zone = nf_ct_zone(ct);
...@@ -2733,8 +2735,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct) ...@@ -2733,8 +2735,7 @@ static int __ctnetlink_glue_build(struct sk_buff *skb, struct nf_conn *ct)
goto nla_put_failure; goto nla_put_failure;
#ifdef CONFIG_NF_CONNTRACK_MARK #ifdef CONFIG_NF_CONNTRACK_MARK
mark = READ_ONCE(ct->mark); if (ctnetlink_dump_mark(skb, ct) < 0)
if (mark && ctnetlink_dump_mark(skb, mark) < 0)
goto nla_put_failure; goto nla_put_failure;
#endif #endif
if (ctnetlink_dump_labels(skb, ct) < 0) if (ctnetlink_dump_labels(skb, ct) < 0)
......
...@@ -997,13 +997,13 @@ static void flow_offload_queue_work(struct flow_offload_work *offload) ...@@ -997,13 +997,13 @@ static void flow_offload_queue_work(struct flow_offload_work *offload)
struct net *net = read_pnet(&offload->flowtable->net); struct net *net = read_pnet(&offload->flowtable->net);
if (offload->cmd == FLOW_CLS_REPLACE) { if (offload->cmd == FLOW_CLS_REPLACE) {
NF_FLOW_TABLE_STAT_INC(net, count_wq_add); NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_add);
queue_work(nf_flow_offload_add_wq, &offload->work); queue_work(nf_flow_offload_add_wq, &offload->work);
} else if (offload->cmd == FLOW_CLS_DESTROY) { } else if (offload->cmd == FLOW_CLS_DESTROY) {
NF_FLOW_TABLE_STAT_INC(net, count_wq_del); NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_del);
queue_work(nf_flow_offload_del_wq, &offload->work); queue_work(nf_flow_offload_del_wq, &offload->work);
} else { } else {
NF_FLOW_TABLE_STAT_INC(net, count_wq_stats); NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count_wq_stats);
queue_work(nf_flow_offload_stats_wq, &offload->work); queue_work(nf_flow_offload_stats_wq, &offload->work);
} }
} }
......
...@@ -1162,6 +1162,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, ...@@ -1162,6 +1162,7 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
struct nft_pipapo_match *m = priv->clone; struct nft_pipapo_match *m = priv->clone;
u8 genmask = nft_genmask_next(net); u8 genmask = nft_genmask_next(net);
struct nft_pipapo_field *f; struct nft_pipapo_field *f;
const u8 *start_p, *end_p;
int i, bsize_max, err = 0; int i, bsize_max, err = 0;
if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END)) if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END))
...@@ -1202,9 +1203,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, ...@@ -1202,9 +1203,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set,
} }
/* Validate */ /* Validate */
start_p = start;
end_p = end;
nft_pipapo_for_each_field(f, i, m) { nft_pipapo_for_each_field(f, i, m) {
const u8 *start_p = start, *end_p = end;
if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX) if (f->rules >= (unsigned long)NFT_PIPAPO_RULE0_MAX)
return -ENOSPC; return -ENOSPC;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment