Commit a08ce73b authored by David S. Miller's avatar David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf

Pablo Neira Ayuso says:

====================
Netfilter/IPVS fixes for net

The following patchset contains Netfilter/IPVS fixes for your net tree:

1) Reject non-null terminated helper names from xt_CT, from Gao Feng.

2) Fix KASAN splat due to out-of-bound access from commit phase, from
   Alexey Kodanev.

3) Missing conntrack hook registration on IPVS FTP helper, from Julian
   Anastasov.

4) Incorrect skbuff allocation size in bridge nft_reject, from Taehee Yoo.

5) Fix inverted check on packet xmit to non-local addresses, also from
   Julian.

6) Fix ebtables alignment compat problems, from Alin Nastac.

7) Hook mask checks are not correct in xt_set, from Serhey Popovych.

8) Fix timeout listing of element in ipsets, from Jozsef.

9) Cap maximum timeout value in ipset, also from Jozsef.

10) Don't allow family option for hash:mac sets, from Florent Fourcot.

11) Restrict ebtables to work with NFPROTO_BRIDGE targets only, from
    Florian.

12) Another bug reported by KASAN in the rbtree set backend, from
    Taehee Yoo.

13) Missing __IPS_MAX_BIT update doesn't include IPS_OFFLOAD_BIT.
    From Gao Feng.

14) Missing initialization of match/target in ebtables, from Florian
    Westphal.

15) Remove useless nft_dup.h file in include path, from C. Labbe.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 349b71d6 d8e87fc6
...@@ -23,6 +23,9 @@ ...@@ -23,6 +23,9 @@
/* Set is defined with timeout support: timeout value may be 0 */ /* Set is defined with timeout support: timeout value may be 0 */
#define IPSET_NO_TIMEOUT UINT_MAX #define IPSET_NO_TIMEOUT UINT_MAX
/* Max timeout value, see msecs_to_jiffies() in jiffies.h */
#define IPSET_MAX_TIMEOUT (UINT_MAX >> 1)/MSEC_PER_SEC
#define ip_set_adt_opt_timeout(opt, set) \ #define ip_set_adt_opt_timeout(opt, set) \
((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout) ((opt)->ext.timeout != IPSET_NO_TIMEOUT ? (opt)->ext.timeout : (set)->timeout)
...@@ -32,11 +35,10 @@ ip_set_timeout_uget(struct nlattr *tb) ...@@ -32,11 +35,10 @@ ip_set_timeout_uget(struct nlattr *tb)
unsigned int timeout = ip_set_get_h32(tb); unsigned int timeout = ip_set_get_h32(tb);
/* Normalize to fit into jiffies */ /* Normalize to fit into jiffies */
if (timeout > UINT_MAX/MSEC_PER_SEC) if (timeout > IPSET_MAX_TIMEOUT)
timeout = UINT_MAX/MSEC_PER_SEC; timeout = IPSET_MAX_TIMEOUT;
/* Userspace supplied TIMEOUT parameter: adjust crazy size */ return timeout;
return timeout == IPSET_NO_TIMEOUT ? IPSET_NO_TIMEOUT - 1 : timeout;
} }
static inline bool static inline bool
...@@ -65,8 +67,14 @@ ip_set_timeout_set(unsigned long *timeout, u32 value) ...@@ -65,8 +67,14 @@ ip_set_timeout_set(unsigned long *timeout, u32 value)
static inline u32 static inline u32
ip_set_timeout_get(const unsigned long *timeout) ip_set_timeout_get(const unsigned long *timeout)
{ {
return *timeout == IPSET_ELEM_PERMANENT ? 0 : u32 t;
jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
if (*timeout == IPSET_ELEM_PERMANENT)
return 0;
t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
/* Zero value in userspace means no timeout */
return t == 0 ? 1 : t;
} }
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -631,6 +631,7 @@ struct ip_vs_service { ...@@ -631,6 +631,7 @@ struct ip_vs_service {
/* alternate persistence engine */ /* alternate persistence engine */
struct ip_vs_pe __rcu *pe; struct ip_vs_pe __rcu *pe;
int conntrack_afmask;
struct rcu_head rcu_head; struct rcu_head rcu_head;
}; };
...@@ -1611,6 +1612,35 @@ static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp, ...@@ -1611,6 +1612,35 @@ static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
return false; return false;
} }
/*
 * Enable conntrack for the address family used by @svc, once per
 * family per service.  conntrack_afmask records which families are
 * already registered (bit 0 = IPv4, bit 1 = IPv6) so repeated calls
 * take the netns conntrack reference only on the first one.
 *
 * Returns 0 on success (or when conntrack support is compiled out),
 * otherwise the negative error from nf_ct_netns_get().
 */
static inline int ip_vs_register_conntrack(struct ip_vs_service *svc)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	int bit = (svc->af == AF_INET6) ? 2 : 1;
	int err;

	/* Already registered for this family: nothing to do */
	if (svc->conntrack_afmask & bit)
		return 0;

	err = nf_ct_netns_get(svc->ipvs->net, svc->af);
	if (err >= 0)
		svc->conntrack_afmask |= bit;
	return err;
#else
	return 0;
#endif
}
/*
 * Drop the netns conntrack reference taken by ip_vs_register_conntrack()
 * for @svc's address family, if one was taken, and clear the matching
 * bit in conntrack_afmask (bit 0 = IPv4, bit 1 = IPv6).  Safe to call
 * when no registration was done; a no-op without CONFIG_NF_CONNTRACK.
 */
static inline void ip_vs_unregister_conntrack(struct ip_vs_service *svc)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	int bit = (svc->af == AF_INET6) ? 2 : 1;

	if (!(svc->conntrack_afmask & bit))
		return;

	nf_ct_netns_put(svc->ipvs->net, svc->af);
	svc->conntrack_afmask &= ~bit;
#endif
}
static inline int static inline int
ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
{ {
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NFT_DUP_H_
#define _NFT_DUP_H_
/*
 * Private data for the nft "dup" (packet duplication) expression.
 * Both fields are 8-bit register indexes: sreg_addr selects the
 * register holding the duplicate's destination address, sreg_dev the
 * register holding the optional output interface index.
 * NOTE(review): this header is the one removed as unused by fix #15
 * in the merge above — kept here only as the diff's file content.
 */
struct nft_dup_inet {
	enum nft_registers sreg_addr:8;
	enum nft_registers sreg_dev:8;
};
#endif /* _NFT_DUP_H_ */
...@@ -112,7 +112,7 @@ enum ip_conntrack_status { ...@@ -112,7 +112,7 @@ enum ip_conntrack_status {
IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING | IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD), IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD),
__IPS_MAX_BIT = 14, __IPS_MAX_BIT = 15,
}; };
/* Connection tracking event types */ /* Connection tracking event types */
......
...@@ -411,6 +411,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par, ...@@ -411,6 +411,12 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0); watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
if (IS_ERR(watcher)) if (IS_ERR(watcher))
return PTR_ERR(watcher); return PTR_ERR(watcher);
if (watcher->family != NFPROTO_BRIDGE) {
module_put(watcher->me);
return -ENOENT;
}
w->u.watcher = watcher; w->u.watcher = watcher;
par->target = watcher; par->target = watcher;
...@@ -709,6 +715,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net, ...@@ -709,6 +715,8 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
} }
i = 0; i = 0;
memset(&mtpar, 0, sizeof(mtpar));
memset(&tgpar, 0, sizeof(tgpar));
mtpar.net = tgpar.net = net; mtpar.net = tgpar.net = net;
mtpar.table = tgpar.table = name; mtpar.table = tgpar.table = name;
mtpar.entryinfo = tgpar.entryinfo = e; mtpar.entryinfo = tgpar.entryinfo = e;
...@@ -730,6 +738,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net, ...@@ -730,6 +738,13 @@ ebt_check_entry(struct ebt_entry *e, struct net *net,
goto cleanup_watchers; goto cleanup_watchers;
} }
/* Reject UNSPEC, xtables verdicts/return values are incompatible */
if (target->family != NFPROTO_BRIDGE) {
module_put(target->me);
ret = -ENOENT;
goto cleanup_watchers;
}
t->u.target = target; t->u.target = target;
if (t->u.target == &ebt_standard_target) { if (t->u.target == &ebt_standard_target) {
if (gap < sizeof(struct ebt_standard_target)) { if (gap < sizeof(struct ebt_standard_target)) {
...@@ -1605,16 +1620,16 @@ struct compat_ebt_entry_mwt { ...@@ -1605,16 +1620,16 @@ struct compat_ebt_entry_mwt {
compat_uptr_t ptr; compat_uptr_t ptr;
} u; } u;
compat_uint_t match_size; compat_uint_t match_size;
compat_uint_t data[0]; compat_uint_t data[0] __attribute__ ((aligned (__alignof__(struct compat_ebt_replace))));
}; };
/* account for possible padding between match_size and ->data */ /* account for possible padding between match_size and ->data */
static int ebt_compat_entry_padsize(void) static int ebt_compat_entry_padsize(void)
{ {
BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) < BUILD_BUG_ON(sizeof(struct ebt_entry_match) <
COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt))); sizeof(struct compat_ebt_entry_mwt));
return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) - return (int) sizeof(struct ebt_entry_match) -
COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)); sizeof(struct compat_ebt_entry_mwt);
} }
static int ebt_compat_match_offset(const struct xt_match *match, static int ebt_compat_match_offset(const struct xt_match *match,
......
...@@ -261,7 +261,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net, ...@@ -261,7 +261,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net,
if (!reject6_br_csum_ok(oldskb, hook)) if (!reject6_br_csum_ok(oldskb, hook))
return; return;
nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) + nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
LL_MAX_HEADER + len, GFP_ATOMIC); LL_MAX_HEADER + len, GFP_ATOMIC);
if (!nskb) if (!nskb)
return; return;
......
...@@ -531,6 +531,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name, ...@@ -531,6 +531,7 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
return -ENOMEM; return -ENOMEM;
j = 0; j = 0;
memset(&mtpar, 0, sizeof(mtpar));
mtpar.net = net; mtpar.net = net;
mtpar.table = name; mtpar.table = name;
mtpar.entryinfo = &e->ip; mtpar.entryinfo = &e->ip;
......
...@@ -550,6 +550,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name, ...@@ -550,6 +550,7 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
return -ENOMEM; return -ENOMEM;
j = 0; j = 0;
memset(&mtpar, 0, sizeof(mtpar));
mtpar.net = net; mtpar.net = net;
mtpar.table = name; mtpar.table = name;
mtpar.entryinfo = &e->ipv6; mtpar.entryinfo = &e->ipv6;
......
...@@ -1234,7 +1234,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, ...@@ -1234,7 +1234,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
pr_debug("Create set %s with family %s\n", pr_debug("Create set %s with family %s\n",
set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6"); set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
#ifndef IP_SET_PROTO_UNDEF #ifdef IP_SET_PROTO_UNDEF
if (set->family != NFPROTO_UNSPEC)
return -IPSET_ERR_INVALID_FAMILY;
#else
if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY; return -IPSET_ERR_INVALID_FAMILY;
#endif #endif
......
...@@ -839,6 +839,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, ...@@ -839,6 +839,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
* For now only for NAT! * For now only for NAT!
*/ */
ip_vs_rs_hash(ipvs, dest); ip_vs_rs_hash(ipvs, dest);
/* FTP-NAT requires conntrack for mangling */
if (svc->port == FTPPORT)
ip_vs_register_conntrack(svc);
} }
atomic_set(&dest->conn_flags, conn_flags); atomic_set(&dest->conn_flags, conn_flags);
...@@ -1462,6 +1465,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup) ...@@ -1462,6 +1465,7 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
*/ */
static void ip_vs_unlink_service(struct ip_vs_service *svc, bool cleanup) static void ip_vs_unlink_service(struct ip_vs_service *svc, bool cleanup)
{ {
ip_vs_unregister_conntrack(svc);
/* Hold svc to avoid double release from dest_trash */ /* Hold svc to avoid double release from dest_trash */
atomic_inc(&svc->refcnt); atomic_inc(&svc->refcnt);
/* /*
......
...@@ -168,7 +168,7 @@ static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb, ...@@ -168,7 +168,7 @@ static inline bool crosses_local_route_boundary(int skb_af, struct sk_buff *skb,
bool new_rt_is_local) bool new_rt_is_local)
{ {
bool rt_mode_allow_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL); bool rt_mode_allow_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL);
bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_LOCAL); bool rt_mode_allow_non_local = !!(rt_mode & IP_VS_RT_MODE_NON_LOCAL);
bool rt_mode_allow_redirect = !!(rt_mode & IP_VS_RT_MODE_RDR); bool rt_mode_allow_redirect = !!(rt_mode & IP_VS_RT_MODE_RDR);
bool source_is_loopback; bool source_is_loopback;
bool old_rt_is_local; bool old_rt_is_local;
......
...@@ -2890,12 +2890,13 @@ static struct nft_set *nft_set_lookup_byid(const struct net *net, ...@@ -2890,12 +2890,13 @@ static struct nft_set *nft_set_lookup_byid(const struct net *net,
u32 id = ntohl(nla_get_be32(nla)); u32 id = ntohl(nla_get_be32(nla));
list_for_each_entry(trans, &net->nft.commit_list, list) { list_for_each_entry(trans, &net->nft.commit_list, list) {
struct nft_set *set = nft_trans_set(trans); if (trans->msg_type == NFT_MSG_NEWSET) {
struct nft_set *set = nft_trans_set(trans);
if (trans->msg_type == NFT_MSG_NEWSET && if (id == nft_trans_set_id(trans) &&
id == nft_trans_set_id(trans) && nft_active_genmask(set, genmask))
nft_active_genmask(set, genmask)) return set;
return set; }
} }
return ERR_PTR(-ENOENT); return ERR_PTR(-ENOENT);
} }
......
...@@ -66,7 +66,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set ...@@ -66,7 +66,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
parent = rcu_dereference_raw(parent->rb_left); parent = rcu_dereference_raw(parent->rb_left);
if (interval && if (interval &&
nft_rbtree_equal(set, this, interval) && nft_rbtree_equal(set, this, interval) &&
nft_rbtree_interval_end(this) && nft_rbtree_interval_end(rbe) &&
!nft_rbtree_interval_end(interval)) !nft_rbtree_interval_end(interval))
continue; continue;
interval = rbe; interval = rbe;
......
...@@ -245,12 +245,22 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par, ...@@ -245,12 +245,22 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
} }
if (info->helper[0]) { if (info->helper[0]) {
if (strnlen(info->helper, sizeof(info->helper)) == sizeof(info->helper)) {
ret = -ENAMETOOLONG;
goto err3;
}
ret = xt_ct_set_helper(ct, info->helper, par); ret = xt_ct_set_helper(ct, info->helper, par);
if (ret < 0) if (ret < 0)
goto err3; goto err3;
} }
if (info->timeout[0]) { if (info->timeout[0]) {
if (strnlen(info->timeout, sizeof(info->timeout)) == sizeof(info->timeout)) {
ret = -ENAMETOOLONG;
goto err4;
}
ret = xt_ct_set_timeout(ct, par, info->timeout); ret = xt_ct_set_timeout(ct, par, info->timeout);
if (ret < 0) if (ret < 0)
goto err4; goto err4;
......
...@@ -372,8 +372,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) ...@@ -372,8 +372,8 @@ set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
/* Normalize to fit into jiffies */ /* Normalize to fit into jiffies */
if (add_opt.ext.timeout != IPSET_NO_TIMEOUT && if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC) add_opt.ext.timeout > IPSET_MAX_TIMEOUT)
add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC; add_opt.ext.timeout = IPSET_MAX_TIMEOUT;
if (info->add_set.index != IPSET_INVALID_ID) if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt); ip_set_add(info->add_set.index, skb, par, &add_opt);
if (info->del_set.index != IPSET_INVALID_ID) if (info->del_set.index != IPSET_INVALID_ID)
...@@ -407,8 +407,8 @@ set_target_v3(struct sk_buff *skb, const struct xt_action_param *par) ...@@ -407,8 +407,8 @@ set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
/* Normalize to fit into jiffies */ /* Normalize to fit into jiffies */
if (add_opt.ext.timeout != IPSET_NO_TIMEOUT && if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC) add_opt.ext.timeout > IPSET_MAX_TIMEOUT)
add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC; add_opt.ext.timeout = IPSET_MAX_TIMEOUT;
if (info->add_set.index != IPSET_INVALID_ID) if (info->add_set.index != IPSET_INVALID_ID)
ip_set_add(info->add_set.index, skb, par, &add_opt); ip_set_add(info->add_set.index, skb, par, &add_opt);
if (info->del_set.index != IPSET_INVALID_ID) if (info->del_set.index != IPSET_INVALID_ID)
...@@ -470,7 +470,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par) ...@@ -470,7 +470,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
} }
if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) | if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) |
(info->flags & IPSET_FLAG_MAP_SKBQUEUE)) && (info->flags & IPSET_FLAG_MAP_SKBQUEUE)) &&
!(par->hook_mask & (1 << NF_INET_FORWARD | (par->hook_mask & ~(1 << NF_INET_FORWARD |
1 << NF_INET_LOCAL_OUT | 1 << NF_INET_LOCAL_OUT |
1 << NF_INET_POST_ROUTING))) { 1 << NF_INET_POST_ROUTING))) {
pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n"); pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment