Commit e00dd941 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec

Steffen Klassert says:

====================
pull request (net): ipsec 2020-03-27

1) Handle NETDEV_UNREGISTER for xfrm devices so that asynchronous
   unregister events are handled cleanly. From Raed Salem.

2) Fix vti6 tunnel inter-address-family TX through bpf_redirect().
   From Nicolas Dichtel.

3) Fix the length check in verify_sec_ctx_len() to avoid a
   slab-out-of-bounds. From Xin Long.

4) Add a missing verify_sec_ctx_len() check in xfrm_add_acquire()
   to avoid a possible out-of-bounds access. From Xin Long.

5) Use the built-in RCU list checking of hlist_for_each_entry_rcu()
   to silence a false lockdep warning in __xfrm6_tunnel_spi_lookup()
   when CONFIG_PROVE_RCU_LIST is enabled. From Madhuparna Bhowmik.

6) Fix a panic on esp offload when crypto is done asynchronously.
   From Xin Long.

7) Fix a skb memory leak in an error path of vti6_rcv.
   From Torsten Hilbrich.

8) Fix a race that can lead to a double free in xfrm_policy_timer.
   From Xin Long.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8262e6f9 4c59406e
@@ -303,6 +303,7 @@ config SYN_COOKIES

 config NET_IPVTI
         tristate "Virtual (secure) IP: tunneling"
+        depends on IPV6 || IPV6=n
         select INET_TUNNEL
         select NET_IP_TUNNEL
         select XFRM
...
@@ -187,17 +187,39 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
         int mtu;

         if (!dst) {
-                struct rtable *rt;
-
-                fl->u.ip4.flowi4_oif = dev->ifindex;
-                fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
-                rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
-                if (IS_ERR(rt)) {
+                switch (skb->protocol) {
+                case htons(ETH_P_IP): {
+                        struct rtable *rt;
+
+                        fl->u.ip4.flowi4_oif = dev->ifindex;
+                        fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+                        rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
+                        if (IS_ERR(rt)) {
+                                dev->stats.tx_carrier_errors++;
+                                goto tx_error_icmp;
+                        }
+                        dst = &rt->dst;
+                        skb_dst_set(skb, dst);
+                        break;
+                }
+#if IS_ENABLED(CONFIG_IPV6)
+                case htons(ETH_P_IPV6):
+                        fl->u.ip6.flowi6_oif = dev->ifindex;
+                        fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+                        dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
+                        if (dst->error) {
+                                dst_release(dst);
+                                dst = NULL;
+                                dev->stats.tx_carrier_errors++;
+                                goto tx_error_icmp;
+                        }
+                        skb_dst_set(skb, dst);
+                        break;
+#endif
+                default:
                         dev->stats.tx_carrier_errors++;
                         goto tx_error_icmp;
                 }
-                dst = &rt->dst;
-                skb_dst_set(skb, dst);
         }

         dst_hold(dst);
...
@@ -311,7 +311,7 @@ static int vti6_rcv(struct sk_buff *skb)

                 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                         rcu_read_unlock();
-                        return 0;
+                        goto discard;
                 }

                 ipv6h = ipv6_hdr(skb);
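The vti6_rcv() change above reroutes the policy-check failure through the function's existing discard label instead of returning with the skb still allocated. A minimal standalone illustration of the leak pattern being fixed (example_rcv() and example_policy_check() are made-up names for this sketch, not the upstream code):

#include <linux/skbuff.h>

/* Stand-in for xfrm6_policy_check(); always accepts in this sketch. */
static bool example_policy_check(struct sk_buff *skb)
{
        return true;
}

static int example_rcv(struct sk_buff *skb)
{
        if (!example_policy_check(skb))
                goto discard;

        /* ... normal receive processing ... */
        return 0;

discard:
        kfree_skb(skb);         /* free the buffer instead of leaking it */
        return 0;
}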
@@ -450,15 +450,33 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
         int mtu;

         if (!dst) {
-                fl->u.ip6.flowi6_oif = dev->ifindex;
-                fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
-                dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
-                if (dst->error) {
-                        dst_release(dst);
-                        dst = NULL;
+                switch (skb->protocol) {
+                case htons(ETH_P_IP): {
+                        struct rtable *rt;
+
+                        fl->u.ip4.flowi4_oif = dev->ifindex;
+                        fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
+                        rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
+                        if (IS_ERR(rt))
+                                goto tx_err_link_failure;
+                        dst = &rt->dst;
+                        skb_dst_set(skb, dst);
+                        break;
+                }
+                case htons(ETH_P_IPV6):
+                        fl->u.ip6.flowi6_oif = dev->ifindex;
+                        fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+                        dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
+                        if (dst->error) {
+                                dst_release(dst);
+                                dst = NULL;
+                                goto tx_err_link_failure;
+                        }
+                        skb_dst_set(skb, dst);
+                        break;
+                default:
                         goto tx_err_link_failure;
                 }
-                skb_dst_set(skb, dst);
         }

         dst_hold(dst);
...
@@ -78,7 +78,7 @@ static struct xfrm6_tunnel_spi *__xfrm6_tunnel_spi_lookup(struct net *net, const

         hlist_for_each_entry_rcu(x6spi,
                         &xfrm6_tn->spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
-                        list_byaddr) {
+                        list_byaddr, lockdep_is_held(&xfrm6_tunnel_spi_lock)) {
                 if (xfrm6_addr_equal(&x6spi->addr, saddr))
                         return x6spi;
         }
...
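For context on the hunk above: hlist_for_each_entry_rcu() takes an optional lockdep condition as its fourth argument, so a traversal protected by a spinlock rather than rcu_read_lock() can state that fact and CONFIG_PROVE_RCU_LIST stops reporting a false positive. A minimal sketch of the pattern with made-up names (not the xfrm6 structures):

#include <linux/rculist.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static HLIST_HEAD(example_head);

struct example_entry {
        int key;
        struct hlist_node node;
};

/* May be called with either rcu_read_lock() or example_lock held; the
 * lockdep condition documents the second case to PROVE_RCU_LIST. */
static struct example_entry *example_lookup(int key)
{
        struct example_entry *e;

        hlist_for_each_entry_rcu(e, &example_head, node,
                                 lockdep_is_held(&example_lock)) {
                if (e->key == key)
                        return e;
        }
        return NULL;
}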
@@ -78,8 +78,8 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
         int err;
         unsigned long flags;
         struct xfrm_state *x;
-        struct sk_buff *skb2, *nskb;
         struct softnet_data *sd;
+        struct sk_buff *skb2, *nskb, *pskb = NULL;
         netdev_features_t esp_features = features;
         struct xfrm_offload *xo = xfrm_offload(skb);
         struct sec_path *sp;
@@ -168,14 +168,14 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
                 } else {
                         if (skb == skb2)
                                 skb = nskb;
-
-                        if (!skb)
-                                return NULL;
+                        else
+                                pskb->next = nskb;

                         continue;
                 }

                 skb_push(skb2, skb2->data - skb_mac_header(skb2));
+                pskb = skb2;
         }

         return skb;
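The pskb pointer introduced above tracks the last segment that stays on the chain: a segment whose ESP crypto went asynchronous (err == -EINPROGRESS) now belongs to the async completion path and must be unlinked before the walk continues, otherwise the stack later touches an skb it no longer owns. A simplified, self-contained sketch of just that unlink step (example_went_async() is a made-up stand-in for the -EINPROGRESS result; this is not the full validate_xmit_xfrm()):

#include <linux/skbuff.h>

/* Stand-in for "the offload xmit returned -EINPROGRESS for this segment". */
static bool example_went_async(struct sk_buff *skb)
{
        return false;
}

/* Remove async segments from a GSO segment chain, keeping the rest linked. */
static struct sk_buff *example_prune_async(struct sk_buff *skb)
{
        struct sk_buff *skb2, *nskb, *pskb = NULL;

        skb_list_walk_safe(skb, skb2, nskb) {
                if (example_went_async(skb2)) {
                        if (skb == skb2)
                                skb = nskb;             /* async segment was the head */
                        else
                                pskb->next = nskb;      /* bridge over the async segment */
                        continue;
                }
                pskb = skb2;    /* last segment still owned by this path */
        }
        return skb;
}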
@@ -383,6 +383,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
                 return xfrm_dev_feat_change(dev);

         case NETDEV_DOWN:
+        case NETDEV_UNREGISTER:
                 return xfrm_dev_down(dev);
         }
         return NOTIFY_DONE;
...
@@ -434,7 +434,9 @@ EXPORT_SYMBOL(xfrm_policy_destroy);

 static void xfrm_policy_kill(struct xfrm_policy *policy)
 {
+        write_lock_bh(&policy->lock);
         policy->walk.dead = 1;
+        write_unlock_bh(&policy->lock);

         atomic_inc(&policy->genid);
...
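The write_lock_bh()/write_unlock_bh() pair added above serializes setting walk.dead against xfrm_policy_timer(), which reads the flag under the same policy lock; without it the timer could still observe walk.dead == 0 while the policy is being killed, and both paths could end up dropping the last reference, hence the double free. A rough, abridged sketch of the reader side (not the complete upstream handler):

/* Abridged: with xfrm_policy_kill() now setting walk.dead under
 * write_lock_bh(&policy->lock), this check cannot race with it. */
static void xfrm_policy_timer(struct timer_list *t)
{
        struct xfrm_policy *xp = from_timer(xp, t, timer);

        read_lock(&xp->lock);
        if (unlikely(xp->walk.dead))
                goto out;

        /* ... expiry handling, possibly rearming the timer ... */

out:
        read_unlock(&xp->lock);
        xfrm_pol_put(xp);       /* drop the reference taken when the timer was armed */
}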
@@ -110,7 +110,8 @@ static inline int verify_sec_ctx_len(struct nlattr **attrs)
                 return 0;

         uctx = nla_data(rt);
-        if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
+        if (uctx->len > nla_len(rt) ||
+            uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
                 return -EINVAL;

         return 0;
@@ -2273,6 +2274,9 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
         xfrm_mark_get(attrs, &mark);

         err = verify_newpolicy_info(&ua->policy);
+        if (err)
+                goto free_state;
+        err = verify_sec_ctx_len(attrs);
         if (err)
                 goto free_state;

...
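To spell out why verify_sec_ctx_len() needs both comparisons: nla_len(rt) is the number of bytes the sender actually attached, while uctx->len and uctx->ctx_len are values the sender merely claims, so the claimed length has to fit inside the attribute and be internally consistent before the context string that follows the header is ever read. A hypothetical standalone version of the check, for illustration only (example_check_sec_ctx() is not the in-tree function):

#include <linux/errno.h>
#include <linux/xfrm.h>
#include <net/netlink.h>

static int example_check_sec_ctx(const struct nlattr *rt)
{
        const struct xfrm_user_sec_ctx *uctx = nla_data(rt);

        if (uctx->len > nla_len(rt))            /* claimed size must fit in the attribute */
                return -EINVAL;

        if (uctx->len != sizeof(*uctx) + uctx->ctx_len) /* and match the declared ctx_len */
                return -EINVAL;

        return 0;
}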