Commit e51a08b8 authored by David S. Miller

Merge branch 'openvswitch-net'

Pravin B Shelar says:

====================
openvswitch: datapath fixes

The following patch series is mostly targeted at MPLS fixes. The other
patches are related to datapath transmit path error handling.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ceb8d5bf 74f47278
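
The common thread in the transmit-path fixes below is skb ownership: a tunnel send routine must either hand the skb down the stack or free it itself before returning a negative errno, so callers (vxlan_xmit_one, ovs_vport_send) never free it a second time. A minimal user-space sketch of that consume-or-free contract, with hypothetical names rather than kernel code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
        char *data;
};

/* Hypothetical transmit helper following the consume-or-free rule: on
 * success the buffer is handed down and released by the lower layer, on
 * failure it is freed right here and a negative errno comes back. Either
 * way the caller must not touch the buffer again. */
static int tnl_send(struct buf *b, int simulate_error)
{
        if (simulate_error) {
                free(b->data);
                free(b);
                return -EINVAL;         /* buffer is gone; caller must not free it */
        }
        printf("sent: %s\n", b->data);
        free(b->data);                  /* stand-in for the lower layer consuming it */
        free(b);
        return 0;
}

int main(void)
{
        struct buf *b = malloc(sizeof(*b));
        int err;

        b->data = strdup("payload");
        err = tnl_send(b, 1);
        if (err < 0)
                fprintf(stderr, "send failed: %d, buffer already freed\n", err);
        /* Freeing b here again would be exactly the double free these patches remove. */
        return 0;
}
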
@@ -1579,8 +1579,10 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
 	bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
 	skb = udp_tunnel_handle_offloads(skb, udp_sum);
-	if (IS_ERR(skb))
-		return -EINVAL;
+	if (IS_ERR(skb)) {
+		err = -EINVAL;
+		goto err;
+	}
 	skb_scrub_packet(skb, xnet);
@@ -1590,12 +1592,16 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
 	/* Need space for new headers (invalidates iph ptr) */
 	err = skb_cow_head(skb, min_headroom);
-	if (unlikely(err))
-		return err;
+	if (unlikely(err)) {
+		kfree_skb(skb);
+		goto err;
+	}
 	skb = vlan_hwaccel_push_inside(skb);
-	if (WARN_ON(!skb))
-		return -ENOMEM;
+	if (WARN_ON(!skb)) {
+		err = -ENOMEM;
+		goto err;
+	}
 	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
 	vxh->vx_flags = htonl(VXLAN_FLAGS);
@@ -1606,6 +1612,9 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
 	udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
 			     ttl, src_port, dst_port);
 	return 0;
+err:
+	dst_release(dst);
+	return err;
 }
 #endif
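
The vxlan6_xmit_skb() hunks above replace scattered early returns with a single err: label so the dst reference passed in is released on every failure path (on success it is consumed by the transmit call). A small sketch of that single-exit cleanup shape, with made-up names and plain malloc/free standing in for route refcounting:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct route {
        int id;
};

static struct route *route_get(void)
{
        return calloc(1, sizeof(struct route));
}

static void route_put(struct route *rt)
{
        free(rt);
}

/* Stand-in for the transmit call that consumes the route reference. */
static void fake_xmit(struct route *rt)
{
        free(rt);
}

/* Every failure after route_get() jumps to the same label, so the
 * reference is dropped exactly once no matter where we bail out. */
static int xmit(int failing_step)
{
        struct route *rt = route_get();
        int err;

        if (failing_step == 1) {
                err = -EINVAL;
                goto err;
        }
        if (failing_step == 2) {
                err = -ENOMEM;
                goto err;
        }

        fake_xmit(rt);          /* success: the lower layer takes the reference */
        return 0;
err:
        route_put(rt);          /* failure: single cleanup point */
        return err;
}

int main(void)
{
        printf("%d %d %d\n", xmit(0), xmit(1), xmit(2));
        return 0;
}
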
@@ -1621,7 +1630,7 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
 	skb = udp_tunnel_handle_offloads(skb, udp_sum);
 	if (IS_ERR(skb))
-		return -EINVAL;
+		return PTR_ERR(skb);
 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
 			+ VXLAN_HLEN + sizeof(struct iphdr)
@@ -1629,8 +1638,10 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
 	/* Need space for new headers (invalidates iph ptr) */
 	err = skb_cow_head(skb, min_headroom);
-	if (unlikely(err))
+	if (unlikely(err)) {
+		kfree_skb(skb);
 		return err;
+	}
 	skb = vlan_hwaccel_push_inside(skb);
 	if (WARN_ON(!skb))
@@ -1776,9 +1787,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 				     tos, ttl, df, src_port, dst_port,
 				     htonl(vni << 8),
 				     !net_eq(vxlan->net, dev_net(vxlan->dev)));
-		if (err < 0)
+		if (err < 0) {
+			/* skb is already freed. */
+			skb = NULL;
 			goto rt_tx_error;
+		}
 		iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 #if IS_ENABLED(CONFIG_IPV6)
 	} else {
...
@@ -2522,7 +2522,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
 /* If MPLS offload request, verify we are testing hardware MPLS features
  * instead of standard features for the netdev.
  */
-#ifdef CONFIG_NET_MPLS_GSO
+#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
 static netdev_features_t net_mpls_features(struct sk_buff *skb,
 					   netdev_features_t features,
 					   __be16 type)
...
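
The net/core change swaps "#ifdef CONFIG_NET_MPLS_GSO" for IS_ENABLED(CONFIG_NET_MPLS_GSO): the plain #ifdef only triggers when the option is built in (=y), whereas IS_ENABLED() is also true when it is built as a module (=m, which defines CONFIG_NET_MPLS_GSO_MODULE instead). A simplified user-space sketch of that behavior, not the real include/linux/kconfig.h macros:

#include <stdio.h>

/* Kconfig defines CONFIG_FOO for =y and CONFIG_FOO_MODULE for =m; pretend
 * here that MPLS GSO was built as a module. */
#define CONFIG_NET_MPLS_GSO_MODULE 1

/* Emulation of what IS_ENABLED() evaluates to; the real macro needs no
 * explicit defined() checks. */
#if defined(CONFIG_NET_MPLS_GSO) || defined(CONFIG_NET_MPLS_GSO_MODULE)
#define MPLS_GSO_ENABLED 1
#else
#define MPLS_GSO_ENABLED 0
#endif

int main(void)
{
        /* A plain "#ifdef CONFIG_NET_MPLS_GSO" would be false here even
         * though the feature is available as a module. */
        printf("MPLS GSO enabled: %d\n", MPLS_GSO_ENABLED);
        return 0;
}
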
@@ -122,14 +122,18 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
 	int err;
 	skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
 	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
 			+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
 			+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
 	err = skb_cow_head(skb, min_headroom);
-	if (unlikely(err))
+	if (unlikely(err)) {
+		kfree_skb(skb);
 		return err;
+	}
 	skb = vlan_hwaccel_push_inside(skb);
 	if (unlikely(!skb))
...
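
geneve_xmit_skb() now checks the pointer returned by udp_tunnel_handle_offloads() with IS_ERR() and propagates PTR_ERR(skb), the same convention the vxlan hunks switch to. The kernel packs small negative errnos into the pointer value itself; below is a simplified user-space sketch of that ERR_PTR/IS_ERR/PTR_ERR scheme, not the kernel's err.h definitions:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Errno values live in the last page of the address space, so a single
 * pointer can carry either a valid object or an error code. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical offload helper: returns the buffer or an encoded errno. */
static void *handle_offloads(int fail)
{
        static char skb[] = "skb";
        return fail ? ERR_PTR(-EINVAL) : skb;
}

int main(void)
{
        void *skb = handle_offloads(1);

        if (IS_ERR(skb)) {
                printf("offload setup failed: %ld\n", PTR_ERR(skb));
                return 1;
        }
        printf("got %s\n", (char *)skb);
        return 0;
}
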
@@ -31,10 +31,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
 				  SKB_GSO_TCPV6 |
 				  SKB_GSO_UDP |
 				  SKB_GSO_DODGY |
-				  SKB_GSO_TCP_ECN |
-				  SKB_GSO_GRE |
-				  SKB_GSO_GRE_CSUM |
-				  SKB_GSO_IPIP)))
+				  SKB_GSO_TCP_ECN)))
 		goto out;
 	/* Setup inner SKB. */
...
@@ -147,7 +147,8 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 	hdr = eth_hdr(skb);
 	hdr->h_proto = mpls->mpls_ethertype;
-	skb_set_inner_protocol(skb, skb->protocol);
+	if (!skb->inner_protocol)
+		skb_set_inner_protocol(skb, skb->protocol);
 	skb->protocol = mpls->mpls_ethertype;
 	invalidate_flow_key(key);
...
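
push_mpls() now records the inner protocol only if skb->inner_protocol is still unset, so pushing a second MPLS label keeps the original payload type (say IPv4) rather than overwriting it with MPLS, which is what the GSO path needs in order to segment the packet correctly. A tiny user-space model of that first-write-wins rule, using hypothetical types:

#include <stdio.h>

#define ETH_P_IP   0x0800
#define ETH_P_MPLS 0x8847

struct pkt {
        unsigned short protocol;        /* outermost ethertype */
        unsigned short inner_protocol;  /* innermost payload type, 0 = unset */
};

/* Mirrors the fixed push_mpls(): record the inner protocol only once,
 * then switch the outer protocol to MPLS. */
static void push_mpls(struct pkt *p)
{
        if (!p->inner_protocol)
                p->inner_protocol = p->protocol;
        p->protocol = ETH_P_MPLS;
}

int main(void)
{
        struct pkt p = { .protocol = ETH_P_IP };

        push_mpls(&p);  /* first label: inner_protocol becomes IPv4 */
        push_mpls(&p);  /* second label: inner_protocol must stay IPv4 */

        printf("outer 0x%04x inner 0x%04x\n", p.protocol, p.inner_protocol);
        return 0;
}
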
@@ -1753,7 +1753,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
 				  __be16 eth_type, __be16 vlan_tci, bool log)
 {
 	const struct nlattr *a;
-	bool out_tnl_port = false;
 	int rem, err;
 	if (depth >= SAMPLE_ACTION_DEPTH)
@@ -1796,8 +1795,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
 		case OVS_ACTION_ATTR_OUTPUT:
 			if (nla_get_u32(a) >= DP_MAX_PORTS)
 				return -EINVAL;
-			out_tnl_port = false;
 			break;
 		case OVS_ACTION_ATTR_HASH: {
@@ -1832,12 +1829,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
 		case OVS_ACTION_ATTR_PUSH_MPLS: {
 			const struct ovs_action_push_mpls *mpls = nla_data(a);
-			/* Networking stack do not allow simultaneous Tunnel
-			 * and MPLS GSO.
-			 */
-			if (out_tnl_port)
-				return -EINVAL;
 			if (!eth_p_mpls(mpls->mpls_ethertype))
 				return -EINVAL;
 			/* Prohibit push MPLS other than to a white list
@@ -1873,11 +1864,9 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
 		case OVS_ACTION_ATTR_SET:
 			err = validate_set(a, key, sfa,
-					   &out_tnl_port, eth_type, log);
+					   &skip_copy, eth_type, log);
 			if (err)
 				return err;
-			skip_copy = out_tnl_port;
 			break;
 		case OVS_ACTION_ATTR_SAMPLE:
...
@@ -219,7 +219,10 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
 			     false);
 	if (err < 0)
 		ip_rt_put(rt);
+	return err;
 error:
+	kfree_skb(skb);
 	return err;
 }
...
@@ -73,7 +73,7 @@ static struct sk_buff *__build_header(struct sk_buff *skb,
 	skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
 	if (IS_ERR(skb))
-		return NULL;
+		return skb;
 	tpi.flags = filter_tnl_flags(tun_key->tun_flags);
 	tpi.proto = htons(ETH_P_TEB);
@@ -144,7 +144,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 	if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
 		err = -EINVAL;
-		goto error;
+		goto err_free_skb;
 	}
 	tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
@@ -157,8 +157,10 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 	fl.flowi4_proto = IPPROTO_GRE;
 	rt = ip_route_output_key(net, &fl);
-	if (IS_ERR(rt))
-		return PTR_ERR(rt);
+	if (IS_ERR(rt)) {
+		err = PTR_ERR(rt);
+		goto err_free_skb;
+	}
 	tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);
@@ -183,8 +185,9 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 	/* Push Tunnel header. */
 	skb = __build_header(skb, tunnel_hlen);
-	if (unlikely(!skb)) {
-		err = 0;
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(rt);
+		skb = NULL;
 		goto err_free_rt;
 	}
@@ -198,7 +201,8 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 			     tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
 err_free_rt:
 	ip_rt_put(rt);
-error:
+err_free_skb:
+	kfree_skb(skb);
 	return err;
 }
...
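
gre_tnl_send() now unwinds through two labels, err_free_rt then err_free_skb, so failures after the route lookup drop both the route and the skb, while earlier failures only free the skb. A user-space sketch of that reverse-order, fall-through cleanup layout, with malloc/free standing in for the real objects:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Resources are released in reverse order of acquisition; each failure
 * jumps to the label that frees everything acquired so far and falls
 * through the rest. Names are stand-ins, not kernel code. */
static int tnl_send(int failing_step)
{
        char *skb = malloc(16);         /* handed to us by the caller */
        char *rt = NULL;
        int err;

        if (failing_step == 1) {        /* e.g. missing egress tunnel info */
                err = -EINVAL;
                goto err_free_skb;      /* only the skb exists so far */
        }

        rt = malloc(16);                /* e.g. the route lookup */
        if (failing_step == 2) {        /* e.g. header push fails */
                err = -ENOMEM;
                goto err_free_rt;       /* route and skb both need freeing */
        }

        free(rt);                       /* success path consumes both */
        free(skb);
        return 0;

err_free_rt:
        free(rt);
err_free_skb:
        free(skb);
        return err;
}

int main(void)
{
        printf("%d %d %d\n", tnl_send(0), tnl_send(1), tnl_send(2));
        return 0;
}
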
@@ -187,7 +187,9 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
 			     false);
 	if (err < 0)
 		ip_rt_put(rt);
+	return err;
 error:
+	kfree_skb(skb);
 	return err;
 }
...
@@ -519,10 +519,9 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
 		u64_stats_update_end(&stats->syncp);
 	} else if (sent < 0) {
 		ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
-		kfree_skb(skb);
-	} else
+	} else {
 		ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
+	}
 	return sent;
 }
...