Commit 5f35227e authored by Jesse Gross, committed by David S. Miller

net: Generalize ndo_gso_check to ndo_features_check

GSO isn't the only offload feature with restrictions that
potentially can't be expressed with the current features mechanism.
Checksum is another, although it's a general issue that could in
theory apply to anything. Even if it may be possible to
implement these restrictions in other ways, doing so can result in
duplicate code or inefficient per-packet behavior.

This generalizes ndo_gso_check so that drivers can remove any
features that don't make sense for a given packet, similar to
netif_skb_features(). It also converts the existing driver
restrictions to the new format, completing the work that was
done to support tunnel protocols, since the issues apply to
checksums as well.

By actually removing features from the set that is used to do
offloading, it also solves another problem with the existing
interface: in those cases, GSO would run with the original set
of features and not do anything, because it appeared that
segmentation was not required.
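
For illustration, a minimal sketch of a driver callback under the
new interface (the "foo" driver name and its specific hardware
restriction are hypothetical, not part of this change):

    static netdev_features_t foo_features_check(struct sk_buff *skb,
                                                 struct net_device *dev,
                                                 netdev_features_t features)
    {
            /* Hypothetical restriction: the hardware cannot checksum
             * or segment encapsulated packets, so clear those feature
             * bits and let the stack fall back to software offloads.
             */
            if (skb->encapsulation)
                    return features & ~(NETIF_F_ALL_CSUM |
                                        NETIF_F_GSO_MASK);

            return features;
    }

The driver would point .ndo_features_check at this function in its
net_device_ops; the core then masks the computed feature set with the
result in netif_skb_features() before calling harmonize_features().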

CC: Tom Herbert <therbert@google.com>
CC: Joe Stringer <joestringer@nicira.com>
CC: Eric Dumazet <edumazet@google.com>
CC: Hayes Wang <hayeswang@realtek.com>
Signed-off-by: Jesse Gross <jesse@nicira.com>
Acked-by: Tom Herbert <therbert@google.com>
Fixes: 04ffcb25 ("net: Add ndo_gso_check")
Tested-by: Hayes Wang <hayeswang@realtek.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6e4ab361
@@ -12553,9 +12553,11 @@ static int bnx2x_get_phys_port_id(struct net_device *netdev,
         return 0;
 }
 
-static bool bnx2x_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
+                                              struct net_device *dev,
+                                              netdev_features_t features)
 {
-        return vxlan_gso_check(skb);
+        return vxlan_features_check(skb, features);
 }
 
 static const struct net_device_ops bnx2x_netdev_ops = {
@@ -12589,7 +12591,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 #endif
         .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
         .ndo_set_vf_link_state = bnx2x_set_vf_link_state,
-        .ndo_gso_check = bnx2x_gso_check,
+        .ndo_features_check = bnx2x_features_check,
 };
 
 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
...
@@ -4459,9 +4459,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
         adapter->vxlan_port_count--;
 }
 
-static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t be_features_check(struct sk_buff *skb,
+                                           struct net_device *dev,
+                                           netdev_features_t features)
 {
-        return vxlan_gso_check(skb);
+        return vxlan_features_check(skb, features);
 }
 #endif
 
@@ -4492,7 +4494,7 @@ static const struct net_device_ops be_netdev_ops = {
 #ifdef CONFIG_BE2NET_VXLAN
         .ndo_add_vxlan_port = be_add_vxlan_port,
         .ndo_del_vxlan_port = be_del_vxlan_port,
-        .ndo_gso_check = be_gso_check,
+        .ndo_features_check = be_features_check,
 #endif
 };
...
@@ -2365,9 +2365,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev,
         queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
 }
 
-static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
+                                                struct net_device *dev,
+                                                netdev_features_t features)
 {
-        return vxlan_gso_check(skb);
+        return vxlan_features_check(skb, features);
 }
 #endif
 
@@ -2400,7 +2402,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 #ifdef CONFIG_MLX4_EN_VXLAN
         .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
         .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
-        .ndo_gso_check = mlx4_en_gso_check,
+        .ndo_features_check = mlx4_en_features_check,
 #endif
 };
 
@@ -2434,7 +2436,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
 #ifdef CONFIG_MLX4_EN_VXLAN
         .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
         .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
-        .ndo_gso_check = mlx4_en_gso_check,
+        .ndo_features_check = mlx4_en_features_check,
 #endif
 };
...
@@ -505,9 +505,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
         adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
 }
 
-static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
+                                               struct net_device *dev,
+                                               netdev_features_t features)
 {
-        return vxlan_gso_check(skb);
+        return vxlan_features_check(skb, features);
 }
 #endif
 
@@ -532,7 +534,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
 #ifdef CONFIG_QLCNIC_VXLAN
         .ndo_add_vxlan_port = qlcnic_add_vxlan_port,
         .ndo_del_vxlan_port = qlcnic_del_vxlan_port,
-        .ndo_gso_check = qlcnic_gso_check,
+        .ndo_features_check = qlcnic_features_check,
 #endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
         .ndo_poll_controller = qlcnic_poll_controller,
...
@@ -1012,12 +1012,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *      Callback to use for xmit over the accelerated station. This
  *      is used in place of ndo_start_xmit on accelerated net
  *      devices.
- * bool (*ndo_gso_check) (struct sk_buff *skb,
- *                        struct net_device *dev);
- *      Called by core transmit path to determine if device is capable of
- *      performing GSO on a packet. The device returns true if it is
- *      able to GSO the packet, false otherwise. If the return value is
- *      false the stack will do software GSO.
+ * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
+ *                                          struct net_device *dev,
+ *                                          netdev_features_t features);
+ *      Called by core transmit path to determine if device is capable of
+ *      performing offload operations on a given packet. This is to give
+ *      the device an opportunity to implement any restrictions that cannot
+ *      be otherwise expressed by feature flags. The check is called with
+ *      the set of features that the stack has calculated and it returns
+ *      those the driver believes to be appropriate.
  *
  * int (*ndo_switch_parent_id_get)(struct net_device *dev,
  *                                 struct netdev_phys_item_id *psid);
@@ -1178,8 +1181,9 @@ struct net_device_ops {
                                                 struct net_device *dev,
                                                 void *priv);
         int              (*ndo_get_lock_subclass)(struct net_device *dev);
-        bool             (*ndo_gso_check) (struct sk_buff *skb,
-                                           struct net_device *dev);
+        netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
+                                                 struct net_device *dev,
+                                                 netdev_features_t features);
 #ifdef CONFIG_NET_SWITCHDEV
         int              (*ndo_switch_parent_id_get)(struct net_device *dev,
                                                      struct netdev_phys_item_id *psid);
@@ -3611,8 +3615,6 @@ static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
                                    netdev_features_t features)
 {
         return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
-                (dev->netdev_ops->ndo_gso_check &&
-                 !dev->netdev_ops->ndo_gso_check(skb, dev)) ||
                 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
                          (skb->ip_summed != CHECKSUM_UNNECESSARY)));
 }
...
@@ -1,6 +1,9 @@
 #ifndef __NET_VXLAN_H
 #define __NET_VXLAN_H 1
 
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/udp.h>
@@ -51,16 +54,33 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
                    __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
                    __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
 
-static inline bool vxlan_gso_check(struct sk_buff *skb)
+static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
+                                                     netdev_features_t features)
 {
-        if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) &&
+        u8 l4_hdr = 0;
+
+        if (!skb->encapsulation)
+                return features;
+
+        switch (vlan_get_protocol(skb)) {
+        case htons(ETH_P_IP):
+                l4_hdr = ip_hdr(skb)->protocol;
+                break;
+        case htons(ETH_P_IPV6):
+                l4_hdr = ipv6_hdr(skb)->nexthdr;
+                break;
+        default:
+                return features;
+        }
+
+        if ((l4_hdr == IPPROTO_UDP) &&
             (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
              skb->inner_protocol != htons(ETH_P_TEB) ||
              (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
               sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
-                return false;
+                return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
 
-        return true;
+        return features;
 }
 
 /* IP header + UDP + VXLAN + Ethernet header */
...
@@ -2563,7 +2563,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 
 netdev_features_t netif_skb_features(struct sk_buff *skb)
 {
-        const struct net_device *dev = skb->dev;
+        struct net_device *dev = skb->dev;
         netdev_features_t features = dev->features;
         u16 gso_segs = skb_shinfo(skb)->gso_segs;
         __be16 protocol = skb->protocol;
@@ -2571,13 +2571,20 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
         if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
                 features &= ~NETIF_F_GSO_MASK;
 
+        /* If encapsulation offload request, verify we are testing
+         * hardware encapsulation features instead of standard
+         * features for the netdev
+         */
+        if (skb->encapsulation)
+                features &= dev->hw_enc_features;
+
         if (!vlan_tx_tag_present(skb)) {
                 if (unlikely(protocol == htons(ETH_P_8021Q) ||
                              protocol == htons(ETH_P_8021AD))) {
                         struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
                         protocol = veh->h_vlan_encapsulated_proto;
                 } else {
-                        return harmonize_features(skb, features);
+                        goto finalize;
                 }
         }
 
@@ -2595,6 +2602,11 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
                                                NETIF_F_HW_VLAN_CTAG_TX |
                                                NETIF_F_HW_VLAN_STAG_TX);
 
+finalize:
+        if (dev->netdev_ops->ndo_features_check)
+                features &= dev->netdev_ops->ndo_features_check(skb, dev,
+                                                                features);
+
         return harmonize_features(skb, features);
 }
 EXPORT_SYMBOL(netif_skb_features);
@@ -2665,13 +2677,6 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
         if (unlikely(!skb))
                 goto out_null;
 
-        /* If encapsulation offload request, verify we are testing
-         * hardware encapsulation features instead of standard
-         * features for the netdev
-         */
-        if (skb->encapsulation)
-                features &= dev->hw_enc_features;
-
         if (netif_needs_gso(dev, skb, features)) {
                 struct sk_buff *segs;
...