Commit fc741216 authored by Jesse Gross's avatar Jesse Gross Committed by David S. Miller

net offloading: Pass features into netif_needs_gso().

Now that there is a single function that can compute the device
features relevant to a packet, we don't want to run it for each
offload.  This converts netif_needs_gso() to take the features
of the device, rather than computing them itself.
Signed-off-by: Jesse Gross <jesse@nicira.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent f01a5236
...@@ -488,7 +488,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -488,7 +488,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!netif_carrier_ok(dev) || if (unlikely(!netif_carrier_ok(dev) ||
(frags > 1 && !xennet_can_sg(dev)) || (frags > 1 && !xennet_can_sg(dev)) ||
netif_needs_gso(dev, skb))) { netif_needs_gso(skb, netif_skb_features(skb)))) {
spin_unlock_irq(&np->tx_lock); spin_unlock_irq(&np->tx_lock);
goto drop; goto drop;
} }
......
...@@ -2317,16 +2317,10 @@ static inline int skb_gso_ok(struct sk_buff *skb, int features) ...@@ -2317,16 +2317,10 @@ static inline int skb_gso_ok(struct sk_buff *skb, int features)
(!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
} }
static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) static inline int netif_needs_gso(struct sk_buff *skb, int features)
{ {
if (skb_is_gso(skb)) { return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
int features = netif_skb_features(skb);
return (!skb_gso_ok(skb, features) ||
unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
return 0;
} }
static inline void netif_set_gso_max_size(struct net_device *dev, static inline void netif_set_gso_max_size(struct net_device *dev,
......
...@@ -2086,6 +2086,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -2086,6 +2086,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
int rc = NETDEV_TX_OK; int rc = NETDEV_TX_OK;
if (likely(!skb->next)) { if (likely(!skb->next)) {
int features;
/* /*
* If device doesnt need skb->dst, release it right now while * If device doesnt need skb->dst, release it right now while
* its hot in this cpu cache * its hot in this cpu cache
...@@ -2098,8 +2100,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -2098,8 +2100,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
skb_orphan_try(skb); skb_orphan_try(skb);
features = netif_skb_features(skb);
if (vlan_tx_tag_present(skb) && if (vlan_tx_tag_present(skb) &&
!(dev->features & NETIF_F_HW_VLAN_TX)) { !(features & NETIF_F_HW_VLAN_TX)) {
skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
if (unlikely(!skb)) if (unlikely(!skb))
goto out; goto out;
...@@ -2107,7 +2111,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, ...@@ -2107,7 +2111,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
skb->vlan_tci = 0; skb->vlan_tci = 0;
} }
if (netif_needs_gso(dev, skb)) { if (netif_needs_gso(skb, features)) {
if (unlikely(dev_gso_segment(skb))) if (unlikely(dev_gso_segment(skb)))
goto out_kfree_skb; goto out_kfree_skb;
if (skb->next) if (skb->next)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment