Commit 24b2c375 authored by Edward Cree, committed by David S. Miller

sfc: advertise encapsulated offloads on EF10

Necessitates an .ndo_features_check, as the EF10 datapath has several
limitations on what it can handle.

Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0ce8df66
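
For context on why both halves of this patch are needed: on the transmit path the core narrows the feature set it will use for a given skb, starting from hw_enc_features when the packet is encapsulated and then asking the driver's .ndo_features_check to drop anything its datapath cannot handle. The sketch below is a simplified, illustrative paraphrase of that flow; the helper name is made up here, and the real logic lives in netif_skb_features() in net/core/dev.c.

/* Illustrative sketch only, not verbatim kernel code. */
static netdev_features_t tx_features_for_skb(struct sk_buff *skb,
					     struct net_device *dev)
{
	netdev_features_t features = dev->features;

	/* Encapsulated packets are limited to what the device advertises
	 * in hw_enc_features -- which efx_ef10_init_nic() now populates.
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	/* The driver can then strip offloads it cannot perform for this
	 * particular packet -- which efx_features_check() below does.
	 */
	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	return features;
}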
@@ -1304,6 +1304,7 @@ static void efx_ef10_fini_nic(struct efx_nic *efx)
static int efx_ef10_init_nic(struct efx_nic *efx)
{
	struct efx_ef10_nic_data *nic_data = efx->nic_data;
	netdev_features_t hw_enc_features = 0;
	int rc;

	if (nic_data->must_check_datapath_caps) {
@@ -1348,6 +1349,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
		nic_data->must_restore_piobufs = false;
	}

	/* add encapsulated checksum offload features */
	if (efx_has_cap(efx, VXLAN_NVGRE) && !efx_ef10_is_vf(efx))
		hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	/* add encapsulated TSO features */
	if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
		netdev_features_t encap_tso_features;

		encap_tso_features = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;

		hw_enc_features |= encap_tso_features | NETIF_F_TSO;
		efx->net_dev->features |= encap_tso_features;
	}
	efx->net_dev->hw_enc_features = hw_enc_features;

	/* don't fail init if RSS setup doesn't work */
	rc = efx->type->rx_push_rss_config(efx, false,
					   efx->rss_context.rx_indir_table, NULL);
...
@@ -596,6 +596,7 @@ static const struct net_device_ops efx_netdev_ops = {
	.ndo_set_mac_address = efx_set_mac_address,
	.ndo_set_rx_mode = efx_set_rx_mode,
	.ndo_set_features = efx_set_features,
	.ndo_features_check = efx_features_check,
	.ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
...
@@ -11,6 +11,7 @@
#include "net_driver.h"
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/gre.h>
#include "efx_common.h"
#include "efx_channels.h"
#include "efx.h"
@@ -1287,6 +1288,89 @@ const struct pci_error_handlers efx_err_handlers = {
	.resume = efx_io_resume,
};

/* Determine whether the NIC will be able to handle TX offloads for a given
 * encapsulated packet.
 */
static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb)
{
	struct gre_base_hdr *greh;
	__be16 dst_port;
	u8 ipproto;

	/* Does the NIC support encap offloads?
	 * If not, we should never get here, because we shouldn't have
	 * advertised encap offload feature flags in the first place.
	 */
	if (WARN_ON_ONCE(!efx->type->udp_tnl_has_port))
		return false;

	/* Determine encapsulation protocol in use */
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipproto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		/* If there are extension headers, this will cause us to
		 * think we can't offload something that we maybe could have.
		 */
		ipproto = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		/* Not IP, so can't offload it */
		return false;
	}
	switch (ipproto) {
	case IPPROTO_GRE:
		/* We support NVGRE but not IP over GRE or random gretaps.
		 * Specifically, the NIC will accept GRE as encapsulated if
		 * the inner protocol is Ethernet, but only handle it
		 * correctly if the GRE header is 8 bytes long. Moreover,
		 * it will not update the Checksum or Sequence Number fields
		 * if they are present. (The Routing Present flag,
		 * GRE_ROUTING, cannot be set else the header would be more
		 * than 8 bytes long; so we don't have to worry about it.)
		 */
		if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
			return false;
		if (ntohs(skb->inner_protocol) != ETH_P_TEB)
			return false;
		if (skb_inner_mac_header(skb) - skb_transport_header(skb) != 8)
			return false;
		greh = (struct gre_base_hdr *)skb_transport_header(skb);
		return !(greh->flags & (GRE_CSUM | GRE_SEQ));
	case IPPROTO_UDP:
		/* If the port is registered for a UDP tunnel, we assume the
		 * packet is for that tunnel, and the NIC will handle it as
		 * such. If not, the NIC won't know what to do with it.
		 */
		dst_port = udp_hdr(skb)->dest;
		return efx->type->udp_tnl_has_port(efx, dst_port);
	default:
		return false;
	}
}

netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
				     netdev_features_t features)
{
	struct efx_nic *efx = netdev_priv(dev);

	if (skb->encapsulation) {
		if (features & NETIF_F_GSO_MASK)
			/* Hardware can only do TSO with at most 208 bytes
			 * of headers.
			 */
			if (skb_inner_transport_offset(skb) >
			    EFX_TSO2_MAX_HDRLEN)
				features &= ~(NETIF_F_GSO_MASK);
		if (features & (NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK))
			if (!efx_can_encap_offloads(efx, skb))
				features &= ~(NETIF_F_GSO_MASK |
					      NETIF_F_CSUM_MASK);
	}
	return features;
}

int efx_get_phys_port_id(struct net_device *net_dev,
			 struct netdev_phys_item_id *ppid)
{
...
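
As a back-of-the-envelope aid to the two size checks in the hunk above (the 8-byte GRE header requirement in efx_can_encap_offloads() and the 208-byte EFX_TSO2_MAX_HDRLEN budget in efx_features_check()), the following is a standalone sketch rather than driver code; the GRE flag values and header sizes are the standard protocol ones, and the helper names are invented for illustration.

#include <stdio.h>

/* GRE flag bits in the first 16-bit word of the header (RFC 2784/2890);
 * each optional field adds 4 bytes after the 4-byte base header.
 */
#define GRE_FLAG_CSUM	0x8000
#define GRE_FLAG_KEY	0x2000
#define GRE_FLAG_SEQ	0x1000

static unsigned int gre_header_len(unsigned int flags)
{
	unsigned int len = 4;		/* flags + protocol */

	if (flags & GRE_FLAG_CSUM)
		len += 4;		/* checksum + reserved */
	if (flags & GRE_FLAG_KEY)
		len += 4;		/* key (NVGRE carries the VSID here) */
	if (flags & GRE_FLAG_SEQ)
		len += 4;		/* sequence number */
	return len;
}

int main(void)
{
	/* NVGRE: key present, no checksum/sequence -> exactly 8 bytes,
	 * the only GRE layout the check above lets through.
	 */
	printf("NVGRE header: %u bytes\n", gre_header_len(GRE_FLAG_KEY));
	printf("GRE with csum+key: %u bytes\n",
	       gre_header_len(GRE_FLAG_CSUM | GRE_FLAG_KEY));

	/* Typical VXLAN case: outer Ethernet (14) + outer IPv4 (20) +
	 * UDP (8) + VXLAN (8) + inner Ethernet (14) + inner IPv4 (20)
	 * = 84 bytes up to the inner transport header, well inside the
	 * 208-byte budget that skb_inner_transport_offset() is compared
	 * against in efx_features_check().
	 */
	printf("VXLAN inner transport offset: %d bytes (budget 208)\n",
	       14 + 20 + 8 + 8 + 14 + 20);
	return 0;
}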
@@ -105,6 +105,9 @@ int efx_change_mtu(struct net_device *net_dev, int new_mtu);
extern const struct pci_error_handlers efx_err_handlers;

netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
				     netdev_features_t features);

int efx_get_phys_port_id(struct net_device *net_dev,
			 struct netdev_phys_item_id *ppid);
...