Commit caa2858c authored by David S. Miller

Merge branch 'sfc-more-encap-offloads'

Edward Cree says:

====================
sfc: more encap offloads

This patch series adds support for RX checksum offload of encapsulated packets.
It also adds support for configuring the hardware's lists of UDP ports used for
VXLAN and GENEVE encapsulation offloads.  Since changing these lists causes the
MC to reboot, the driver has also been hardened against MC reboots, which used
to be treated as exceptional events but are now expected in normal operation.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents df6dd79b e5fbd977
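
For orientation: the series hooks into the kernel's generic UDP tunnel port
notifications (.ndo_udp_tunnel_add/.ndo_udp_tunnel_del, wired up in the efx.c
hunks below) and re-pushes the resulting port list to the MC after resets.  A
minimal sketch of how a driver typically has the stack replay already-offloaded
ports when the interface comes up, using the core udp_tunnel_get_rx_info()
helper; example_net_open() is a hypothetical name, not sfc's actual open path:

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

/* Hypothetical open handler (not sfc's efx_net_open()). */
static int example_net_open(struct net_device *net_dev)
{
        /* ...bring up queues, IRQs and filters... */

        /* Ask the core to replay every offloaded UDP tunnel port through
         * .ndo_udp_tunnel_add so the NIC's port list is repopulated.
         */
        udp_tunnel_get_rx_info(net_dev);
        return 0;
}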
......@@ -433,6 +433,9 @@ typedef union efx_oword {
(oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
} while (0)
#define EFX_AND_QWORD(qword, from, mask) \
(qword).u64[0] = (from).u64[0] & (mask).u64[0]
#define EFX_OR_OWORD(oword, from, mask) \
do { \
(oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
......
This diff is collapsed.
......@@ -23,12 +23,15 @@
#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include "efx.h"
#include "nic.h"
#include "selftest.h"
#include "sriov.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "workarounds.h"
/**************************************************************************
......@@ -88,6 +91,21 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_MCDI_TIMEOUT] = "MCDI_TIMEOUT (FLR)",
};
/* UDP tunnel type names */
static const char *const efx_udp_tunnel_type_names[] = {
[TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
[TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve",
};
void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
{
if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) &&
efx_udp_tunnel_type_names[type] != NULL)
snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]);
else
snprintf(buf, buflen, "type %d", type);
}
/* Reset workqueue. If any NIC has a hardware failure then a reset will be
* queued onto this work queue. This is not a per-nic work queue, because
* efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
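
A usage sketch for the efx_get_udp_tunnel_type_name() helper added above
(hypothetical caller; the real callers are presumably in the collapsed ef10.c
diff):

static void example_log_tunnel(struct efx_nic *efx, struct efx_udp_tunnel tnl)
{
        char typebuf[8];

        efx_get_udp_tunnel_type_name(tnl.type, typebuf, sizeof(typebuf));
        netif_info(efx, drv, efx->net_dev, "UDP tunnel (%s) port %d\n",
                   typebuf, ntohs(tnl.port));
}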
......@@ -2336,6 +2354,52 @@ static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vi
return -EOPNOTSUPP;
}
static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in)
{
switch (in) {
case UDP_TUNNEL_TYPE_VXLAN:
return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
case UDP_TUNNEL_TYPE_GENEVE:
return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
default:
return -1;
}
}
static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
{
struct efx_nic *efx = netdev_priv(dev);
struct efx_udp_tunnel tnl;
int efx_tunnel_type;
efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
if (efx_tunnel_type < 0)
return;
tnl.type = (u16)efx_tunnel_type;
tnl.port = ti->port;
if (efx->type->udp_tnl_add_port)
(void)efx->type->udp_tnl_add_port(efx, tnl);
}
static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
{
struct efx_nic *efx = netdev_priv(dev);
struct efx_udp_tunnel tnl;
int efx_tunnel_type;
efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
if (efx_tunnel_type < 0)
return;
tnl.type = (u16)efx_tunnel_type;
tnl.port = ti->port;
if (efx->type->udp_tnl_del_port)
(void)efx->type->udp_tnl_del_port(efx, tnl);
}
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
......@@ -2366,6 +2430,8 @@ static const struct net_device_ops efx_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = efx_filter_rfs,
#endif
.ndo_udp_tunnel_add = efx_udp_tunnel_add,
.ndo_udp_tunnel_del = efx_udp_tunnel_del,
};
static void efx_update_name(struct efx_nic *efx)
......@@ -2605,6 +2671,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
efx_start_all(efx);
if (efx->type->udp_tnl_push_ports)
efx->type->udp_tnl_push_ports(efx);
return 0;
fail:
......@@ -3136,6 +3205,51 @@ static int efx_pci_probe_main(struct efx_nic *efx)
return rc;
}
static int efx_pci_probe_post_io(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
int rc = efx_pci_probe_main(efx);
if (rc)
return rc;
if (efx->type->sriov_init) {
rc = efx->type->sriov_init(efx);
if (rc)
netif_err(efx, probe, efx->net_dev,
"SR-IOV can't be enabled rc %d\n", rc);
}
/* Determine netdevice features */
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_RXCSUM);
if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
net_dev->features |= NETIF_F_TSO6;
/* Check whether device supports TSO */
if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
net_dev->features &= ~NETIF_F_ALL_TSO;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
NETIF_F_RXCSUM);
net_dev->hw_features = net_dev->features & ~efx->fixed_features;
/* Disable VLAN filtering by default. It may be enforced if
* the feature is fixed (i.e. VLAN filters are required to
* receive VLAN tagged packets due to vPort restrictions).
*/
net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
net_dev->features |= efx->fixed_features;
rc = efx_register_netdev(efx);
if (!rc)
return 0;
efx_pci_remove_main(efx);
return rc;
}
/* NIC initialisation
*
* This is called at module load (or hotplug insertion,
......@@ -3178,42 +3292,28 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
if (rc)
goto fail2;
rc = efx_pci_probe_main(efx);
rc = efx_pci_probe_post_io(efx);
if (rc) {
/* On failure, retry once immediately.
* If we aborted probe due to a scheduled reset, dismiss it.
*/
efx->reset_pending = 0;
rc = efx_pci_probe_post_io(efx);
if (rc) {
/* On another failure, retry once more
* after a 50-305ms delay.
*/
unsigned char r;
get_random_bytes(&r, 1);
msleep((unsigned int)r + 50);
efx->reset_pending = 0;
rc = efx_pci_probe_post_io(efx);
}
}
if (rc)
goto fail3;
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_RXCSUM);
if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
net_dev->features |= NETIF_F_TSO6;
/* Check whether device supports TSO */
if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
net_dev->features &= ~NETIF_F_ALL_TSO;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
NETIF_F_RXCSUM);
net_dev->hw_features = net_dev->features & ~efx->fixed_features;
/* Disable VLAN filtering by default. It may be enforced if
* the feature is fixed (i.e. VLAN filters are required to
* receive VLAN tagged packets due to vPort restrictions).
*/
net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
net_dev->features |= efx->fixed_features;
rc = efx_register_netdev(efx);
if (rc)
goto fail4;
if (efx->type->sriov_init) {
rc = efx->type->sriov_init(efx);
if (rc)
netif_err(efx, probe, efx->net_dev,
"SR-IOV can't be enabled rc %d\n", rc);
}
netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
/* Try to create MTDs, but allow this to fail */
......@@ -3230,10 +3330,11 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
"PCIE error reporting unavailable (%d).\n",
rc);
if (efx->type->udp_tnl_push_ports)
efx->type->udp_tnl_push_ports(efx);
return 0;
fail4:
efx_pci_remove_main(efx);
fail3:
efx_fini_io(efx);
fail2:
......
......@@ -77,6 +77,11 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_ip_hdr_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_inner_tcp_udp_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_ip_hdr_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_outer_tcp_udp_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_eth_crc_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
......
......@@ -128,7 +128,7 @@ int efx_mcdi_init(struct efx_nic *efx)
return rc;
}
void efx_mcdi_fini(struct efx_nic *efx)
void efx_mcdi_detach(struct efx_nic *efx)
{
if (!efx->mcdi)
return;
......@@ -137,6 +137,12 @@ void efx_mcdi_fini(struct efx_nic *efx)
/* Relinquish the device (back to the BMC, if this is a LOM) */
efx_mcdi_drv_attach(efx, false, NULL);
}
void efx_mcdi_fini(struct efx_nic *efx)
{
if (!efx->mcdi)
return;
#ifdef CONFIG_SFC_MCDI_LOGGING
free_page((unsigned long)efx->mcdi->iface.logging_buffer);
......@@ -716,8 +722,11 @@ static int _efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
if (cmd == MC_CMD_REBOOT && rc == -EIO) {
/* Don't reset if MC_CMD_REBOOT returns EIO */
} else if (rc == -EIO || rc == -EINTR) {
netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n",
-rc);
netif_err(efx, hw, efx->net_dev, "MC reboot detected\n");
netif_dbg(efx, hw, efx->net_dev, "MC rebooted during command %d rc %d\n",
cmd, -rc);
if (efx->type->mcdi_reboot_detected)
efx->type->mcdi_reboot_detected(efx);
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
} else if (proxy_handle && (rc == -EPROTO) &&
efx_mcdi_get_proxy_handle(efx, hdr_len, data_len,
......
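
With efx_mcdi_fini() split into efx_mcdi_detach() plus efx_mcdi_fini(), callers
now relinquish the device to the MC/BMC separately from freeing MCDI state.  A
sketch of the expected teardown ordering (example_remove_nic() is hypothetical;
the real change to siena_remove_nic() appears further down):

static void example_remove_nic(struct efx_nic *efx)
{
        /* ...free NIC-specific resources first... */

        /* Hand the port back to the MC/BMC, then free the MCDI state
         * (logging buffer etc.).
         */
        efx_mcdi_detach(efx);
        efx_mcdi_fini(efx);
}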
......@@ -142,6 +142,7 @@ static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
#endif
int efx_mcdi_init(struct efx_nic *efx);
void efx_mcdi_detach(struct efx_nic *efx);
void efx_mcdi_fini(struct efx_nic *efx);
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const efx_dword_t *inbuf,
......
......@@ -11913,6 +11913,27 @@
#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
/* TUNNEL_ENCAP_UDP_PORT_ENTRY structuredef */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_LEN 4
/* UDP port (the standard ports are named below but any port may be used) */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_OFST 0
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LEN 2
/* enum: the IANA allocated UDP port for VXLAN */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_VXLAN_UDP_PORT 0x12b5
/* enum: the IANA allocated UDP port for Geneve */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_IANA_GENEVE_UDP_PORT 0x17c1
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN 0
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_WIDTH 16
/* tunnel encapsulation protocol (only those named below are supported) */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_OFST 2
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LEN 2
/* enum: VXLAN */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN 0x0
/* enum: Geneve */
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE 0x1
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN 16
#define TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_WIDTH 16
/***********************************/
/* MC_CMD_RX_BALANCING
......
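
Per the LBN/WIDTH definitions above, each 32-bit entry carries the UDP port in
bits 0-15 and the protocol enum in bits 16-31.  A sketch of the packing with
open-coded shifts, for illustration only (the driver itself presumably builds
these fields with its MCDI EFX_POPULATE_* macros):

static u32 example_encode_tunnel_entry(__be16 port, u16 protocol)
{
        /* Assumes the port is presented to MCDI as a host-order number. */
        return ((u32)protocol << TUNNEL_ENCAP_UDP_PORT_ENTRY_PROTOCOL_LBN) |
               ((u32)ntohs(port) << TUNNEL_ENCAP_UDP_PORT_ENTRY_UDP_PORT_LBN);
}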
......@@ -307,6 +307,7 @@ struct efx_rx_buffer {
#define EFX_RX_PKT_DISCARD 0x0004
#define EFX_RX_PKT_TCP 0x0040
#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */
#define EFX_RX_PKT_CSUM_LEVEL 0x0200
/**
* struct efx_rx_page_state - Page-based rx buffer state
......@@ -469,13 +470,18 @@ struct efx_channel {
u32 *rps_flow_id;
#endif
unsigned n_rx_tobe_disc;
unsigned n_rx_ip_hdr_chksum_err;
unsigned n_rx_tcp_udp_chksum_err;
unsigned n_rx_mcast_mismatch;
unsigned n_rx_frm_trunc;
unsigned n_rx_overlength;
unsigned n_skbuff_leaks;
unsigned int n_rx_tobe_disc;
unsigned int n_rx_ip_hdr_chksum_err;
unsigned int n_rx_tcp_udp_chksum_err;
unsigned int n_rx_outer_ip_hdr_chksum_err;
unsigned int n_rx_outer_tcp_udp_chksum_err;
unsigned int n_rx_inner_ip_hdr_chksum_err;
unsigned int n_rx_inner_tcp_udp_chksum_err;
unsigned int n_rx_eth_crc_err;
unsigned int n_rx_mcast_mismatch;
unsigned int n_rx_frm_trunc;
unsigned int n_rx_overlength;
unsigned int n_skbuff_leaks;
unsigned int n_rx_nodesc_trunc;
unsigned int n_rx_merge_events;
unsigned int n_rx_merge_packets;
......@@ -548,6 +554,8 @@ extern const unsigned int efx_reset_type_max;
#define RESET_TYPE(type) \
STRING_TABLE_LOOKUP(type, efx_reset_type)
void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen);
enum efx_int_mode {
/* Be careful if altering to correct macro below */
EFX_INT_MODE_MSIX = 0,
......@@ -987,6 +995,15 @@ struct efx_mtd_partition {
char name[IFNAMSIZ + 20];
};
struct efx_udp_tunnel {
u16 type; /* TUNNEL_ENCAP_UDP_PORT_ENTRY_foo, see mcdi_pcol.h */
__be16 port;
/* Count of repeated adds of the same port. Used only inside the list,
* not in request arguments.
*/
u16 count;
};
/**
* struct efx_nic_type - Efx device type definition
* @mem_bar: Get the memory BAR
......@@ -1107,6 +1124,10 @@ struct efx_mtd_partition {
* @set_mac_address: Set the MAC address of the device
* @tso_versions: Returns mask of firmware-assisted TSO versions supported.
* If %NULL, then device does not support any TSO version.
* @udp_tnl_push_ports: Push the list of UDP tunnel ports to the NIC if required.
* @udp_tnl_add_port: Add a UDP tunnel port
* @udp_tnl_has_port: Check if a port has been added as UDP tunnel
* @udp_tnl_del_port: Remove a UDP tunnel port
* @revision: Hardware architecture revision
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
......@@ -1266,6 +1287,10 @@ struct efx_nic_type {
int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
int (*set_mac_address)(struct efx_nic *efx);
u32 (*tso_versions)(struct efx_nic *efx);
int (*udp_tnl_push_ports)(struct efx_nic *efx);
int (*udp_tnl_add_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
bool (*udp_tnl_has_port)(struct efx_nic *efx, __be16 port);
int (*udp_tnl_del_port)(struct efx_nic *efx, struct efx_udp_tunnel tnl);
int revision;
unsigned int txd_ptr_tbl_base;
......
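
The 'count' field of struct efx_udp_tunnel above allows the same port to be
added more than once (e.g. by several tunnel netdevices) without duplicating
entries.  A sketch of how such a table might be maintained (assumed logic, not
taken from the collapsed ef10.c diff):

static int example_add_port(struct efx_udp_tunnel *table, size_t n,
                            struct efx_udp_tunnel tnl)
{
        size_t i;

        /* Duplicate add of an existing port: just bump the refcount. */
        for (i = 0; i < n; i++) {
                if (table[i].count && table[i].port == tnl.port) {
                        table[i].count++;
                        return 0;
                }
        }
        /* Otherwise take a free slot; the caller then pushes the list. */
        for (i = 0; i < n; i++) {
                if (!table[i].count) {
                        table[i] = tnl;
                        table[i].count = 1;
                        return 0;
                }
        }
        return -ENOSPC;         /* table full */
}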
......@@ -369,6 +369,10 @@ enum {
* @vport_mac: The MAC address on the vport, only for PFs; VFs will be zero
* @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock.
* @vlan_lock: Lock to serialize access to vlan_list.
* @udp_tunnels: UDP tunnel port numbers and types.
* @udp_tunnels_dirty: flag indicating a reboot occurred while pushing
* @udp_tunnels to hardware and thus the push must be re-done.
* @udp_tunnels_lock: Serialises writes to @udp_tunnels and @udp_tunnels_dirty.
*/
struct efx_ef10_nic_data {
struct efx_buffer mcdi_buf;
......@@ -405,6 +409,9 @@ struct efx_ef10_nic_data {
u8 vport_mac[ETH_ALEN];
struct list_head vlan_list;
struct mutex vlan_lock;
struct efx_udp_tunnel udp_tunnels[16];
bool udp_tunnels_dirty;
struct mutex udp_tunnels_lock;
};
int efx_init_sriov(void);
......
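
A sketch of the push-with-dirty-flag pattern that @udp_tunnels_dirty describes
(assumed shape only; the real implementation lives in the collapsed ef10.c
diff, and example_mcdi_set_ports() is a hypothetical stand-in for sending
MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS):

static int example_push_ports(struct efx_nic *efx)
{
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        int rc;

        mutex_lock(&nic_data->udp_tunnels_lock);
        nic_data->udp_tunnels_dirty = false;
        rc = example_mcdi_set_ports(efx, nic_data->udp_tunnels,
                                    ARRAY_SIZE(nic_data->udp_tunnels));
        if (rc)
                /* e.g. the MC rebooted mid-command; mark for a re-push */
                nic_data->udp_tunnels_dirty = true;
        mutex_unlock(&nic_data->udp_tunnels_lock);
        return rc;
}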
......@@ -434,6 +434,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
PKT_HASH_TYPE_L3);
skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
for (;;) {
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
......@@ -621,8 +622,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
/* Set the SKB flags */
skb_checksum_none_assert(skb);
if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
}
efx_rx_skb_attach_timestamp(channel, skb);
......
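
On receive, skb->csum_level counts how many encapsulated checksums, beyond the
outermost one, were verified as CHECKSUM_UNNECESSARY.  A sketch of the
convention (hypothetical helper, not sfc code):

static void example_mark_rx_csum(struct sk_buff *skb, bool inner_csum_ok)
{
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        /* csum_level 1 means one further encapsulated checksum (e.g. the
         * inner TCP/UDP checksum of a VXLAN frame) was also verified.
         */
        skb->csum_level = inner_csum_ok ? 1 : 0;
}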
......@@ -326,6 +326,7 @@ static int siena_probe_nic(struct efx_nic *efx)
efx_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
efx_mcdi_detach(efx);
efx_mcdi_fini(efx);
fail1:
kfree(efx->nic_data);
......@@ -450,6 +451,7 @@ static void siena_remove_nic(struct efx_nic *efx)
efx_mcdi_reset(efx, RESET_TYPE_ALL);
efx_mcdi_detach(efx);
efx_mcdi_fini(efx);
/* Tear down the private nic state */
......