Commit 255e8765 authored by David S. Miller

Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next

Ben Hutchings says:

====================
1. Change the TX path to stop queues earlier and avoid returning
NETDEV_TX_BUSY.
2. Remove some inefficiencies in soft-TSO.
3. Fix various bugs involving device state transitions and/or reset
scheduling by error handlers.
4. Take advantage of my previous change to operstate initialisation.
5. Miscellaneous cleanup.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 85c21049 8f8b3d51
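
As a rough illustration of item 1 above (this sketch is not part of the patch and uses invented names and numbers), the following stand-alone C program models the new stop-early/wake-at-half scheme: efx_start_datapath() computes efx->txq_stop_thresh from efx_tx_max_skb_descs() so the queue is stopped while one worst-case skb still fits in the ring, and efx_xmit_done() wakes it once the fill level falls to efx->txq_wake_thresh (half the stop threshold), so the xmit path never has to return NETDEV_TX_BUSY.

#include <stdbool.h>
#include <stdio.h>

struct txq_model {
	unsigned int entries;      /* ring size requested by the user */
	unsigned int stop_thresh;  /* stop the queue at or above this fill level */
	unsigned int wake_thresh;  /* wake the queue at or below this fill level */
	unsigned int insert_count; /* descriptors queued by the xmit path */
	unsigned int read_count;   /* descriptors completed by the NIC */
	bool stopped;
};

/* Mirrors the calculation the series adds to efx_start_datapath():
 * leave room for one worst-case (maximally fragmented) skb.
 */
static void txq_model_init(struct txq_model *q, unsigned int entries,
			   unsigned int max_skb_descs)
{
	q->entries = entries;
	q->stop_thresh = entries - max_skb_descs;
	q->wake_thresh = q->stop_thresh / 2;
	q->insert_count = 0;
	q->read_count = 0;
	q->stopped = false;
}

static unsigned int txq_fill_level(const struct txq_model *q)
{
	return q->insert_count - q->read_count;
}

/* Xmit path: queue the descriptors, then stop if one more worst-case
 * packet might not fit.  (The driver additionally re-reads the real
 * read_count under a memory barrier to close the race with the
 * completion path; that refinement is omitted here.)
 */
static void txq_model_xmit(struct txq_model *q, unsigned int descs)
{
	q->insert_count += descs;
	if (txq_fill_level(q) >= q->stop_thresh)
		q->stopped = true;
}

/* Completion path: wake once the fill level has fallen to half the
 * stop threshold, as efx_xmit_done() does with txq_wake_thresh.
 */
static void txq_model_complete(struct txq_model *q, unsigned int descs)
{
	q->read_count += descs;
	if (q->stopped && txq_fill_level(q) <= q->wake_thresh)
		q->stopped = false;
}

int main(void)
{
	struct txq_model q;

	txq_model_init(&q, 1024, 19);	/* 19 is an arbitrary example per-skb maximum */
	txq_model_xmit(&q, 1010);
	printf("fill=%u stopped=%d\n", txq_fill_level(&q), q.stopped);
	txq_model_complete(&q, 600);
	printf("fill=%u stopped=%d\n", txq_fill_level(&q), q.stopped);
	return 0;
}
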
@@ -202,11 +202,21 @@ static void efx_stop_all(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \ #define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \ do { \
if ((efx->state == STATE_RUNNING) || \ if ((efx->state == STATE_READY) || \
(efx->state == STATE_DISABLED)) \ (efx->state == STATE_DISABLED)) \
ASSERT_RTNL(); \ ASSERT_RTNL(); \
} while (0) } while (0)
static int efx_check_disabled(struct efx_nic *efx)
{
if (efx->state == STATE_DISABLED) {
netif_err(efx, drv, efx->net_dev,
"device is disabled due to earlier errors\n");
return -EIO;
}
return 0;
}
/************************************************************************** /**************************************************************************
* *
* Event queue processing * Event queue processing
@@ -630,6 +640,16 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_buffer_order = get_order(efx->rx_buffer_len + efx->rx_buffer_order = get_order(efx->rx_buffer_len +
sizeof(struct efx_rx_page_state)); sizeof(struct efx_rx_page_state));
/* We must keep at least one descriptor in a TX ring empty.
* We could avoid this when the queue size does not exactly
* match the hardware ring size, but it's not that important.
* Therefore we stop the queue when one more skb might fill
* the ring completely. We wake it when half way back to
* empty.
*/
efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
/* Initialise the channels */ /* Initialise the channels */
efx_for_each_channel(channel, efx) { efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel) efx_for_each_channel_tx_queue(tx_queue, channel)
@@ -730,7 +750,11 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
u32 old_rxq_entries, old_txq_entries; u32 old_rxq_entries, old_txq_entries;
unsigned i, next_buffer_table = 0; unsigned i, next_buffer_table = 0;
int rc = 0; int rc;
rc = efx_check_disabled(efx);
if (rc)
return rc;
/* Not all channels should be reallocated. We must avoid /* Not all channels should be reallocated. We must avoid
* reallocating their buffer table entries. * reallocating their buffer table entries.
@@ -1365,6 +1389,8 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{ {
struct efx_channel *channel; struct efx_channel *channel;
BUG_ON(efx->state == STATE_DISABLED);
if (efx->legacy_irq) if (efx->legacy_irq)
efx->legacy_irq_enabled = true; efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx); efx_nic_enable_interrupts(efx);
@@ -1382,6 +1408,9 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
{ {
struct efx_channel *channel; struct efx_channel *channel;
if (efx->state == STATE_DISABLED)
return;
efx_mcdi_mode_poll(efx); efx_mcdi_mode_poll(efx);
efx_nic_disable_interrupts(efx); efx_nic_disable_interrupts(efx);
@@ -1533,22 +1562,21 @@ static int efx_probe_all(struct efx_nic *efx)
return rc; return rc;
} }
/* Called after previous invocation(s) of efx_stop_all, restarts the port, /* If the interface is supposed to be running but is not, start
* kernel transmit queues and NAPI processing, and ensures that the port is * the hardware and software data path, regular activity for the port
* scheduled to be reconfigured. This function is safe to call multiple * (MAC statistics, link polling, etc.) and schedule the port to be
* times when the NIC is in any state. * reconfigured. Interrupts must already be enabled. This function
* is safe to call multiple times, so long as the NIC is not disabled.
* Requires the RTNL lock.
*/ */
static void efx_start_all(struct efx_nic *efx) static void efx_start_all(struct efx_nic *efx)
{ {
EFX_ASSERT_RESET_SERIALISED(efx); EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->state == STATE_DISABLED);
/* Check that it is appropriate to restart the interface. All /* Check that it is appropriate to restart the interface. All
* of these flags are safe to read under just the rtnl lock */ * of these flags are safe to read under just the rtnl lock */
if (efx->port_enabled) if (efx->port_enabled || !netif_running(efx->net_dev))
return;
if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
return;
if (!netif_running(efx->net_dev))
return; return;
efx_start_port(efx); efx_start_port(efx);
@@ -1582,11 +1610,11 @@ static void efx_flush_all(struct efx_nic *efx)
cancel_work_sync(&efx->mac_work); cancel_work_sync(&efx->mac_work);
} }
/* Quiesce hardware and software without bringing the link down. /* Quiesce the hardware and software data path, and regular activity
* Safe to call multiple times, when the nic and interface is in any * for the port without bringing the link down. Safe to call multiple
* state. The caller is guaranteed to subsequently be in a position * times with the NIC in almost any state, but interrupts should be
* to modify any hardware and software state they see fit without * enabled. Requires the RTNL lock.
* taking locks. */ */
static void efx_stop_all(struct efx_nic *efx) static void efx_stop_all(struct efx_nic *efx)
{ {
EFX_ASSERT_RESET_SERIALISED(efx); EFX_ASSERT_RESET_SERIALISED(efx);
@@ -1739,8 +1767,6 @@ static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
struct mii_ioctl_data *data = if_mii(ifr); struct mii_ioctl_data *data = if_mii(ifr);
EFX_ASSERT_RESET_SERIALISED(efx);
/* Convert phy_id from older PRTAD/DEVAD format */ /* Convert phy_id from older PRTAD/DEVAD format */
if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
(data->phy_id & 0xfc00) == 0x0400) (data->phy_id & 0xfc00) == 0x0400)
@@ -1820,13 +1846,14 @@ static void efx_netpoll(struct net_device *net_dev)
static int efx_net_open(struct net_device *net_dev) static int efx_net_open(struct net_device *net_dev)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
EFX_ASSERT_RESET_SERIALISED(efx); int rc;
netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
raw_smp_processor_id()); raw_smp_processor_id());
if (efx->state == STATE_DISABLED) rc = efx_check_disabled(efx);
return -EIO; if (rc)
return rc;
if (efx->phy_mode & PHY_MODE_SPECIAL) if (efx->phy_mode & PHY_MODE_SPECIAL)
return -EBUSY; return -EBUSY;
if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
@@ -1852,10 +1879,8 @@ static int efx_net_stop(struct net_device *net_dev)
netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
raw_smp_processor_id()); raw_smp_processor_id());
if (efx->state != STATE_DISABLED) { /* Stop the device and flush all the channels */
/* Stop the device and flush all the channels */ efx_stop_all(efx);
efx_stop_all(efx);
}
return 0; return 0;
} }
@@ -1915,9 +1940,11 @@ static void efx_watchdog(struct net_device *net_dev)
static int efx_change_mtu(struct net_device *net_dev, int new_mtu) static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
int rc;
EFX_ASSERT_RESET_SERIALISED(efx); rc = efx_check_disabled(efx);
if (rc)
return rc;
if (new_mtu > EFX_MAX_MTU) if (new_mtu > EFX_MAX_MTU)
return -EINVAL; return -EINVAL;
@@ -1926,8 +1953,6 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
/* Reconfigure the MAC before enabling the dma queues so that
* the RX buffers don't overflow */
net_dev->mtu = new_mtu; net_dev->mtu = new_mtu;
efx->type->reconfigure_mac(efx); efx->type->reconfigure_mac(efx);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
@@ -1942,8 +1967,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
struct sockaddr *addr = data; struct sockaddr *addr = data;
char *new_addr = addr->sa_data; char *new_addr = addr->sa_data;
EFX_ASSERT_RESET_SERIALISED(efx);
if (!is_valid_ether_addr(new_addr)) { if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev, netif_err(efx, drv, efx->net_dev,
"invalid ethernet MAC address requested: %pM\n", "invalid ethernet MAC address requested: %pM\n",
@@ -2079,11 +2102,27 @@ static int efx_register_netdev(struct efx_nic *efx)
rtnl_lock(); rtnl_lock();
/* Enable resets to be scheduled and check whether any were
* already requested. If so, the NIC is probably hosed so we
* abort.
*/
efx->state = STATE_READY;
smp_mb(); /* ensure we change state before checking reset_pending */
if (efx->reset_pending) {
netif_err(efx, probe, efx->net_dev,
"aborting probe due to scheduled reset\n");
rc = -EIO;
goto fail_locked;
}
rc = dev_alloc_name(net_dev, net_dev->name); rc = dev_alloc_name(net_dev, net_dev->name);
if (rc < 0) if (rc < 0)
goto fail_locked; goto fail_locked;
efx_update_name(efx); efx_update_name(efx);
/* Always start with carrier off; PHY events will detect the link */
netif_carrier_off(net_dev);
rc = register_netdevice(net_dev); rc = register_netdevice(net_dev);
if (rc) if (rc)
goto fail_locked; goto fail_locked;
@@ -2094,9 +2133,6 @@ static int efx_register_netdev(struct efx_nic *efx)
efx_init_tx_queue_core_txq(tx_queue); efx_init_tx_queue_core_txq(tx_queue);
} }
/* Always start with carrier off; PHY events will detect the link */
netif_carrier_off(net_dev);
rtnl_unlock(); rtnl_unlock();
rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2108,14 +2144,14 @@ static int efx_register_netdev(struct efx_nic *efx)
return 0; return 0;
fail_registered:
rtnl_lock();
unregister_netdevice(net_dev);
fail_locked: fail_locked:
efx->state = STATE_UNINIT;
rtnl_unlock(); rtnl_unlock();
netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
return rc; return rc;
fail_registered:
unregister_netdev(net_dev);
return rc;
} }
static void efx_unregister_netdev(struct efx_nic *efx) static void efx_unregister_netdev(struct efx_nic *efx)
@@ -2138,7 +2174,11 @@ static void efx_unregister_netdev(struct efx_nic *efx)
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
unregister_netdev(efx->net_dev);
rtnl_lock();
unregister_netdevice(efx->net_dev);
efx->state = STATE_UNINIT;
rtnl_unlock();
} }
/************************************************************************** /**************************************************************************
@@ -2154,9 +2194,9 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
EFX_ASSERT_RESET_SERIALISED(efx); EFX_ASSERT_RESET_SERIALISED(efx);
efx_stop_all(efx); efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
efx_stop_interrupts(efx, false); efx_stop_interrupts(efx, false);
mutex_lock(&efx->mac_lock);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
efx->phy_op->fini(efx); efx->phy_op->fini(efx);
efx->type->fini(efx); efx->type->fini(efx);
@@ -2276,16 +2316,15 @@ static void efx_reset_work(struct work_struct *data)
if (!pending) if (!pending)
return; return;
/* If we're not RUNNING then don't reset. Leave the reset_pending
* flags set so that efx_pci_probe_main will be retried */
if (efx->state != STATE_RUNNING) {
netif_info(efx, drv, efx->net_dev,
"scheduled reset quenched. NIC not RUNNING\n");
return;
}
rtnl_lock(); rtnl_lock();
(void)efx_reset(efx, fls(pending) - 1);
/* We checked the state in efx_schedule_reset() but it may
* have changed by now. Now that we have the RTNL lock,
* it cannot change again.
*/
if (efx->state == STATE_READY)
(void)efx_reset(efx, fls(pending) - 1);
rtnl_unlock(); rtnl_unlock();
} }
@@ -2311,6 +2350,13 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
} }
set_bit(method, &efx->reset_pending); set_bit(method, &efx->reset_pending);
smp_mb(); /* ensure we change reset_pending before checking state */
/* If we're not READY then just leave the flags set as the cue
* to abort probing or reschedule the reset later.
*/
if (ACCESS_ONCE(efx->state) != STATE_READY)
return;
/* efx_process_channel() will no longer read events once a /* efx_process_channel() will no longer read events once a
* reset is scheduled. So switch back to poll'd MCDI completions. */ * reset is scheduled. So switch back to poll'd MCDI completions. */
@@ -2376,13 +2422,12 @@ static const struct efx_phy_operations efx_dummy_phy_operations = {
/* This zeroes out and then fills in the invariants in a struct /* This zeroes out and then fills in the invariants in a struct
* efx_nic (including all sub-structures). * efx_nic (including all sub-structures).
*/ */
static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, static int efx_init_struct(struct efx_nic *efx,
struct pci_dev *pci_dev, struct net_device *net_dev) struct pci_dev *pci_dev, struct net_device *net_dev)
{ {
int i; int i;
/* Initialise common structures */ /* Initialise common structures */
memset(efx, 0, sizeof(*efx));
spin_lock_init(&efx->biu_lock); spin_lock_init(&efx->biu_lock);
#ifdef CONFIG_SFC_MTD #ifdef CONFIG_SFC_MTD
INIT_LIST_HEAD(&efx->mtd_list); INIT_LIST_HEAD(&efx->mtd_list);
@@ -2392,7 +2437,7 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work); INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
efx->pci_dev = pci_dev; efx->pci_dev = pci_dev;
efx->msg_enable = debug; efx->msg_enable = debug;
efx->state = STATE_INIT; efx->state = STATE_UNINIT;
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev; efx->net_dev = net_dev;
@@ -2409,8 +2454,6 @@ static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
goto fail; goto fail;
} }
efx->type = type;
EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
/* Higher numbered interrupt modes are less capable! */ /* Higher numbered interrupt modes are less capable! */
@@ -2455,6 +2498,12 @@ static void efx_fini_struct(struct efx_nic *efx)
*/ */
static void efx_pci_remove_main(struct efx_nic *efx) static void efx_pci_remove_main(struct efx_nic *efx)
{ {
/* Flush reset_work. It can no longer be scheduled since we
* are not READY.
*/
BUG_ON(efx->state == STATE_READY);
cancel_work_sync(&efx->reset_work);
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
efx->net_dev->rx_cpu_rmap = NULL; efx->net_dev->rx_cpu_rmap = NULL;
@@ -2480,24 +2529,15 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Mark the NIC as fini, then stop the interface */ /* Mark the NIC as fini, then stop the interface */
rtnl_lock(); rtnl_lock();
efx->state = STATE_FINI;
dev_close(efx->net_dev); dev_close(efx->net_dev);
efx_stop_interrupts(efx, false);
/* Allow any queued efx_resets() to complete */
rtnl_unlock(); rtnl_unlock();
efx_stop_interrupts(efx, false);
efx_sriov_fini(efx); efx_sriov_fini(efx);
efx_unregister_netdev(efx); efx_unregister_netdev(efx);
efx_mtd_remove(efx); efx_mtd_remove(efx);
/* Wait for any scheduled resets to complete. No more will be
* scheduled from this point because efx_stop_all() has been
* called, we are no longer registered with driverlink, and
* the net_device's have been removed. */
cancel_work_sync(&efx->reset_work);
efx_pci_remove_main(efx); efx_pci_remove_main(efx);
efx_fini_io(efx); efx_fini_io(efx);
@@ -2617,7 +2657,6 @@ static int efx_pci_probe_main(struct efx_nic *efx)
static int __devinit efx_pci_probe(struct pci_dev *pci_dev, static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry) const struct pci_device_id *entry)
{ {
const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
struct net_device *net_dev; struct net_device *net_dev;
struct efx_nic *efx; struct efx_nic *efx;
int rc; int rc;
@@ -2627,10 +2666,12 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
EFX_MAX_RX_QUEUES); EFX_MAX_RX_QUEUES);
if (!net_dev) if (!net_dev)
return -ENOMEM; return -ENOMEM;
net_dev->features |= (type->offload_features | NETIF_F_SG | efx = netdev_priv(net_dev);
efx->type = (const struct efx_nic_type *) entry->driver_data;
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO | NETIF_F_HIGHDMA | NETIF_F_TSO |
NETIF_F_RXCSUM); NETIF_F_RXCSUM);
if (type->offload_features & NETIF_F_V6_CSUM) if (efx->type->offload_features & NETIF_F_V6_CSUM)
net_dev->features |= NETIF_F_TSO6; net_dev->features |= NETIF_F_TSO6;
/* Mask for features that also apply to VLAN devices */ /* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
@@ -2638,10 +2679,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
NETIF_F_RXCSUM); NETIF_F_RXCSUM);
/* All offloads can be toggled */ /* All offloads can be toggled */
net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
efx = netdev_priv(net_dev);
pci_set_drvdata(pci_dev, efx); pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev); SET_NETDEV_DEV(net_dev, &pci_dev->dev);
rc = efx_init_struct(efx, type, pci_dev, net_dev); rc = efx_init_struct(efx, pci_dev, net_dev);
if (rc) if (rc)
goto fail1; goto fail1;
@@ -2656,28 +2696,9 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
goto fail2; goto fail2;
rc = efx_pci_probe_main(efx); rc = efx_pci_probe_main(efx);
/* Serialise against efx_reset(). No more resets will be
* scheduled since efx_stop_all() has been called, and we have
* not and never have been registered.
*/
cancel_work_sync(&efx->reset_work);
if (rc) if (rc)
goto fail3; goto fail3;
/* If there was a scheduled reset during probe, the NIC is
* probably hosed anyway.
*/
if (efx->reset_pending) {
rc = -EIO;
goto fail4;
}
/* Switch to the running state before we expose the device to the OS,
* so that dev_open()|efx_start_all() will actually start the device */
efx->state = STATE_RUNNING;
rc = efx_register_netdev(efx); rc = efx_register_netdev(efx);
if (rc) if (rc)
goto fail4; goto fail4;
@@ -2717,12 +2738,18 @@ static int efx_pm_freeze(struct device *dev)
{ {
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
efx->state = STATE_FINI; rtnl_lock();
netif_device_detach(efx->net_dev); if (efx->state != STATE_DISABLED) {
efx->state = STATE_UNINIT;
efx_stop_all(efx); netif_device_detach(efx->net_dev);
efx_stop_interrupts(efx, false);
efx_stop_all(efx);
efx_stop_interrupts(efx, false);
}
rtnl_unlock();
return 0; return 0;
} }
@@ -2731,21 +2758,25 @@ static int efx_pm_thaw(struct device *dev)
{ {
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
efx->state = STATE_INIT; rtnl_lock();
efx_start_interrupts(efx, false); if (efx->state != STATE_DISABLED) {
efx_start_interrupts(efx, false);
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx); efx->phy_op->reconfigure(efx);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
efx_start_all(efx); efx_start_all(efx);
netif_device_attach(efx->net_dev); netif_device_attach(efx->net_dev);
efx->state = STATE_RUNNING; efx->state = STATE_READY;
efx->type->resume_wol(efx); efx->type->resume_wol(efx);
}
rtnl_unlock();
/* Reschedule any quenched resets scheduled during efx_pm_freeze() */ /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
queue_work(reset_workqueue, &efx->reset_work); queue_work(reset_workqueue, &efx->reset_work);
......
@@ -529,9 +529,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (!efx_tests) if (!efx_tests)
goto fail; goto fail;
if (efx->state != STATE_READY) {
ASSERT_RTNL();
if (efx->state != STATE_RUNNING) {
rc = -EIO; rc = -EIO;
goto fail1; goto fail1;
} }
......
@@ -380,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
new_mode = PHY_MODE_SPECIAL; new_mode = PHY_MODE_SPECIAL;
if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) { if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
err = 0; err = 0;
} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { } else if (efx->state != STATE_READY || netif_running(efx->net_dev)) {
err = -EBUSY; err = -EBUSY;
} else { } else {
/* Reset the PHY, reconfigure the MAC and enable/disable /* Reset the PHY, reconfigure the MAC and enable/disable
......
@@ -91,29 +91,31 @@ struct efx_special_buffer {
}; };
/** /**
* struct efx_tx_buffer - An Efx TX buffer * struct efx_tx_buffer - buffer state for a TX descriptor
* @skb: The associated socket buffer. * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
* Set only on the final fragment of a packet; %NULL for all other * freed when descriptor completes
* fragments. When this fragment completes, then we can free this * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
* skb. * freed when descriptor completes.
* @tsoh: The associated TSO header structure, or %NULL if this
* buffer is not a TSO header.
* @dma_addr: DMA address of the fragment. * @dma_addr: DMA address of the fragment.
* @flags: Flags for allocation and DMA mapping type
* @len: Length of this fragment. * @len: Length of this fragment.
* This field is zero when the queue slot is empty. * This field is zero when the queue slot is empty.
* @continuation: True if this fragment is not the end of a packet.
* @unmap_single: True if dma_unmap_single should be used.
* @unmap_len: Length of this fragment to unmap * @unmap_len: Length of this fragment to unmap
*/ */
struct efx_tx_buffer { struct efx_tx_buffer {
const struct sk_buff *skb; union {
struct efx_tso_header *tsoh; const struct sk_buff *skb;
void *heap_buf;
};
dma_addr_t dma_addr; dma_addr_t dma_addr;
unsigned short flags;
unsigned short len; unsigned short len;
bool continuation;
bool unmap_single;
unsigned short unmap_len; unsigned short unmap_len;
}; };
#define EFX_TX_BUF_CONT 1 /* not last descriptor of packet */
#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
/** /**
* struct efx_tx_queue - An Efx TX queue * struct efx_tx_queue - An Efx TX queue
@@ -133,6 +135,7 @@ struct efx_tx_buffer {
* @channel: The associated channel * @channel: The associated channel
* @core_txq: The networking core TX queue structure * @core_txq: The networking core TX queue structure
* @buffer: The software buffer ring * @buffer: The software buffer ring
* @tsoh_page: Array of pages of TSO header buffers
* @txd: The hardware descriptor ring * @txd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1. * @ptr_mask: The size of the ring minus 1.
* @initialised: Has hardware queue been initialised? * @initialised: Has hardware queue been initialised?
@@ -156,9 +159,6 @@ struct efx_tx_buffer {
* variable indicates that the queue is full. This is to * variable indicates that the queue is full. This is to
* avoid cache-line ping-pong between the xmit path and the * avoid cache-line ping-pong between the xmit path and the
* completion path. * completion path.
* @tso_headers_free: A list of TSO headers allocated for this TX queue
* that are not in use, and so available for new TSO sends. The list
* is protected by the TX queue lock.
* @tso_bursts: Number of times TSO xmit invoked by kernel * @tso_bursts: Number of times TSO xmit invoked by kernel
* @tso_long_headers: Number of packets with headers too long for standard * @tso_long_headers: Number of packets with headers too long for standard
* blocks * blocks
@@ -175,6 +175,7 @@ struct efx_tx_queue {
struct efx_channel *channel; struct efx_channel *channel;
struct netdev_queue *core_txq; struct netdev_queue *core_txq;
struct efx_tx_buffer *buffer; struct efx_tx_buffer *buffer;
struct efx_buffer *tsoh_page;
struct efx_special_buffer txd; struct efx_special_buffer txd;
unsigned int ptr_mask; unsigned int ptr_mask;
bool initialised; bool initialised;
@@ -187,7 +188,6 @@ struct efx_tx_queue {
unsigned int insert_count ____cacheline_aligned_in_smp; unsigned int insert_count ____cacheline_aligned_in_smp;
unsigned int write_count; unsigned int write_count;
unsigned int old_read_count; unsigned int old_read_count;
struct efx_tso_header *tso_headers_free;
unsigned int tso_bursts; unsigned int tso_bursts;
unsigned int tso_long_headers; unsigned int tso_long_headers;
unsigned int tso_packets; unsigned int tso_packets;
@@ -430,11 +430,9 @@ enum efx_int_mode {
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
enum nic_state { enum nic_state {
STATE_INIT = 0, STATE_UNINIT = 0, /* device being probed/removed or is frozen */
STATE_RUNNING = 1, STATE_READY = 1, /* hardware ready and netdev registered */
STATE_FINI = 2, STATE_DISABLED = 2, /* device disabled due to hardware errors */
STATE_DISABLED = 3,
STATE_MAX,
}; };
/* /*
@@ -654,7 +652,7 @@ struct vfdi_status;
* @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
* @irq_rx_moderation: IRQ moderation time for RX event queues * @irq_rx_moderation: IRQ moderation time for RX event queues
* @msg_enable: Log message enable flags * @msg_enable: Log message enable flags
* @state: Device state flag. Serialised by the rtnl_lock. * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
* @reset_pending: Bitmask for pending resets * @reset_pending: Bitmask for pending resets
* @tx_queue: TX DMA queues * @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues * @rx_queue: RX DMA queues
@@ -664,6 +662,8 @@ struct vfdi_status;
* should be allocated for this NIC * should be allocated for this NIC
* @rxq_entries: Size of receive queues requested by user. * @rxq_entries: Size of receive queues requested by user.
* @txq_entries: Size of transmit queues requested by user. * @txq_entries: Size of transmit queues requested by user.
* @txq_stop_thresh: TX queue fill level at or above which we stop it.
* @txq_wake_thresh: TX queue fill level at or below which we wake it.
* @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
* @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
* @sram_lim_qw: Qword address limit of SRAM * @sram_lim_qw: Qword address limit of SRAM
@@ -774,6 +774,9 @@ struct efx_nic {
unsigned rxq_entries; unsigned rxq_entries;
unsigned txq_entries; unsigned txq_entries;
unsigned int txq_stop_thresh;
unsigned int txq_wake_thresh;
unsigned tx_dc_base; unsigned tx_dc_base;
unsigned rx_dc_base; unsigned rx_dc_base;
unsigned sram_lim_qw; unsigned sram_lim_qw;
......
@@ -298,7 +298,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
/************************************************************************** /**************************************************************************
* *
* Generic buffer handling * Generic buffer handling
* These buffers are used for interrupt status and MAC stats * These buffers are used for interrupt status, MAC stats, etc.
* *
**************************************************************************/ **************************************************************************/
@@ -401,8 +401,10 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
++tx_queue->write_count; ++tx_queue->write_count;
/* Create TX descriptor ring entry */ /* Create TX descriptor ring entry */
BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
EFX_POPULATE_QWORD_4(*txd, EFX_POPULATE_QWORD_4(*txd,
FSF_AZ_TX_KER_CONT, buffer->continuation, FSF_AZ_TX_KER_CONT,
buffer->flags & EFX_TX_BUF_CONT,
FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
FSF_AZ_TX_KER_BUF_REGION, 0, FSF_AZ_TX_KER_BUF_REGION, 0,
FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
......
@@ -22,14 +22,6 @@
#include "nic.h" #include "nic.h"
#include "workarounds.h" #include "workarounds.h"
/*
* TX descriptor ring full threshold
*
* The tx_queue descriptor ring fill-level must fall below this value
* before we restart the netif queue
*/
#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer, struct efx_tx_buffer *buffer,
unsigned int *pkts_compl, unsigned int *pkts_compl,
@@ -39,67 +31,32 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
struct device *dma_dev = &tx_queue->efx->pci_dev->dev; struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len - dma_addr_t unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len); buffer->unmap_len);
if (buffer->unmap_single) if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len, dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE); DMA_TO_DEVICE);
else else
dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len, dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE); DMA_TO_DEVICE);
buffer->unmap_len = 0; buffer->unmap_len = 0;
buffer->unmap_single = false;
} }
if (buffer->skb) { if (buffer->flags & EFX_TX_BUF_SKB) {
(*pkts_compl)++; (*pkts_compl)++;
(*bytes_compl) += buffer->skb->len; (*bytes_compl) += buffer->skb->len;
dev_kfree_skb_any((struct sk_buff *) buffer->skb); dev_kfree_skb_any((struct sk_buff *) buffer->skb);
buffer->skb = NULL;
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
"TX queue %d transmission id %x complete\n", "TX queue %d transmission id %x complete\n",
tx_queue->queue, tx_queue->read_count); tx_queue->queue, tx_queue->read_count);
} else if (buffer->flags & EFX_TX_BUF_HEAP) {
kfree(buffer->heap_buf);
} }
}
/** buffer->len = 0;
* struct efx_tso_header - a DMA mapped buffer for packet headers buffer->flags = 0;
* @next: Linked list of free ones. }
* The list is protected by the TX queue lock.
* @dma_unmap_len: Length to unmap for an oversize buffer, or 0.
* @dma_addr: The DMA address of the header below.
*
* This controls the memory used for a TSO header. Use TSOH_DATA()
* to find the packet header data. Use TSOH_SIZE() to calculate the
* total size required for a given packet header length. TSO headers
* in the free list are exactly %TSOH_STD_SIZE bytes in size.
*/
struct efx_tso_header {
union {
struct efx_tso_header *next;
size_t unmap_len;
};
dma_addr_t dma_addr;
};
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
struct sk_buff *skb); struct sk_buff *skb);
static void efx_fini_tso(struct efx_tx_queue *tx_queue);
static void efx_tsoh_heap_free(struct efx_tx_queue *tx_queue,
struct efx_tso_header *tsoh);
static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer)
{
if (buffer->tsoh) {
if (likely(!buffer->tsoh->unmap_len)) {
buffer->tsoh->next = tx_queue->tso_headers_free;
tx_queue->tso_headers_free = buffer->tsoh;
} else {
efx_tsoh_heap_free(tx_queue, buffer->tsoh);
}
buffer->tsoh = NULL;
}
}
static inline unsigned static inline unsigned
efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
@@ -138,6 +95,56 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
return max_descs; return max_descs;
} }
/* Get partner of a TX queue, seen as part of the same net core queue */
static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
{
if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
else
return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
}
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
/* We need to consider both queues that the net core sees as one */
struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
struct efx_nic *efx = txq1->efx;
unsigned int fill_level;
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
if (likely(fill_level < efx->txq_stop_thresh))
return;
/* We used the stale old_read_count above, which gives us a
* pessimistic estimate of the fill level (which may even
* validly be >= efx->txq_entries). Now try again using
* read_count (more likely to be a cache miss).
*
* If we read read_count and then conditionally stop the
* queue, it is possible for the completion path to race with
* us and complete all outstanding descriptors in the middle,
* after which there will be no more completions to wake it.
* Therefore we stop the queue first, then read read_count
* (with a memory barrier to ensure the ordering), then
* restart the queue if the fill level turns out to be low
* enough.
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
txq1->old_read_count = ACCESS_ONCE(txq1->read_count);
txq2->old_read_count = ACCESS_ONCE(txq2->read_count);
fill_level = max(txq1->insert_count - txq1->old_read_count,
txq2->insert_count - txq2->old_read_count);
EFX_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
if (likely(fill_level < efx->txq_stop_thresh)) {
smp_mb();
if (likely(!efx->loopback_selftest))
netif_tx_start_queue(txq1->core_txq);
}
}
/* /*
* Add a socket buffer to a TX queue * Add a socket buffer to a TX queue
* *
@@ -151,7 +158,7 @@ unsigned int efx_tx_max_skb_descs(struct efx_nic *efx)
* This function is split out from efx_hard_start_xmit to allow the * This function is split out from efx_hard_start_xmit to allow the
* loopback test to direct packets via specific TX queues. * loopback test to direct packets via specific TX queues.
* *
* Returns NETDEV_TX_OK or NETDEV_TX_BUSY * Returns NETDEV_TX_OK.
* You must hold netif_tx_lock() to call this function. * You must hold netif_tx_lock() to call this function.
*/ */
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
@@ -160,12 +167,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
struct device *dma_dev = &efx->pci_dev->dev; struct device *dma_dev = &efx->pci_dev->dev;
struct efx_tx_buffer *buffer; struct efx_tx_buffer *buffer;
skb_frag_t *fragment; skb_frag_t *fragment;
unsigned int len, unmap_len = 0, fill_level, insert_ptr; unsigned int len, unmap_len = 0, insert_ptr;
dma_addr_t dma_addr, unmap_addr = 0; dma_addr_t dma_addr, unmap_addr = 0;
unsigned int dma_len; unsigned int dma_len;
bool unmap_single; unsigned short dma_flags;
int q_space, i = 0; int i = 0;
netdev_tx_t rc = NETDEV_TX_OK;
EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
@@ -183,14 +189,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
fill_level = tx_queue->insert_count - tx_queue->old_read_count;
q_space = efx->txq_entries - 1 - fill_level;
/* Map for DMA. Use dma_map_single rather than dma_map_page /* Map for DMA. Use dma_map_single rather than dma_map_page
* since this is more efficient on machines with sparse * since this is more efficient on machines with sparse
* memory. * memory.
*/ */
unmap_single = true; dma_flags = EFX_TX_BUF_MAP_SINGLE;
dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE); dma_addr = dma_map_single(dma_dev, skb->data, len, PCI_DMA_TODEVICE);
/* Process all fragments */ /* Process all fragments */
@@ -205,39 +208,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Add to TX queue, splitting across DMA boundaries */ /* Add to TX queue, splitting across DMA boundaries */
do { do {
if (unlikely(q_space-- <= 0)) {
/* It might be that completions have
* happened since the xmit path last
* checked. Update the xmit path's
* copy of read_count.
*/
netif_tx_stop_queue(tx_queue->core_txq);
/* This memory barrier protects the
* change of queue state from the access
* of read_count. */
smp_mb();
tx_queue->old_read_count =
ACCESS_ONCE(tx_queue->read_count);
fill_level = (tx_queue->insert_count
- tx_queue->old_read_count);
q_space = efx->txq_entries - 1 - fill_level;
if (unlikely(q_space-- <= 0)) {
rc = NETDEV_TX_BUSY;
goto unwind;
}
smp_mb();
if (likely(!efx->loopback_selftest))
netif_tx_start_queue(
tx_queue->core_txq);
}
insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr]; buffer = &tx_queue->buffer[insert_ptr];
efx_tsoh_free(tx_queue, buffer); EFX_BUG_ON_PARANOID(buffer->flags);
EFX_BUG_ON_PARANOID(buffer->tsoh);
EFX_BUG_ON_PARANOID(buffer->skb);
EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(buffer->len);
EFX_BUG_ON_PARANOID(!buffer->continuation);
EFX_BUG_ON_PARANOID(buffer->unmap_len); EFX_BUG_ON_PARANOID(buffer->unmap_len);
dma_len = efx_max_tx_len(efx, dma_addr); dma_len = efx_max_tx_len(efx, dma_addr);
@@ -247,13 +221,14 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Fill out per descriptor fields */ /* Fill out per descriptor fields */
buffer->len = dma_len; buffer->len = dma_len;
buffer->dma_addr = dma_addr; buffer->dma_addr = dma_addr;
buffer->flags = EFX_TX_BUF_CONT;
len -= dma_len; len -= dma_len;
dma_addr += dma_len; dma_addr += dma_len;
++tx_queue->insert_count; ++tx_queue->insert_count;
} while (len); } while (len);
/* Transfer ownership of the unmapping to the final buffer */ /* Transfer ownership of the unmapping to the final buffer */
buffer->unmap_single = unmap_single; buffer->flags = EFX_TX_BUF_CONT | dma_flags;
buffer->unmap_len = unmap_len; buffer->unmap_len = unmap_len;
unmap_len = 0; unmap_len = 0;
@@ -264,20 +239,22 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
len = skb_frag_size(fragment); len = skb_frag_size(fragment);
i++; i++;
/* Map for DMA */ /* Map for DMA */
unmap_single = false; dma_flags = 0;
dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len, dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
DMA_TO_DEVICE); DMA_TO_DEVICE);
} }
/* Transfer ownership of the skb to the final buffer */ /* Transfer ownership of the skb to the final buffer */
buffer->skb = skb; buffer->skb = skb;
buffer->continuation = false; buffer->flags = EFX_TX_BUF_SKB | dma_flags;
netdev_tx_sent_queue(tx_queue->core_txq, skb->len); netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
/* Pass off to hardware */ /* Pass off to hardware */
efx_nic_push_buffers(tx_queue); efx_nic_push_buffers(tx_queue);
efx_tx_maybe_stop_queue(tx_queue);
return NETDEV_TX_OK; return NETDEV_TX_OK;
dma_err: dma_err:
@@ -289,7 +266,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
/* Mark the packet as transmitted, and free the SKB ourselves */ /* Mark the packet as transmitted, and free the SKB ourselves */
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
unwind:
/* Work backwards until we hit the original insert pointer value */ /* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) { while (tx_queue->insert_count != tx_queue->write_count) {
unsigned int pkts_compl = 0, bytes_compl = 0; unsigned int pkts_compl = 0, bytes_compl = 0;
@@ -297,12 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr]; buffer = &tx_queue->buffer[insert_ptr];
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
buffer->len = 0;
} }
/* Free the fragment we were mid-way through pushing */ /* Free the fragment we were mid-way through pushing */
if (unmap_len) { if (unmap_len) {
if (unmap_single) if (dma_flags & EFX_TX_BUF_MAP_SINGLE)
dma_unmap_single(dma_dev, unmap_addr, unmap_len, dma_unmap_single(dma_dev, unmap_addr, unmap_len,
DMA_TO_DEVICE); DMA_TO_DEVICE);
else else
@@ -310,7 +285,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
DMA_TO_DEVICE); DMA_TO_DEVICE);
} }
return rc; return NETDEV_TX_OK;
} }
/* Remove packets from the TX queue /* Remove packets from the TX queue
@@ -340,8 +315,6 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
} }
efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
buffer->continuation = true;
buffer->len = 0;
++tx_queue->read_count; ++tx_queue->read_count;
read_ptr = tx_queue->read_count & tx_queue->ptr_mask; read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
@@ -450,6 +423,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{ {
unsigned fill_level; unsigned fill_level;
struct efx_nic *efx = tx_queue->efx; struct efx_nic *efx = tx_queue->efx;
struct efx_tx_queue *txq2;
unsigned int pkts_compl = 0, bytes_compl = 0; unsigned int pkts_compl = 0, bytes_compl = 0;
EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask); EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
@@ -457,15 +431,18 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl); netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
/* See if we need to restart the netif queue. This barrier /* See if we need to restart the netif queue. This memory
* separates the update of read_count from the test of the * barrier ensures that we write read_count (inside
* queue state. */ * efx_dequeue_buffers()) before reading the queue status.
*/
smp_mb(); smp_mb();
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
likely(efx->port_enabled) && likely(efx->port_enabled) &&
likely(netif_device_present(efx->net_dev))) { likely(netif_device_present(efx->net_dev))) {
fill_level = tx_queue->insert_count - tx_queue->read_count; txq2 = efx_tx_queue_partner(tx_queue);
if (fill_level < EFX_TXQ_THRESHOLD(efx)) fill_level = max(tx_queue->insert_count - tx_queue->read_count,
txq2->insert_count - txq2->read_count);
if (fill_level <= efx->txq_wake_thresh)
netif_tx_wake_queue(tx_queue->core_txq); netif_tx_wake_queue(tx_queue->core_txq);
} }
@@ -480,11 +457,26 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
} }
} }
/* Size of page-based TSO header buffers. Larger blocks must be
* allocated from the heap.
*/
#define TSOH_STD_SIZE 128
#define TSOH_PER_PAGE (PAGE_SIZE / TSOH_STD_SIZE)
/* At most half the descriptors in the queue at any time will refer to
* a TSO header buffer, since they must always be followed by a
* payload descriptor referring to an skb.
*/
static unsigned int efx_tsoh_page_count(struct efx_tx_queue *tx_queue)
{
return DIV_ROUND_UP(tx_queue->ptr_mask + 1, 2 * TSOH_PER_PAGE);
}
int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
{ {
struct efx_nic *efx = tx_queue->efx; struct efx_nic *efx = tx_queue->efx;
unsigned int entries; unsigned int entries;
int i, rc; int rc;
/* Create the smallest power-of-two aligned ring */ /* Create the smallest power-of-two aligned ring */
entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE); entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
@@ -500,17 +492,28 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
GFP_KERNEL); GFP_KERNEL);
if (!tx_queue->buffer) if (!tx_queue->buffer)
return -ENOMEM; return -ENOMEM;
for (i = 0; i <= tx_queue->ptr_mask; ++i)
tx_queue->buffer[i].continuation = true; if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) {
tx_queue->tsoh_page =
kcalloc(efx_tsoh_page_count(tx_queue),
sizeof(tx_queue->tsoh_page[0]), GFP_KERNEL);
if (!tx_queue->tsoh_page) {
rc = -ENOMEM;
goto fail1;
}
}
/* Allocate hardware ring */ /* Allocate hardware ring */
rc = efx_nic_probe_tx(tx_queue); rc = efx_nic_probe_tx(tx_queue);
if (rc) if (rc)
goto fail; goto fail2;
return 0; return 0;
fail: fail2:
kfree(tx_queue->tsoh_page);
tx_queue->tsoh_page = NULL;
fail1:
kfree(tx_queue->buffer); kfree(tx_queue->buffer);
tx_queue->buffer = NULL; tx_queue->buffer = NULL;
return rc; return rc;
@@ -546,8 +549,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
unsigned int pkts_compl = 0, bytes_compl = 0; unsigned int pkts_compl = 0, bytes_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
buffer->continuation = true;
buffer->len = 0;
++tx_queue->read_count; ++tx_queue->read_count;
} }
@@ -568,13 +569,12 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
efx_nic_fini_tx(tx_queue); efx_nic_fini_tx(tx_queue);
efx_release_tx_buffers(tx_queue); efx_release_tx_buffers(tx_queue);
/* Free up TSO header cache */
efx_fini_tso(tx_queue);
} }
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{ {
int i;
if (!tx_queue->buffer) if (!tx_queue->buffer)
return; return;
@@ -582,6 +582,14 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
"destroying TX queue %d\n", tx_queue->queue); "destroying TX queue %d\n", tx_queue->queue);
efx_nic_remove_tx(tx_queue); efx_nic_remove_tx(tx_queue);
if (tx_queue->tsoh_page) {
for (i = 0; i < efx_tsoh_page_count(tx_queue); i++)
efx_nic_free_buffer(tx_queue->efx,
&tx_queue->tsoh_page[i]);
kfree(tx_queue->tsoh_page);
tx_queue->tsoh_page = NULL;
}
kfree(tx_queue->buffer); kfree(tx_queue->buffer);
tx_queue->buffer = NULL; tx_queue->buffer = NULL;
} }
@@ -604,22 +612,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
#define TSOH_OFFSET NET_IP_ALIGN #define TSOH_OFFSET NET_IP_ALIGN
#endif #endif
#define TSOH_BUFFER(tsoh) ((u8 *)(tsoh + 1) + TSOH_OFFSET)
/* Total size of struct efx_tso_header, buffer and padding */
#define TSOH_SIZE(hdr_len) \
(sizeof(struct efx_tso_header) + TSOH_OFFSET + hdr_len)
/* Size of blocks on free list. Larger blocks must be allocated from
* the heap.
*/
#define TSOH_STD_SIZE 128
#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2)) #define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
#define ETH_HDR_LEN(skb) (skb_network_header(skb) - (skb)->data)
#define SKB_TCP_OFF(skb) PTR_DIFF(tcp_hdr(skb), (skb)->data)
#define SKB_IPV4_OFF(skb) PTR_DIFF(ip_hdr(skb), (skb)->data)
#define SKB_IPV6_OFF(skb) PTR_DIFF(ipv6_hdr(skb), (skb)->data)
/** /**
* struct tso_state - TSO state for an SKB * struct tso_state - TSO state for an SKB
@@ -631,10 +624,12 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
* @in_len: Remaining length in current SKB fragment * @in_len: Remaining length in current SKB fragment
* @unmap_len: Length of SKB fragment * @unmap_len: Length of SKB fragment
* @unmap_addr: DMA address of SKB fragment * @unmap_addr: DMA address of SKB fragment
* @unmap_single: DMA single vs page mapping flag * @dma_flags: TX buffer flags for DMA mapping - %EFX_TX_BUF_MAP_SINGLE or 0
* @protocol: Network protocol (after any VLAN header) * @protocol: Network protocol (after any VLAN header)
* @ip_off: Offset of IP header
* @tcp_off: Offset of TCP header
* @header_len: Number of bytes of header * @header_len: Number of bytes of header
* @full_packet_size: Number of bytes to put in each outgoing segment * @ip_base_len: IPv4 tot_len or IPv6 payload_len, before TCP payload
* *
* The state used during segmentation. It is put into this data structure * The state used during segmentation. It is put into this data structure
* just to make it easy to pass into inline functions. * just to make it easy to pass into inline functions.
@@ -651,11 +646,13 @@ struct tso_state {
unsigned in_len; unsigned in_len;
unsigned unmap_len; unsigned unmap_len;
dma_addr_t unmap_addr; dma_addr_t unmap_addr;
bool unmap_single; unsigned short dma_flags;
__be16 protocol; __be16 protocol;
unsigned int ip_off;
unsigned int tcp_off;
unsigned header_len; unsigned header_len;
int full_packet_size; unsigned int ip_base_len;
}; };
@@ -687,91 +684,43 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb)
return protocol; return protocol;
} }
static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
/* struct efx_tx_buffer *buffer, unsigned int len)
* Allocate a page worth of efx_tso_header structures, and string them
* into the tx_queue->tso_headers_free linked list. Return 0 or -ENOMEM.
*/
static int efx_tsoh_block_alloc(struct efx_tx_queue *tx_queue)
{ {
struct device *dma_dev = &tx_queue->efx->pci_dev->dev; u8 *result;
struct efx_tso_header *tsoh;
dma_addr_t dma_addr;
u8 *base_kva, *kva;
base_kva = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr, GFP_ATOMIC);
if (base_kva == NULL) {
netif_err(tx_queue->efx, tx_err, tx_queue->efx->net_dev,
"Unable to allocate page for TSO headers\n");
return -ENOMEM;
}
/* dma_alloc_coherent() allocates pages. */
EFX_BUG_ON_PARANOID(dma_addr & (PAGE_SIZE - 1u));
for (kva = base_kva; kva < base_kva + PAGE_SIZE; kva += TSOH_STD_SIZE) {
tsoh = (struct efx_tso_header *)kva;
tsoh->dma_addr = dma_addr + (TSOH_BUFFER(tsoh) - base_kva);
tsoh->next = tx_queue->tso_headers_free;
tx_queue->tso_headers_free = tsoh;
}
return 0;
}
EFX_BUG_ON_PARANOID(buffer->len);
EFX_BUG_ON_PARANOID(buffer->flags);
EFX_BUG_ON_PARANOID(buffer->unmap_len);
/* Free up a TSO header, and all others in the same page. */ if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, unsigned index =
struct efx_tso_header *tsoh, (tx_queue->insert_count & tx_queue->ptr_mask) / 2;
struct device *dma_dev) struct efx_buffer *page_buf =
{ &tx_queue->tsoh_page[index / TSOH_PER_PAGE];
struct efx_tso_header **p; unsigned offset =
unsigned long base_kva; TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
dma_addr_t base_dma;
if (unlikely(!page_buf->addr) &&
base_kva = (unsigned long)tsoh & PAGE_MASK; efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
base_dma = tsoh->dma_addr & PAGE_MASK; return NULL;
p = &tx_queue->tso_headers_free; result = (u8 *)page_buf->addr + offset;
while (*p != NULL) { buffer->dma_addr = page_buf->dma_addr + offset;
if (((unsigned long)*p & PAGE_MASK) == base_kva) buffer->flags = EFX_TX_BUF_CONT;
*p = (*p)->next; } else {
else tx_queue->tso_long_headers++;
p = &(*p)->next;
}
dma_free_coherent(dma_dev, PAGE_SIZE, (void *)base_kva, base_dma);
}
static struct efx_tso_header * buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len) if (unlikely(!buffer->heap_buf))
{ return NULL;
struct efx_tso_header *tsoh; result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
tsoh = kmalloc(TSOH_SIZE(header_len), GFP_ATOMIC | GFP_DMA);
if (unlikely(!tsoh))
return NULL;
tsoh->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
TSOH_BUFFER(tsoh), header_len,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
tsoh->dma_addr))) {
kfree(tsoh);
return NULL;
} }
tsoh->unmap_len = header_len; buffer->len = len;
return tsoh;
}
static void return result;
efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
{
dma_unmap_single(&tx_queue->efx->pci_dev->dev,
tsoh->dma_addr, tsoh->unmap_len,
DMA_TO_DEVICE);
kfree(tsoh);
} }
/** /**
...@@ -781,47 +730,19 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh) ...@@ -781,47 +730,19 @@ efx_tsoh_heap_free(struct efx_tx_queue *tx_queue, struct efx_tso_header *tsoh)
* @len: Length of fragment * @len: Length of fragment
* @final_buffer: The final buffer inserted into the queue * @final_buffer: The final buffer inserted into the queue
* *
* Push descriptors onto the TX queue. Return 0 on success or 1 if * Push descriptors onto the TX queue.
* @tx_queue full.
*/ */
static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, static void efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned len, dma_addr_t dma_addr, unsigned len,
struct efx_tx_buffer **final_buffer) struct efx_tx_buffer **final_buffer)
{ {
struct efx_tx_buffer *buffer; struct efx_tx_buffer *buffer;
struct efx_nic *efx = tx_queue->efx; struct efx_nic *efx = tx_queue->efx;
unsigned dma_len, fill_level, insert_ptr; unsigned dma_len, insert_ptr;
int q_space;
EFX_BUG_ON_PARANOID(len <= 0); EFX_BUG_ON_PARANOID(len <= 0);
fill_level = tx_queue->insert_count - tx_queue->old_read_count;
/* -1 as there is no way to represent all descriptors used */
q_space = efx->txq_entries - 1 - fill_level;
while (1) { while (1) {
if (unlikely(q_space-- <= 0)) {
/* It might be that completions have happened
* since the xmit path last checked. Update
* the xmit path's copy of read_count.
*/
netif_tx_stop_queue(tx_queue->core_txq);
/* This memory barrier protects the change of
* queue state from the access of read_count. */
smp_mb();
tx_queue->old_read_count =
ACCESS_ONCE(tx_queue->read_count);
fill_level = (tx_queue->insert_count
- tx_queue->old_read_count);
q_space = efx->txq_entries - 1 - fill_level;
if (unlikely(q_space-- <= 0)) {
*final_buffer = NULL;
return 1;
}
smp_mb();
netif_tx_start_queue(tx_queue->core_txq);
}
insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr]; buffer = &tx_queue->buffer[insert_ptr];
++tx_queue->insert_count; ++tx_queue->insert_count;
...@@ -830,12 +751,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, ...@@ -830,12 +751,9 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
tx_queue->read_count >= tx_queue->read_count >=
efx->txq_entries); efx->txq_entries);
efx_tsoh_free(tx_queue, buffer);
EFX_BUG_ON_PARANOID(buffer->len); EFX_BUG_ON_PARANOID(buffer->len);
EFX_BUG_ON_PARANOID(buffer->unmap_len); EFX_BUG_ON_PARANOID(buffer->unmap_len);
EFX_BUG_ON_PARANOID(buffer->skb); EFX_BUG_ON_PARANOID(buffer->flags);
EFX_BUG_ON_PARANOID(!buffer->continuation);
EFX_BUG_ON_PARANOID(buffer->tsoh);
buffer->dma_addr = dma_addr; buffer->dma_addr = dma_addr;
...@@ -845,7 +763,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, ...@@ -845,7 +763,8 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
if (dma_len >= len) if (dma_len >= len)
break; break;
buffer->len = dma_len; /* Don't set the other members */ buffer->len = dma_len;
buffer->flags = EFX_TX_BUF_CONT;
dma_addr += dma_len; dma_addr += dma_len;
len -= dma_len; len -= dma_len;
} }
...@@ -853,7 +772,6 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, ...@@ -853,7 +772,6 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(!len); EFX_BUG_ON_PARANOID(!len);
buffer->len = len; buffer->len = len;
*final_buffer = buffer; *final_buffer = buffer;
return 0;
} }
...@@ -864,54 +782,42 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue, ...@@ -864,54 +782,42 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
* a single fragment, and we know it doesn't cross a page boundary. It * a single fragment, and we know it doesn't cross a page boundary. It
* also allows us to not worry about end-of-packet etc. * also allows us to not worry about end-of-packet etc.
*/ */
static void efx_tso_put_header(struct efx_tx_queue *tx_queue, static int efx_tso_put_header(struct efx_tx_queue *tx_queue,
struct efx_tso_header *tsoh, unsigned len) struct efx_tx_buffer *buffer, u8 *header)
{ {
struct efx_tx_buffer *buffer; if (unlikely(buffer->flags & EFX_TX_BUF_HEAP)) {
buffer->dma_addr = dma_map_single(&tx_queue->efx->pci_dev->dev,
buffer = &tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask]; header, buffer->len,
efx_tsoh_free(tx_queue, buffer); DMA_TO_DEVICE);
EFX_BUG_ON_PARANOID(buffer->len); if (unlikely(dma_mapping_error(&tx_queue->efx->pci_dev->dev,
EFX_BUG_ON_PARANOID(buffer->unmap_len); buffer->dma_addr))) {
EFX_BUG_ON_PARANOID(buffer->skb); kfree(buffer->heap_buf);
EFX_BUG_ON_PARANOID(!buffer->continuation); buffer->len = 0;
EFX_BUG_ON_PARANOID(buffer->tsoh); buffer->flags = 0;
buffer->len = len; return -ENOMEM;
buffer->dma_addr = tsoh->dma_addr; }
buffer->tsoh = tsoh; buffer->unmap_len = buffer->len;
buffer->flags |= EFX_TX_BUF_MAP_SINGLE;
}
++tx_queue->insert_count; ++tx_queue->insert_count;
return 0;
} }
/* Remove descriptors put into a tx_queue. */ /* Remove buffers put into a tx_queue. None of the buffers must have
* an skb attached.
*/
static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
{ {
struct efx_tx_buffer *buffer; struct efx_tx_buffer *buffer;
dma_addr_t unmap_addr;
/* Work backwards until we hit the original insert pointer value */ /* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) { while (tx_queue->insert_count != tx_queue->write_count) {
--tx_queue->insert_count; --tx_queue->insert_count;
buffer = &tx_queue->buffer[tx_queue->insert_count & buffer = &tx_queue->buffer[tx_queue->insert_count &
tx_queue->ptr_mask]; tx_queue->ptr_mask];
efx_tsoh_free(tx_queue, buffer); efx_dequeue_buffer(tx_queue, buffer, NULL, NULL);
EFX_BUG_ON_PARANOID(buffer->skb);
if (buffer->unmap_len) {
unmap_addr = (buffer->dma_addr + buffer->len -
buffer->unmap_len);
if (buffer->unmap_single)
dma_unmap_single(&tx_queue->efx->pci_dev->dev,
unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE);
else
dma_unmap_page(&tx_queue->efx->pci_dev->dev,
unmap_addr, buffer->unmap_len,
DMA_TO_DEVICE);
buffer->unmap_len = 0;
}
buffer->len = 0;
buffer->continuation = true;
} }
} }
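Both this unwind path and normal completion now funnel through efx_dequeue_buffer(), which keys the cleanup off buffer->flags instead of separate booleans and pointers. A rough userspace model of that dispatch is below; the flag values are made up for illustration and the real definitions live in the driver's headers.

#include <stdio.h>

/* Hypothetical flag values, for illustration only */
#define EFX_TX_BUF_CONT        0x1
#define EFX_TX_BUF_SKB         0x2
#define EFX_TX_BUF_HEAP        0x4
#define EFX_TX_BUF_MAP_SINGLE  0x8

struct model_tx_buffer {
	unsigned short flags;
	unsigned int unmap_len;
};

static void model_dequeue(const struct model_tx_buffer *b)
{
	if (b->unmap_len)
		printf("  %s\n", (b->flags & EFX_TX_BUF_MAP_SINGLE) ?
		       "dma_unmap_single()" : "dma_unmap_page()");
	if (b->flags & EFX_TX_BUF_SKB)
		printf("  free the skb (completion path only)\n");
	if (b->flags & EFX_TX_BUF_HEAP)
		printf("  kfree() the long TSO header\n");
	printf("  clear len, unmap_len and flags\n");
}

int main(void)
{
	struct model_tx_buffer std_header  = { EFX_TX_BUF_CONT, 0 };
	struct model_tx_buffer long_header = {
		EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP | EFX_TX_BUF_MAP_SINGLE, 64 };
	struct model_tx_buffer last_payload = { EFX_TX_BUF_SKB, 1448 };

	printf("standard header:\n");  model_dequeue(&std_header);
	printf("long header:\n");      model_dequeue(&long_header);
	printf("final payload:\n");    model_dequeue(&last_payload);
	return 0;
}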
...@@ -919,17 +825,16 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue) ...@@ -919,17 +825,16 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
/* Parse the SKB header and initialise state. */ /* Parse the SKB header and initialise state. */
static void tso_start(struct tso_state *st, const struct sk_buff *skb) static void tso_start(struct tso_state *st, const struct sk_buff *skb)
{ {
/* All ethernet/IP/TCP headers combined size is TCP header size st->ip_off = skb_network_header(skb) - skb->data;
* plus offset of TCP header relative to start of packet. st->tcp_off = skb_transport_header(skb) - skb->data;
*/ st->header_len = st->tcp_off + (tcp_hdr(skb)->doff << 2u);
st->header_len = ((tcp_hdr(skb)->doff << 2u) if (st->protocol == htons(ETH_P_IP)) {
+ PTR_DIFF(tcp_hdr(skb), skb->data)); st->ip_base_len = st->header_len - st->ip_off;
st->full_packet_size = st->header_len + skb_shinfo(skb)->gso_size;
if (st->protocol == htons(ETH_P_IP))
st->ipv4_id = ntohs(ip_hdr(skb)->id); st->ipv4_id = ntohs(ip_hdr(skb)->id);
else } else {
st->ip_base_len = st->header_len - st->tcp_off;
st->ipv4_id = 0; st->ipv4_id = 0;
}
st->seqnum = ntohl(tcp_hdr(skb)->seq); st->seqnum = ntohl(tcp_hdr(skb)->seq);
EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg); EFX_BUG_ON_PARANOID(tcp_hdr(skb)->urg);
...@@ -938,7 +843,7 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb) ...@@ -938,7 +843,7 @@ static void tso_start(struct tso_state *st, const struct sk_buff *skb)
st->out_len = skb->len - st->header_len; st->out_len = skb->len - st->header_len;
st->unmap_len = 0; st->unmap_len = 0;
st->unmap_single = false; st->dma_flags = 0;
} }
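The effect of ip_base_len is easiest to see with numbers. A self-contained check (header sizes and MSS are illustrative) that ip_base_len + packet_space reproduces the IPv4 tot_len and IPv6 payload_len written later in tso_start_new_packet():

#include <stdio.h>

int main(void)
{
	/* Typical header sizes, chosen for illustration only */
	unsigned int eth_hlen = 14, ip4_hlen = 20, ip6_hlen = 40, tcp_hlen = 20;
	unsigned int gso_size = 1448;          /* example MSS */

	/* IPv4: ip_base_len covers the IP + TCP headers */
	unsigned int ip_off      = eth_hlen;
	unsigned int tcp_off     = eth_hlen + ip4_hlen;
	unsigned int header_len  = tcp_off + tcp_hlen;
	unsigned int ip_base_len = header_len - ip_off;      /* 40 */
	printf("IPv4 tot_len      = %u\n", ip_base_len + gso_size);

	/* IPv6: payload_len excludes the fixed IPv6 header, so ip_base_len
	 * covers only the TCP header.
	 */
	tcp_off     = eth_hlen + ip6_hlen;
	header_len  = tcp_off + tcp_hlen;
	ip_base_len = header_len - tcp_off;                   /* 20 */
	printf("IPv6 payload_len  = %u\n", ip_base_len + gso_size);
	return 0;
}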
static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
...@@ -947,7 +852,7 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx, ...@@ -947,7 +852,7 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE); skb_frag_size(frag), DMA_TO_DEVICE);
if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
st->unmap_single = false; st->dma_flags = 0;
st->unmap_len = skb_frag_size(frag); st->unmap_len = skb_frag_size(frag);
st->in_len = skb_frag_size(frag); st->in_len = skb_frag_size(frag);
st->dma_addr = st->unmap_addr; st->dma_addr = st->unmap_addr;
...@@ -965,7 +870,7 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, ...@@ -965,7 +870,7 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl, st->unmap_addr = dma_map_single(&efx->pci_dev->dev, skb->data + hl,
len, DMA_TO_DEVICE); len, DMA_TO_DEVICE);
if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
st->unmap_single = true; st->dma_flags = EFX_TX_BUF_MAP_SINGLE;
st->unmap_len = len; st->unmap_len = len;
st->in_len = len; st->in_len = len;
st->dma_addr = st->unmap_addr; st->dma_addr = st->unmap_addr;
...@@ -982,20 +887,19 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx, ...@@ -982,20 +887,19 @@ static int tso_get_head_fragment(struct tso_state *st, struct efx_nic *efx,
* @st: TSO state * @st: TSO state
* *
* Form descriptors for the current fragment, until we reach the end * Form descriptors for the current fragment, until we reach the end
* of fragment or end-of-packet. Return 0 on success, 1 if not enough * of fragment or end-of-packet.
* space in @tx_queue.
*/ */
static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, static void tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
const struct sk_buff *skb, const struct sk_buff *skb,
struct tso_state *st) struct tso_state *st)
{ {
struct efx_tx_buffer *buffer; struct efx_tx_buffer *buffer;
int n, end_of_packet, rc; int n;
if (st->in_len == 0) if (st->in_len == 0)
return 0; return;
if (st->packet_space == 0) if (st->packet_space == 0)
return 0; return;
EFX_BUG_ON_PARANOID(st->in_len <= 0); EFX_BUG_ON_PARANOID(st->in_len <= 0);
EFX_BUG_ON_PARANOID(st->packet_space <= 0); EFX_BUG_ON_PARANOID(st->packet_space <= 0);
...@@ -1006,25 +910,24 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, ...@@ -1006,25 +910,24 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
st->out_len -= n; st->out_len -= n;
st->in_len -= n; st->in_len -= n;
rc = efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer); efx_tx_queue_insert(tx_queue, st->dma_addr, n, &buffer);
if (likely(rc == 0)) {
if (st->out_len == 0)
/* Transfer ownership of the skb */
buffer->skb = skb;
end_of_packet = st->out_len == 0 || st->packet_space == 0; if (st->out_len == 0) {
buffer->continuation = !end_of_packet; /* Transfer ownership of the skb */
buffer->skb = skb;
buffer->flags = EFX_TX_BUF_SKB;
} else if (st->packet_space != 0) {
buffer->flags = EFX_TX_BUF_CONT;
}
if (st->in_len == 0) { if (st->in_len == 0) {
/* Transfer ownership of the DMA mapping */ /* Transfer ownership of the DMA mapping */
buffer->unmap_len = st->unmap_len; buffer->unmap_len = st->unmap_len;
buffer->unmap_single = st->unmap_single; buffer->flags |= st->dma_flags;
st->unmap_len = 0; st->unmap_len = 0;
}
} }
st->dma_addr += n; st->dma_addr += n;
return rc;
} }
...@@ -1035,36 +938,25 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue, ...@@ -1035,36 +938,25 @@ static int tso_fill_packet_with_fragment(struct efx_tx_queue *tx_queue,
* @st: TSO state * @st: TSO state
* *
* Generate a new header and prepare for the new packet. Return 0 on * Generate a new header and prepare for the new packet. Return 0 on
* success, or -1 if failed to alloc header. * success, or -%ENOMEM if failed to alloc header.
*/ */
static int tso_start_new_packet(struct efx_tx_queue *tx_queue, static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
const struct sk_buff *skb, const struct sk_buff *skb,
struct tso_state *st) struct tso_state *st)
{ {
struct efx_tso_header *tsoh; struct efx_tx_buffer *buffer =
&tx_queue->buffer[tx_queue->insert_count & tx_queue->ptr_mask];
struct tcphdr *tsoh_th; struct tcphdr *tsoh_th;
unsigned ip_length; unsigned ip_length;
u8 *header; u8 *header;
int rc;
/* Allocate a DMA-mapped header buffer. */ /* Allocate and insert a DMA-mapped header buffer. */
if (likely(TSOH_SIZE(st->header_len) <= TSOH_STD_SIZE)) { header = efx_tsoh_get_buffer(tx_queue, buffer, st->header_len);
if (tx_queue->tso_headers_free == NULL) { if (!header)
if (efx_tsoh_block_alloc(tx_queue)) return -ENOMEM;
return -1;
}
EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
tsoh = tx_queue->tso_headers_free;
tx_queue->tso_headers_free = tsoh->next;
tsoh->unmap_len = 0;
} else {
tx_queue->tso_long_headers++;
tsoh = efx_tsoh_heap_alloc(tx_queue, st->header_len);
if (unlikely(!tsoh))
return -1;
}
header = TSOH_BUFFER(tsoh); tsoh_th = (struct tcphdr *)(header + st->tcp_off);
tsoh_th = (struct tcphdr *)(header + SKB_TCP_OFF(skb));
/* Copy and update the headers. */ /* Copy and update the headers. */
memcpy(header, skb->data, st->header_len); memcpy(header, skb->data, st->header_len);
...@@ -1073,19 +965,19 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, ...@@ -1073,19 +965,19 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
st->seqnum += skb_shinfo(skb)->gso_size; st->seqnum += skb_shinfo(skb)->gso_size;
if (st->out_len > skb_shinfo(skb)->gso_size) { if (st->out_len > skb_shinfo(skb)->gso_size) {
/* This packet will not finish the TSO burst. */ /* This packet will not finish the TSO burst. */
ip_length = st->full_packet_size - ETH_HDR_LEN(skb); st->packet_space = skb_shinfo(skb)->gso_size;
tsoh_th->fin = 0; tsoh_th->fin = 0;
tsoh_th->psh = 0; tsoh_th->psh = 0;
} else { } else {
/* This packet will be the last in the TSO burst. */ /* This packet will be the last in the TSO burst. */
ip_length = st->header_len - ETH_HDR_LEN(skb) + st->out_len; st->packet_space = st->out_len;
tsoh_th->fin = tcp_hdr(skb)->fin; tsoh_th->fin = tcp_hdr(skb)->fin;
tsoh_th->psh = tcp_hdr(skb)->psh; tsoh_th->psh = tcp_hdr(skb)->psh;
} }
ip_length = st->ip_base_len + st->packet_space;
if (st->protocol == htons(ETH_P_IP)) { if (st->protocol == htons(ETH_P_IP)) {
struct iphdr *tsoh_iph = struct iphdr *tsoh_iph = (struct iphdr *)(header + st->ip_off);
(struct iphdr *)(header + SKB_IPV4_OFF(skb));
tsoh_iph->tot_len = htons(ip_length); tsoh_iph->tot_len = htons(ip_length);
...@@ -1094,16 +986,16 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, ...@@ -1094,16 +986,16 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
st->ipv4_id++; st->ipv4_id++;
} else { } else {
struct ipv6hdr *tsoh_iph = struct ipv6hdr *tsoh_iph =
(struct ipv6hdr *)(header + SKB_IPV6_OFF(skb)); (struct ipv6hdr *)(header + st->ip_off);
tsoh_iph->payload_len = htons(ip_length - sizeof(*tsoh_iph)); tsoh_iph->payload_len = htons(ip_length);
} }
st->packet_space = skb_shinfo(skb)->gso_size; rc = efx_tso_put_header(tx_queue, buffer, header);
++tx_queue->tso_packets; if (unlikely(rc))
return rc;
/* Form a descriptor for this header. */ ++tx_queue->tso_packets;
efx_tso_put_header(tx_queue, tsoh, st->header_len);
return 0; return 0;
} }
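A worked example of how each generated header differs from the previous one: the sequence number advances by gso_size, the IPv4 ID by one, and the length field is ip_base_len plus this segment's payload. All values below are illustrative.

#include <stdio.h>

int main(void)
{
	unsigned int gso_size    = 1400;   /* example MSS */
	unsigned int payload     = 3000;   /* example TCP payload in the skb */
	unsigned int ip_base_len = 40;     /* IPv4 + TCP headers, illustrative */
	unsigned int seq         = 1000;   /* initial th->seq */
	unsigned int ipv4_id     = 7;
	unsigned int out_len     = payload;

	while (out_len) {
		unsigned int space = out_len > gso_size ? gso_size : out_len;
		int last = (out_len <= gso_size);

		printf("seg: seq=%u id=%u tot_len=%u fin/psh %s\n",
		       seq, ipv4_id, ip_base_len + space,
		       last ? "copied from the original" : "cleared");

		/* Mirrors the driver: seqnum advances by gso_size and the
		 * IPv4 ID by one for every generated segment.
		 */
		seq     += gso_size;
		ipv4_id += 1;
		out_len -= space;
	}
	return 0;
}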
...@@ -1118,13 +1010,13 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue, ...@@ -1118,13 +1010,13 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
* *
* Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if * Add socket buffer @skb to @tx_queue, doing TSO or return != 0 if
* @skb was not enqueued. In all cases @skb is consumed. Return * @skb was not enqueued. In all cases @skb is consumed. Return
* %NETDEV_TX_OK or %NETDEV_TX_BUSY. * %NETDEV_TX_OK.
*/ */
static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct efx_nic *efx = tx_queue->efx; struct efx_nic *efx = tx_queue->efx;
int frag_i, rc, rc2 = NETDEV_TX_OK; int frag_i, rc;
struct tso_state state; struct tso_state state;
/* Find the packet protocol and sanity-check it */ /* Find the packet protocol and sanity-check it */
...@@ -1156,11 +1048,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, ...@@ -1156,11 +1048,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
goto mem_err; goto mem_err;
while (1) { while (1) {
rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); tso_fill_packet_with_fragment(tx_queue, skb, &state);
if (unlikely(rc)) {
rc2 = NETDEV_TX_BUSY;
goto unwind;
}
/* Move onto the next fragment? */ /* Move onto the next fragment? */
if (state.in_len == 0) { if (state.in_len == 0) {
...@@ -1184,6 +1072,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, ...@@ -1184,6 +1072,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
/* Pass off to hardware */ /* Pass off to hardware */
efx_nic_push_buffers(tx_queue); efx_nic_push_buffers(tx_queue);
efx_tx_maybe_stop_queue(tx_queue);
tx_queue->tso_bursts++; tx_queue->tso_bursts++;
return NETDEV_TX_OK; return NETDEV_TX_OK;
...@@ -1192,10 +1082,9 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, ...@@ -1192,10 +1082,9 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
"Out of memory for TSO headers, or DMA mapping error\n"); "Out of memory for TSO headers, or DMA mapping error\n");
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
unwind:
/* Free the DMA mapping we were in the process of writing out */ /* Free the DMA mapping we were in the process of writing out */
if (state.unmap_len) { if (state.unmap_len) {
if (state.unmap_single) if (state.dma_flags & EFX_TX_BUF_MAP_SINGLE)
dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr, dma_unmap_single(&efx->pci_dev->dev, state.unmap_addr,
state.unmap_len, DMA_TO_DEVICE); state.unmap_len, DMA_TO_DEVICE);
else else
...@@ -1204,25 +1093,5 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, ...@@ -1204,25 +1093,5 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
} }
efx_enqueue_unwind(tx_queue); efx_enqueue_unwind(tx_queue);
return rc2; return NETDEV_TX_OK;
}
/*
* Free up all TSO datastructures associated with tx_queue. This
* routine should be called only once the tx_queue is both empty and
* will no longer be used.
*/
static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{
unsigned i;
if (tx_queue->buffer) {
for (i = 0; i <= tx_queue->ptr_mask; ++i)
efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
}
while (tx_queue->tso_headers_free != NULL)
efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
&tx_queue->efx->pci_dev->dev);
} }