Commit d853f111 authored by David S. Miller

Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next

Ben Hutchings says:

====================
1. Refactoring and cleanup in preparation for new hardware support.
2. Some bug fixes for firmware completion handling.  (They're not known
to cause real problems, otherwise I'd be submitting these for net and
stable.)
3. Update to the firmware protocol (MCDI) definitions.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b05930f5 f76fe120
-sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
+sfc-y += efx.o nic.o farch.o falcon.o siena.o tx.o rx.o \
+	   filter.o \
	   selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
	   tenxpress.o txc43128_phy.o falcon_boards.o \
	   mcdi.o mcdi_port.o mcdi_mon.o ptp.o
......
@@ -191,8 +191,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
 *
 *************************************************************************/
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
+static void efx_soft_enable_interrupts(struct efx_nic *efx);
+static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
@@ -248,30 +248,12 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_channel_get_rx_queue(channel);
efx_rx_flush_packet(channel);
-if (rx_queue->enabled)
-efx_fast_push_rx_descriptors(rx_queue);
+efx_fast_push_rx_descriptors(rx_queue);
}
return spent;
}
/* Mark channel as finished processing
*
* Note that since we will not receive further interrupts for this
* channel before we finish processing and call the eventq_read_ack()
* method, there is no need to use the interrupt hold-off timers.
*/
static inline void efx_channel_processed(struct efx_channel *channel)
{
/* The interrupt handler for this channel may set work_pending
* as soon as we acknowledge the events we've seen. Make sure
* it's cleared before then. */
channel->work_pending = false;
smp_wmb();
efx_nic_eventq_read_ack(channel);
}
/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
@@ -316,58 +298,16 @@ static int efx_poll(struct napi_struct *napi, int budget)
/* There is no race here; although napi_disable() will
 * only wait for napi_complete(), this isn't a problem
- * since efx_channel_processed() will have no effect if
+ * since efx_nic_eventq_read_ack() will have no effect if
 * interrupts have already been disabled.
 */
napi_complete(napi);
-efx_channel_processed(channel);
+efx_nic_eventq_read_ack(channel);
}
return spent;
}
/* Process the eventq of the specified channel immediately on this CPU
*
* Disable hardware generated interrupts, wait for any existing
* processing to finish, then directly poll (and ack ) the eventq.
* Finally reenable NAPI and interrupts.
*
* This is for use only during a loopback self-test. It must not
* deliver any packets up the stack as this can result in deadlock.
*/
void efx_process_channel_now(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
BUG_ON(channel->channel >= efx->n_channels);
BUG_ON(!channel->enabled);
BUG_ON(!efx->loopback_selftest);
/* Disable interrupts and wait for ISRs to complete */
efx_nic_disable_interrupts(efx);
if (efx->legacy_irq) {
synchronize_irq(efx->legacy_irq);
efx->legacy_irq_enabled = false;
}
if (channel->irq)
synchronize_irq(channel->irq);
/* Wait for any NAPI processing to complete */
napi_disable(&channel->napi_str);
/* Poll the channel */
efx_process_channel(channel, channel->eventq_mask + 1);
/* Ack the eventq. This may cause an interrupt to be generated
* when they are reenabled */
efx_channel_processed(channel);
napi_enable(&channel->napi_str);
if (efx->legacy_irq)
efx->legacy_irq_enabled = true;
efx_nic_enable_interrupts(efx);
}
/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
@@ -407,11 +347,7 @@ static void efx_start_eventq(struct efx_channel *channel)
netif_dbg(channel->efx, ifup, channel->efx->net_dev,
"chan %d start event queue\n", channel->channel);
-/* The interrupt handler for this channel may set work_pending
- * as soon as we enable it. Make sure it's cleared before
- * then. Similarly, make sure it sees the enabled flag set.
- */
-channel->work_pending = false;
+/* Make sure the NAPI handler sees the enabled flag set */
channel->enabled = true;
smp_wmb();
@@ -583,8 +519,8 @@ static void efx_set_channel_names(struct efx_nic *efx)
efx_for_each_channel(channel, efx)
channel->type->get_name(channel,
-efx->channel_name[channel->channel],
-sizeof(efx->channel_name[0]));
+efx->msi_context[channel->channel].name,
+sizeof(efx->msi_context[0].name));
}
static int efx_probe_channels(struct efx_nic *efx)
@@ -704,30 +640,15 @@ static void efx_stop_datapath(struct efx_nic *efx)
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
-struct pci_dev *dev = efx->pci_dev;
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->port_enabled);
-/* Only perform flush if dma is enabled */
-if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
-rc = efx_nic_flush_queues(efx);
-if (rc && EFX_WORKAROUND_7803(efx)) {
-/* Schedule a reset to recover from the flush failure. The
- * descriptor caches reference memory we're about to free,
- * but falcon_reconfigure_mac_wrapper() won't reconnect
- * the MACs because of the pending reset. */
-netif_err(efx, drv, efx->net_dev,
-"Resetting to recover from flush failure\n");
-efx_schedule_reset(efx, RESET_TYPE_ALL);
-} else if (rc) {
-netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
-} else {
-netif_dbg(efx, drv, efx->net_dev,
-"successfully flushed all queues\n");
-}
+/* Stop RX refill */
+efx_for_each_channel(channel, efx) {
+efx_for_each_channel_rx_queue(rx_queue, channel)
+rx_queue->refill_enabled = false;
}
efx_for_each_channel(channel, efx) {
@@ -741,7 +662,26 @@ static void efx_stop_datapath(struct efx_nic *efx)
efx_stop_eventq(channel);
efx_start_eventq(channel);
}
}
rc = efx->type->fini_dmaq(efx);
if (rc && EFX_WORKAROUND_7803(efx)) {
/* Schedule a reset to recover from the flush failure. The
* descriptor caches reference memory we're about to free,
* but falcon_reconfigure_mac_wrapper() won't reconnect
* the MACs because of the pending reset.
*/
netif_err(efx, drv, efx->net_dev,
"Resetting to recover from flush failure\n");
efx_schedule_reset(efx, RESET_TYPE_ALL);
} else if (rc) {
netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
} else {
netif_dbg(efx, drv, efx->net_dev,
"successfully flushed all queues\n");
}
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue);
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
@@ -809,7 +749,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
efx_device_detach_sync(efx);
efx_stop_all(efx);
-efx_stop_interrupts(efx, true);
+efx_soft_disable_interrupts(efx);
/* Clone channels (where possible) */
memset(other_channel, 0, sizeof(other_channel));
@@ -859,7 +799,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
}
}
-efx_start_interrupts(efx, true);
+efx_soft_enable_interrupts(efx);
efx_start_all(efx);
netif_device_attach(efx->net_dev);
return rc;
@@ -1392,23 +1332,17 @@ static int efx_probe_interrupts(struct efx_nic *efx)
return 0;
}
-/* Enable interrupts, then probe and start the event queues */
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+static void efx_soft_enable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
BUG_ON(efx->state == STATE_DISABLED);
-if (efx->eeh_disabled_legacy_irq) {
-enable_irq(efx->legacy_irq);
-efx->eeh_disabled_legacy_irq = false;
-}
-if (efx->legacy_irq)
-efx->legacy_irq_enabled = true;
-efx_nic_enable_interrupts(efx);
+efx->irq_soft_enabled = true;
+smp_wmb();
efx_for_each_channel(channel, efx) {
-if (!channel->type->keep_eventq || !may_keep_eventq)
+if (!channel->type->keep_eventq)
efx_init_eventq(channel);
efx_start_eventq(channel);
}
@@ -1416,7 +1350,7 @@ static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
efx_mcdi_mode_event(efx);
}
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
@@ -1425,22 +1359,57 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
efx_mcdi_mode_poll(efx);
-efx_nic_disable_interrupts(efx);
-if (efx->legacy_irq) {
+efx->irq_soft_enabled = false;
+smp_wmb();
+if (efx->legacy_irq)
synchronize_irq(efx->legacy_irq);
-efx->legacy_irq_enabled = false;
-}
efx_for_each_channel(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
efx_stop_eventq(channel);
-if (!channel->type->keep_eventq || !may_keep_eventq)
+if (!channel->type->keep_eventq)
efx_fini_eventq(channel);
}
}
static void efx_enable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
BUG_ON(efx->state == STATE_DISABLED);
if (efx->eeh_disabled_legacy_irq) {
enable_irq(efx->legacy_irq);
efx->eeh_disabled_legacy_irq = false;
}
efx->type->irq_enable_master(efx);
efx_for_each_channel(channel, efx) {
if (channel->type->keep_eventq)
efx_init_eventq(channel);
}
efx_soft_enable_interrupts(efx);
}
static void efx_disable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
efx_soft_disable_interrupts(efx);
efx_for_each_channel(channel, efx) {
if (channel->type->keep_eventq)
efx_fini_eventq(channel);
}
efx->type->irq_disable_non_ev(efx);
}
static void efx_remove_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
@@ -2185,22 +2154,11 @@ static int efx_register_netdev(struct efx_nic *efx)
static void efx_unregister_netdev(struct efx_nic *efx)
{
-struct efx_channel *channel;
-struct efx_tx_queue *tx_queue;
if (!efx->net_dev)
return;
BUG_ON(netdev_priv(efx->net_dev) != efx);
/* Free up any skbs still remaining. This has to happen before
* we try to unregister the netdev as running their destructors
* may be needed to get the device ref. count to 0. */
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_release_tx_buffers(tx_queue);
}
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2223,7 +2181,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
EFX_ASSERT_RESET_SERIALISED(efx);
efx_stop_all(efx);
-efx_stop_interrupts(efx, false);
+efx_disable_interrupts(efx);
mutex_lock(&efx->mac_lock);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2262,7 +2220,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
efx->type->reconfigure_mac(efx);
-efx_start_interrupts(efx, false);
+efx_enable_interrupts(efx);
efx_restore_filters(efx);
efx_sriov_reset(efx);
@@ -2527,6 +2485,8 @@ static int efx_init_struct(struct efx_nic *efx,
efx->channel[i] = efx_alloc_channel(efx, i, NULL);
if (!efx->channel[i])
goto fail;
+efx->msi_context[i].efx = efx;
+efx->msi_context[i].index = i;
}
EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
@@ -2579,7 +2539,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
BUG_ON(efx->state == STATE_READY);
cancel_work_sync(&efx->reset_work);
-efx_stop_interrupts(efx, false);
+efx_disable_interrupts(efx);
efx_nic_fini_interrupt(efx);
efx_fini_port(efx);
efx->type->fini(efx);
@@ -2601,7 +2561,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
dev_close(efx->net_dev);
-efx_stop_interrupts(efx, false);
+efx_disable_interrupts(efx);
rtnl_unlock();
efx_sriov_fini(efx);
@@ -2703,7 +2663,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
rc = efx_nic_init_interrupt(efx);
if (rc)
goto fail5;
-efx_start_interrupts(efx, false);
+efx_enable_interrupts(efx);
return 0;
@@ -2824,7 +2784,7 @@ static int efx_pm_freeze(struct device *dev)
efx_device_detach_sync(efx);
efx_stop_all(efx);
-efx_stop_interrupts(efx, false);
+efx_disable_interrupts(efx);
}
rtnl_unlock();
@@ -2839,7 +2799,7 @@ static int efx_pm_thaw(struct device *dev)
rtnl_lock();
if (efx->state != STATE_DISABLED) {
-efx_start_interrupts(efx, false);
+efx_enable_interrupts(efx);
mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx);
@@ -2942,7 +2902,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
efx_device_detach_sync(efx);
efx_stop_all(efx);
-efx_stop_interrupts(efx, false);
+efx_disable_interrupts(efx);
status = PCI_ERS_RESULT_NEED_RESET;
} else {
......
@@ -23,7 +23,6 @@ extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
extern netdev_tx_t
efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
@@ -109,7 +108,6 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
/* Channels */
extern int efx_channel_dummy_op_int(struct efx_channel *channel);
extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern void efx_process_channel_now(struct efx_channel *channel);
extern int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
@@ -155,7 +153,6 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
"channel %d scheduling NAPI poll on CPU%d\n",
channel->channel, raw_smp_processor_id());
-channel->work_pending = true;
napi_schedule(&channel->napi_str);
}
......
@@ -21,7 +21,7 @@
#include "efx.h"
#include "spi.h"
#include "nic.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
@@ -336,7 +336,7 @@ static void falcon_prepare_flush(struct efx_nic *efx)
 *
 * NB most hardware supports MSI interrupts
 */
-inline void falcon_irq_ack_a1(struct efx_nic *efx)
+static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
efx_dword_t reg;
@@ -346,7 +346,7 @@ inline void falcon_irq_ack_a1(struct efx_nic *efx)
}
-irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
+static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
efx_oword_t *int_ker = efx->irq_status.addr;
@@ -367,10 +367,13 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+return IRQ_HANDLED;
/* Check to see if we have a serious error condition */
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
-return efx_nic_fatal_interrupt(efx);
+return efx_farch_fatal_interrupt(efx);
/* Determine interrupting queues, clear interrupt status
 * register and acknowledge the device interrupt.
@@ -1418,7 +1421,7 @@ static int falcon_probe_port(struct efx_nic *efx)
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
-FALCON_MAC_STATS_SIZE);
+FALCON_MAC_STATS_SIZE, GFP_KERNEL);
if (rc)
return rc;
netif_dbg(efx, probe, efx->net_dev,
@@ -1555,7 +1558,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
return falcon_read_nvram(efx, NULL);
}
-static const struct efx_nic_register_test falcon_b0_register_tests[] = {
+static const struct efx_farch_register_test falcon_b0_register_tests[] = {
{ FR_AZ_ADR_REGION,
EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_AZ_RX_CFG,
@@ -1615,8 +1618,8 @@ falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
efx_reset_down(efx, reset_method);
tests->registers =
-efx_nic_test_registers(efx, falcon_b0_register_tests,
+efx_farch_test_registers(efx, falcon_b0_register_tests,
ARRAY_SIZE(falcon_b0_register_tests))
? -1 : 1;
rc = falcon_reset_hw(efx, reset_method);
@@ -1981,7 +1984,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
rc = -ENODEV;
-if (efx_nic_fpga_ver(efx) != 0) {
+if (efx_farch_fpga_ver(efx) != 0) {
netif_err(efx, probe, efx->net_dev,
"Falcon FPGA not supported\n");
goto fail1;
@@ -2035,7 +2038,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
}
/* Allocate memory for INT_KER */
-rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+GFP_KERNEL);
if (rc)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -2214,7 +2218,7 @@ static int falcon_init_nic(struct efx_nic *efx)
efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
}
-efx_nic_init_common(efx);
+efx_farch_init_common(efx);
return 0;
}
@@ -2339,7 +2343,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.remove = falcon_remove_nic,
.init = falcon_init_nic,
.dimension_resources = falcon_dimension_resources,
-.fini = efx_port_dummy_op_void,
+.fini = falcon_irq_ack_a1,
.monitor = falcon_monitor,
.map_reset_reason = falcon_map_reset_reason,
.map_reset_flags = falcon_map_reset_flags,
@@ -2347,6 +2351,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
.handle_global_event = falcon_handle_global_event,
+.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
.update_stats = falcon_update_nic_stats,
@@ -2362,6 +2367,28 @@ const struct efx_nic_type falcon_a1_nic_type = {
.set_wol = falcon_set_wol,
.resume_wol = efx_port_dummy_op_void,
.test_nvram = falcon_test_nvram,
.irq_enable_master = efx_farch_irq_enable_master,
.irq_test_generate = efx_farch_irq_test_generate,
.irq_disable_non_ev = efx_farch_irq_disable_master,
.irq_handle_msi = efx_farch_msi_interrupt,
.irq_handle_legacy = falcon_legacy_interrupt_a1,
.tx_probe = efx_farch_tx_probe,
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
.rx_push_indir_table = efx_farch_rx_push_indir_table,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
.rx_write = efx_farch_rx_write,
.rx_defer_refill = efx_farch_rx_defer_refill,
.ev_probe = efx_farch_ev_probe,
.ev_init = efx_farch_ev_init,
.ev_fini = efx_farch_ev_fini,
.ev_remove = efx_farch_ev_remove,
.ev_process = efx_farch_ev_process,
.ev_read_ack = efx_farch_ev_read_ack,
.ev_test_generate = efx_farch_ev_test_generate,
.revision = EFX_REV_FALCON_A1,
.mem_map_size = 0x20000,
@@ -2377,6 +2404,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.phys_addr_channels = 4,
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM,
+.mcdi_max_ver = -1,
};
const struct efx_nic_type falcon_b0_nic_type = {
@@ -2392,6 +2420,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
.handle_global_event = falcon_handle_global_event,
+.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
.update_stats = falcon_update_nic_stats,
@@ -2408,6 +2437,28 @@ const struct efx_nic_type falcon_b0_nic_type = {
.resume_wol = efx_port_dummy_op_void,
.test_chip = falcon_b0_test_chip,
.test_nvram = falcon_test_nvram,
.irq_enable_master = efx_farch_irq_enable_master,
.irq_test_generate = efx_farch_irq_test_generate,
.irq_disable_non_ev = efx_farch_irq_disable_master,
.irq_handle_msi = efx_farch_msi_interrupt,
.irq_handle_legacy = efx_farch_legacy_interrupt,
.tx_probe = efx_farch_tx_probe,
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
.rx_push_indir_table = efx_farch_rx_push_indir_table,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
.rx_write = efx_farch_rx_write,
.rx_defer_refill = efx_farch_rx_defer_refill,
.ev_probe = efx_farch_ev_probe,
.ev_init = efx_farch_ev_init,
.ev_fini = efx_farch_ev_fini,
.ev_remove = efx_farch_ev_remove,
.ev_process = efx_farch_ev_process,
.ev_read_ack = efx_farch_ev_read_ack,
.ev_test_generate = efx_farch_ev_test_generate,
.revision = EFX_REV_FALCON_B0,
/* Map everything up to and including the RSS indirection
@@ -2431,5 +2482,6 @@ const struct efx_nic_type falcon_b0_nic_type = {
 * channels */
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
+.mcdi_max_ver = -1,
};
/****************************************************************************
* Driver for Solarflare Solarstorm network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
/* Falcon-architecture (SFC4000 and SFC9000-family) support */
/**************************************************************************
*
* Configurable values
*
**************************************************************************
*/
/* This is set to 16 for a good reason. In summary, if larger than
* 16, the descriptor cache holds more than a default socket
* buffer's worth of packets (for UDP we can only have at most one
* socket buffer's worth outstanding). This combined with the fact
* that we only get 1 TX event per descriptor cache means the NIC
* goes idle.
*/
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1
#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
/* If EFX_MAX_INT_ERRORS internal errors occur within
* EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
* disable it.
*/
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5
/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4
/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST 0x000101
#define _EFX_CHANNEL_MAGIC_FILL 0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
#define EFX_CHANNEL_MAGIC_TEST(_channel) \
_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
(_tx_queue)->queue)
static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);
/**************************************************************************
*
* Hardware access
*
**************************************************************************/
static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
unsigned int index)
{
efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
value, index);
}
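/* Return true if the two owords differ in any bit covered by the mask */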
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
const efx_oword_t *mask)
{
return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
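/* Check that every testable bit of each register can be set and cleared
 * in isolation; the original register contents are restored afterwards.
 * Returns -EIO on the first mismatch.
 */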
int efx_farch_test_registers(struct efx_nic *efx,
const struct efx_farch_register_test *regs,
size_t n_regs)
{
unsigned address = 0, i, j;
efx_oword_t mask, imask, original, reg, buf;
for (i = 0; i < n_regs; ++i) {
address = regs[i].address;
mask = imask = regs[i].mask;
EFX_INVERT_OWORD(imask);
efx_reado(efx, &original, address);
/* bit sweep on and off */
for (j = 0; j < 128; j++) {
if (!EFX_EXTRACT_OWORD32(mask, j, j))
continue;
/* Test this testable bit can be set in isolation */
EFX_AND_OWORD(reg, original, mask);
EFX_SET_OWORD32(reg, j, j, 1);
efx_writeo(efx, &reg, address);
efx_reado(efx, &buf, address);
if (efx_masked_compare_oword(&reg, &buf, &mask))
goto fail;
/* Test this testable bit can be cleared in isolation */
EFX_OR_OWORD(reg, original, mask);
EFX_SET_OWORD32(reg, j, j, 0);
efx_writeo(efx, &reg, address);
efx_reado(efx, &buf, address);
if (efx_masked_compare_oword(&reg, &buf, &mask))
goto fail;
}
efx_writeo(efx, &original, address);
}
return 0;
fail:
netif_err(efx, hw, efx->net_dev,
"wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
" at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
return -EIO;
}
/**************************************************************************
*
* Special buffer handling
* Special buffers are used for event queues and the TX and RX
* descriptor rings.
*
*************************************************************************/
/*
* Initialise a special buffer
*
* This will define a buffer (previously allocated via
* efx_alloc_special_buffer()) in the buffer table, allowing
* it to be used for event queues, descriptor rings etc.
*/
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
efx_qword_t buf_desc;
unsigned int index;
dma_addr_t dma_addr;
int i;
EFX_BUG_ON_PARANOID(!buffer->buf.addr);
/* Write buffer descriptors to NIC */
for (i = 0; i < buffer->entries; i++) {
index = buffer->index + i;
dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
netif_dbg(efx, probe, efx->net_dev,
"mapping special buffer %d at %llx\n",
index, (unsigned long long)dma_addr);
EFX_POPULATE_QWORD_3(buf_desc,
FRF_AZ_BUF_ADR_REGION, 0,
FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
FRF_AZ_BUF_OWNER_ID_FBUF, 0);
efx_write_buf_tbl(efx, &buf_desc, index);
}
}
/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
efx_oword_t buf_tbl_upd;
unsigned int start = buffer->index;
unsigned int end = (buffer->index + buffer->entries - 1);
if (!buffer->entries)
return;
netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
buffer->index, buffer->index + buffer->entries - 1);
EFX_POPULATE_OWORD_4(buf_tbl_upd,
FRF_AZ_BUF_UPD_CMD, 0,
FRF_AZ_BUF_CLR_CMD, 1,
FRF_AZ_BUF_CLR_END_ID, end,
FRF_AZ_BUF_CLR_START_ID, start);
efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}
/*
* Allocate a new special buffer
*
* This allocates memory for a new buffer, clears it and allocates a
* new buffer ID range. It does not write into the buffer table.
*
* This call will allocate 4KB buffers, since 8KB buffers can't be
* used for event queues and descriptor rings.
*/
static int efx_alloc_special_buffer(struct efx_nic *efx,
struct efx_special_buffer *buffer,
unsigned int len)
{
len = ALIGN(len, EFX_BUF_SIZE);
if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
return -ENOMEM;
buffer->entries = len / EFX_BUF_SIZE;
BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
/* Select new buffer ID */
buffer->index = efx->next_buffer_table;
efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
BUG_ON(efx_sriov_enabled(efx) &&
efx->vf_buftbl_base < efx->next_buffer_table);
#endif
netif_dbg(efx, probe, efx->net_dev,
"allocating special buffers %d-%d at %llx+%x "
"(virt %p phys %llx)\n", buffer->index,
buffer->index + buffer->entries - 1,
(u64)buffer->buf.dma_addr, len,
buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
return 0;
}
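/* Release the DMA memory backing a special buffer */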
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
if (!buffer->buf.addr)
return;
netif_dbg(efx, hw, efx->net_dev,
"deallocating special buffers %d-%d at %llx+%x "
"(virt %p phys %llx)\n", buffer->index,
buffer->index + buffer->entries - 1,
(u64)buffer->buf.dma_addr, buffer->buf.len,
buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
efx_nic_free_buffer(efx, &buffer->buf);
buffer->entries = 0;
}
/**************************************************************************
*
* TX path
*
**************************************************************************/
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
unsigned write_ptr;
efx_dword_t reg;
write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
efx_writed_page(tx_queue->efx, &reg,
FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
const efx_qword_t *txd)
{
unsigned write_ptr;
efx_oword_t reg;
BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
FRF_AZ_TX_DESC_WPTR, write_ptr);
reg.qword[0] = *txd;
efx_writeo_page(tx_queue->efx, &reg,
FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
/* For each entry inserted into the software descriptor ring, create a
* descriptor in the hardware TX descriptor ring (in host memory), and
* write a doorbell.
*/
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer;
efx_qword_t *txd;
unsigned write_ptr;
unsigned old_write_count = tx_queue->write_count;
BUG_ON(tx_queue->write_count == tx_queue->insert_count);
do {
write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[write_ptr];
txd = efx_tx_desc(tx_queue, write_ptr);
++tx_queue->write_count;
/* Create TX descriptor ring entry */
BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
EFX_POPULATE_QWORD_4(*txd,
FSF_AZ_TX_KER_CONT,
buffer->flags & EFX_TX_BUF_CONT,
FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
FSF_AZ_TX_KER_BUF_REGION, 0,
FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
} while (tx_queue->write_count != tx_queue->insert_count);
wmb(); /* Ensure descriptors are written before they are fetched */
if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
txd = efx_tx_desc(tx_queue,
old_write_count & tx_queue->ptr_mask);
efx_farch_push_tx_desc(tx_queue, txd);
++tx_queue->pushes;
} else {
efx_farch_notify_tx_desc(tx_queue);
}
}
/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
unsigned entries;
entries = tx_queue->ptr_mask + 1;
return efx_alloc_special_buffer(efx, &tx_queue->txd,
entries * sizeof(efx_qword_t));
}
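/* Pin the TX descriptor ring and configure the hardware TX queue:
 * descriptor pointer table entry, checksum offload and pacing.
 */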
void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
efx_oword_t reg;
/* Pin TX descriptor ring */
efx_init_special_buffer(efx, &tx_queue->txd);
/* Push TX descriptor ring to card */
EFX_POPULATE_OWORD_10(reg,
FRF_AZ_TX_DESCQ_EN, 1,
FRF_AZ_TX_ISCSI_DDIG_EN, 0,
FRF_AZ_TX_ISCSI_HDIG_EN, 0,
FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
FRF_AZ_TX_DESCQ_EVQ_ID,
tx_queue->channel->channel,
FRF_AZ_TX_DESCQ_OWNER_ID, 0,
FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
FRF_AZ_TX_DESCQ_SIZE,
__ffs(tx_queue->txd.entries),
FRF_AZ_TX_DESCQ_TYPE, 0,
FRF_BZ_TX_NON_IP_DROP_DIS, 1);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
!csum);
}
efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
tx_queue->queue);
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
/* Only 128 bits in this register */
BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
__clear_bit_le(tx_queue->queue, &reg);
else
__set_bit_le(tx_queue->queue, &reg);
efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
}
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
EFX_POPULATE_OWORD_1(reg,
FRF_BZ_TX_PACE,
(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
FFE_BZ_TX_PACE_OFF :
FFE_BZ_TX_PACE_RESERVED);
efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
tx_queue->queue);
}
}
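/* Ask the hardware to flush a TX descriptor queue */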
static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
efx_oword_t tx_flush_descq;
WARN_ON(atomic_read(&tx_queue->flush_outstanding));
atomic_set(&tx_queue->flush_outstanding, 1);
EFX_POPULATE_OWORD_2(tx_flush_descq,
FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}
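/* Disable a hardware TX queue and unpin its descriptor ring */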
void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
efx_oword_t tx_desc_ptr;
/* Remove TX descriptor ring from card */
EFX_ZERO_OWORD(tx_desc_ptr);
efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
tx_queue->queue);
/* Unpin TX descriptor ring */
efx_fini_special_buffer(efx, &tx_queue->txd);
}
/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{
efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
*
* RX path
*
**************************************************************************/
/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
struct efx_rx_buffer *rx_buf;
efx_qword_t *rxd;
rxd = efx_rx_desc(rx_queue, index);
rx_buf = efx_rx_buffer(rx_queue, index);
EFX_POPULATE_QWORD_3(*rxd,
FSF_AZ_RX_KER_BUF_SIZE,
rx_buf->len -
rx_queue->efx->type->rx_buffer_padding,
FSF_AZ_RX_KER_BUF_REGION, 0,
FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}
/* This writes to the RX_DESC_WPTR register for the specified receive
* descriptor ring.
*/
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
efx_dword_t reg;
unsigned write_ptr;
while (rx_queue->notified_count != rx_queue->added_count) {
efx_farch_build_rx_desc(
rx_queue,
rx_queue->notified_count & rx_queue->ptr_mask);
++rx_queue->notified_count;
}
wmb();
write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
efx_rx_queue_index(rx_queue));
}
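/* Allocate hardware resources for an RX queue */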
int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
unsigned entries;
entries = rx_queue->ptr_mask + 1;
return efx_alloc_special_buffer(efx, &rx_queue->rxd,
entries * sizeof(efx_qword_t));
}
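/* Pin the RX descriptor ring and configure the hardware RX queue */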
void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
efx_oword_t rx_desc_ptr;
struct efx_nic *efx = rx_queue->efx;
bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
bool iscsi_digest_en = is_b0;
bool jumbo_en;
/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
* DMA to continue after a PCIe page boundary (and scattering
* is not possible). In Falcon B0 and Siena, it enables
* scatter.
*/
jumbo_en = !is_b0 || efx->rx_scatter;
netif_dbg(efx, hw, efx->net_dev,
"RX queue %d ring in special buffers %d-%d\n",
efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
rx_queue->rxd.index + rx_queue->rxd.entries - 1);
rx_queue->scatter_n = 0;
/* Pin RX descriptor ring */
efx_init_special_buffer(efx, &rx_queue->rxd);
/* Push RX descriptor ring to card */
EFX_POPULATE_OWORD_10(rx_desc_ptr,
FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
FRF_AZ_RX_DESCQ_EVQ_ID,
efx_rx_queue_channel(rx_queue)->channel,
FRF_AZ_RX_DESCQ_OWNER_ID, 0,
FRF_AZ_RX_DESCQ_LABEL,
efx_rx_queue_index(rx_queue),
FRF_AZ_RX_DESCQ_SIZE,
__ffs(rx_queue->rxd.entries),
FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
FRF_AZ_RX_DESCQ_EN, 1);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
efx_rx_queue_index(rx_queue));
}
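/* Ask the hardware to flush an RX descriptor queue */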
static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
efx_oword_t rx_flush_descq;
EFX_POPULATE_OWORD_2(rx_flush_descq,
FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
FRF_AZ_RX_FLUSH_DESCQ,
efx_rx_queue_index(rx_queue));
efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}
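/* Disable a hardware RX queue and unpin its descriptor ring */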
void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{
efx_oword_t rx_desc_ptr;
struct efx_nic *efx = rx_queue->efx;
/* Remove RX descriptor ring from card */
EFX_ZERO_OWORD(rx_desc_ptr);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
efx_rx_queue_index(rx_queue));
/* Unpin RX descriptor ring */
efx_fini_special_buffer(efx, &rx_queue->rxd);
}
/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{
efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
*
* Flush handling
*
**************************************************************************/
/* efx_farch_flush_queues() must be woken up when all flushes are completed,
* or more RX flushes can be kicked off.
*/
static bool efx_farch_flush_wake(struct efx_nic *efx)
{
/* Ensure that all updates are visible to efx_farch_flush_queues() */
smp_mb();
return (atomic_read(&efx->drain_pending) == 0 ||
(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
&& atomic_read(&efx->rxq_flush_pending) > 0));
}
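/* Check the TX descriptor pointer table for queues whose flush completed
 * without a flush-done event, and generate the corresponding TX_DRAIN
 * driver events for them.  Returns true if all TX flushes are complete.
 */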
static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
bool i = true;
efx_oword_t txd_ptr_tbl;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_reado_table(efx, &txd_ptr_tbl,
FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
if (EFX_OWORD_FIELD(txd_ptr_tbl,
FRF_AZ_TX_DESCQ_FLUSH) ||
EFX_OWORD_FIELD(txd_ptr_tbl,
FRF_AZ_TX_DESCQ_EN)) {
netif_dbg(efx, hw, efx->net_dev,
"flush did not complete on TXQ %d\n",
tx_queue->queue);
i = false;
} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
1, 0)) {
/* The flush is complete, but we didn't
* receive a flush completion event
*/
netif_dbg(efx, hw, efx->net_dev,
"flush complete on TXQ %d, so drain "
"the queue\n", tx_queue->queue);
/* Don't need to increment drain_pending as it
* has already been incremented for the queues
* which did not drain
*/
efx_farch_magic_event(channel,
EFX_CHANNEL_MAGIC_TX_DRAIN(
tx_queue));
}
}
}
return i;
}
/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
* are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{
unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
struct efx_tx_queue *tx_queue;
int rc = 0;
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
atomic_inc(&efx->drain_pending);
efx_farch_flush_tx_queue(tx_queue);
}
efx_for_each_channel_rx_queue(rx_queue, channel) {
atomic_inc(&efx->drain_pending);
rx_queue->flush_pending = true;
atomic_inc(&efx->rxq_flush_pending);
}
}
while (timeout && atomic_read(&efx->drain_pending) > 0) {
/* If SRIOV is enabled, then offload receive queue flushing to
* the firmware (though we will still have to poll for
* completion). If that fails, fall back to the old scheme.
*/
if (efx_sriov_enabled(efx)) {
rc = efx_mcdi_flush_rxqs(efx);
if (!rc)
goto wait;
}
/* The hardware supports four concurrent rx flushes, each of
* which may need to be retried if there is an outstanding
* descriptor fetch
*/
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel) {
if (atomic_read(&efx->rxq_flush_outstanding) >=
EFX_RX_FLUSH_COUNT)
break;
if (rx_queue->flush_pending) {
rx_queue->flush_pending = false;
atomic_dec(&efx->rxq_flush_pending);
atomic_inc(&efx->rxq_flush_outstanding);
efx_farch_flush_rx_queue(rx_queue);
}
}
}
wait:
timeout = wait_event_timeout(efx->flush_wq,
efx_farch_flush_wake(efx),
timeout);
}
if (atomic_read(&efx->drain_pending) &&
!efx_check_tx_flush_complete(efx)) {
netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
"(rx %d+%d)\n", atomic_read(&efx->drain_pending),
atomic_read(&efx->rxq_flush_outstanding),
atomic_read(&efx->rxq_flush_pending));
rc = -ETIMEDOUT;
atomic_set(&efx->drain_pending, 0);
atomic_set(&efx->rxq_flush_pending, 0);
atomic_set(&efx->rxq_flush_outstanding, 0);
}
return rc;
}
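/* Flush and shut down all TX and RX queues.  The hardware flush is
 * skipped during EEH recovery or when bus mastering is disabled.
 */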
int efx_farch_fini_dmaq(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
int rc = 0;
/* Do not attempt to write to the NIC during EEH recovery */
if (efx->state != STATE_RECOVERY) {
/* Only perform flush if DMA is enabled */
if (efx->pci_dev->is_busmaster) {
efx->type->prepare_flush(efx);
rc = efx_farch_do_flush(efx);
efx->type->finish_flush(efx);
}
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_farch_rx_fini(rx_queue);
efx_for_each_channel_tx_queue(tx_queue, channel)
efx_farch_tx_fini(tx_queue);
}
}
return rc;
}
/**************************************************************************
*
* Event queue processing
* Event queues are processed by per-channel tasklets.
*
**************************************************************************/
/* Update a channel's event queue's read pointer (RPTR) register
*
* This writes the EVQ_RPTR_REG register for the specified channel's
* event queue.
*/
void efx_farch_ev_read_ack(struct efx_channel *channel)
{
efx_dword_t reg;
struct efx_nic *efx = channel->efx;
EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
channel->eventq_read_ptr & channel->eventq_mask);
/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
* of 4 bytes, but it is really 16 bytes just like later revisions.
*/
efx_writed(efx, &reg,
efx->type->evq_rptr_tbl_base +
FR_BZ_EVQ_RPTR_STEP * channel->channel);
}
/* Use HW to insert a SW defined event */
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
efx_qword_t *event)
{
efx_oword_t drv_ev_reg;
BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
FRF_AZ_DRV_EV_DATA_WIDTH != 64);
drv_ev_reg.u32[0] = event->u32[0];
drv_ev_reg.u32[1] = event->u32[1];
drv_ev_reg.u32[2] = 0;
drv_ev_reg.u32[3] = 0;
EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
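/* Generate a driver-created ("magic") event on the given channel */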
static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
{
efx_qword_t event;
EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
FSE_AZ_EV_CODE_DRV_GEN_EV,
FSF_AZ_DRV_GEN_EV_MAGIC, magic);
efx_farch_generate_event(channel->efx, channel->channel, &event);
}
/* Handle a transmit completion event
*
* The NIC batches TX completion events; the message we receive is of
* the form "complete all TX events up to this index".
*/
static int
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
struct efx_tx_queue *tx_queue;
struct efx_nic *efx = channel->efx;
int tx_packets = 0;
if (unlikely(ACCESS_ONCE(efx->reset_pending)))
return 0;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
/* Transmit completion */
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = efx_channel_get_tx_queue(
channel, tx_ev_q_label % EFX_TXQ_TYPES);
tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
tx_queue->ptr_mask);
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = efx_channel_get_tx_queue(
channel, tx_ev_q_label % EFX_TXQ_TYPES);
netif_tx_lock(efx->net_dev);
efx_farch_notify_tx_desc(tx_queue);
netif_tx_unlock(efx->net_dev);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
EFX_WORKAROUND_10727(efx)) {
efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
} else {
netif_err(efx, tx_err, efx->net_dev,
"channel %d unexpected TX event "
EFX_QWORD_FMT"\n", channel->channel,
EFX_QWORD_VAL(*event));
}
return tx_packets;
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
const efx_qword_t *event)
{
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_nic *efx = rx_queue->efx;
bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
bool rx_ev_other_err, rx_ev_pause_frm;
bool rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned rx_ev_pkt_type;
rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
/* Every error apart from tobe_disc and pause_frm */
rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
/* Count errors that are not in MAC stats. Ignore expected
* checksum errors during self-test. */
if (rx_ev_frm_trunc)
++channel->n_rx_frm_trunc;
else if (rx_ev_tobe_disc)
++channel->n_rx_tobe_disc;
else if (!efx->loopback_selftest) {
if (rx_ev_ip_hdr_chksum_err)
++channel->n_rx_ip_hdr_chksum_err;
else if (rx_ev_tcp_udp_chksum_err)
++channel->n_rx_tcp_udp_chksum_err;
}
/* TOBE_DISC is expected on unicast mismatches; don't print out an
* error message. FRM_TRUNC indicates RXDP dropped the packet due
* to a FIFO overflow.
*/
#ifdef DEBUG
if (rx_ev_other_err && net_ratelimit()) {
netif_dbg(efx, rx_err, efx->net_dev,
" RX queue %d unexpected RX event "
EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
rx_ev_ip_hdr_chksum_err ?
" [IP_HDR_CHKSUM_ERR]" : "",
rx_ev_tcp_udp_chksum_err ?
" [TCP_UDP_CHKSUM_ERR]" : "",
rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
rx_ev_drib_nib ? " [DRIB_NIB]" : "",
rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
rx_ev_pause_frm ? " [PAUSE]" : "");
}
#endif
/* The frame must be discarded if any of these are true. */
return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
rx_ev_tobe_disc | rx_ev_pause_frm) ?
EFX_RX_PKT_DISCARD : 0;
}
/* Handle receive events that are not in-order. Return true if this
* can be handled as a partial packet discard, false if it's more
* serious.
*/
static bool
efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_nic *efx = rx_queue->efx;
unsigned expected, dropped;
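/* If the bad index points at the last descriptor of the scatter
* sequence currently being assembled, the hardware truncated a
* scattered packet because it ran out of descriptors; count it and
* let the caller discard the partial packet.
*/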
if (rx_queue->scatter_n &&
index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
rx_queue->ptr_mask)) {
++channel->n_rx_nodesc_trunc;
return true;
}
expected = rx_queue->removed_count & rx_queue->ptr_mask;
dropped = (index - expected) & rx_queue->ptr_mask;
netif_info(efx, rx_err, efx->net_dev,
"dropped %d events (index=%d expected=%d)\n",
dropped, index, expected);
efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
return false;
}
/* Handle a packet received event
*
* The NIC gives a "discard" flag if it's a unicast packet with the
* wrong destination address.
* Also "is multicast" and "matches multicast filter" flags can be used to
* discard non-matching multicast packets.
*/
static void
efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned expected_ptr;
bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
u16 flags;
struct efx_rx_queue *rx_queue;
struct efx_nic *efx = channel->efx;
if (unlikely(ACCESS_ONCE(efx->reset_pending)))
return;
rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
channel->channel);
rx_queue = efx_channel_get_rx_queue(channel);
rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
rx_queue->ptr_mask);
/* Check for partial drops and other errors */
if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
if (rx_ev_desc_ptr != expected_ptr &&
!efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
return;
/* Discard all pending fragments */
if (rx_queue->scatter_n) {
efx_rx_packet(
rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
rx_queue->removed_count += rx_queue->scatter_n;
rx_queue->scatter_n = 0;
}
/* Return if there is no new fragment */
if (rx_ev_desc_ptr != expected_ptr)
return;
/* Discard new fragment if not SOP */
if (!rx_ev_sop) {
efx_rx_packet(
rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
1, 0, EFX_RX_PKT_DISCARD);
++rx_queue->removed_count;
return;
}
}
++rx_queue->scatter_n;
if (rx_ev_cont)
return;
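/* Only the final (non-continuation) event of a scattered packet
* carries the byte count and packet status, so everything below
* runs once per completed packet.
*/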
rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
if (likely(rx_ev_pkt_ok)) {
/* If packet is marked as OK then we can rely on the
* hardware checksum and classification.
*/
flags = 0;
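/* This switch falls through deliberately: TCP gets both
* EFX_RX_PKT_TCP and EFX_RX_PKT_CSUMMED, UDP gets only
* EFX_RX_PKT_CSUMMED, and other header types leave flags clear.
*/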
switch (rx_ev_hdr_type) {
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
flags |= EFX_RX_PKT_TCP;
/* fall through */
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
flags |= EFX_RX_PKT_CSUMMED;
/* fall through */
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
break;
}
} else {
flags = efx_farch_handle_rx_not_ok(rx_queue, event);
}
/* Detect multicast packets that didn't match the filter */
rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
if (rx_ev_mcast_pkt) {
unsigned int rx_ev_mcast_hash_match =
EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
if (unlikely(!rx_ev_mcast_hash_match)) {
++channel->n_rx_mcast_mismatch;
flags |= EFX_RX_PKT_DISCARD;
}
}
channel->irq_mod_score += 2;
/* Handle received packet */
efx_rx_packet(rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
rx_queue->scatter_n, rx_ev_byte_cnt, flags);
rx_queue->removed_count += rx_queue->scatter_n;
rx_queue->scatter_n = 0;
}
/* If this flush done event corresponds to a &struct efx_tx_queue, then
* send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
* of all transmit completions.
*/
static void
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_tx_queue *tx_queue;
int qid;
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
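/* The subdata field holds the absolute TX queue number; map it back
* to a channel and per-channel queue type. The cmpxchg ensures the
* drain event is generated only if a flush was actually outstanding,
* so a repeated flush-done event cannot unbalance the accounting.
*/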
if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
qid % EFX_TXQ_TYPES);
if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
efx_farch_magic_event(tx_queue->channel,
EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
}
}
}
/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
* was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
* the RX queue back to the mask of RX queues in need of flushing.
*/
static void
efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
int qid;
bool failed;
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
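/* A failed flush is marked pending so it can be retried; a successful
* one is turned into an RX drain magic event on the owning channel so
* that the drain accounting in efx_farch_handle_drain_event() runs in
* that channel's event path.
*/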
if (qid >= efx->n_channels)
return;
channel = efx_get_channel(efx, qid);
if (!efx_channel_has_rx_queue(channel))
return;
rx_queue = efx_channel_get_rx_queue(channel);
if (failed) {
netif_info(efx, hw, efx->net_dev,
"RXQ %d flush retry\n", qid);
rx_queue->flush_pending = true;
atomic_inc(&efx->rxq_flush_pending);
} else {
efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
}
atomic_dec(&efx->rxq_flush_outstanding);
if (efx_farch_flush_wake(efx))
wake_up(&efx->flush_wq);
}
static void
efx_farch_handle_drain_event(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
WARN_ON(atomic_read(&efx->drain_pending) == 0);
atomic_dec(&efx->drain_pending);
if (efx_farch_flush_wake(efx))
wake_up(&efx->flush_wq);
}
static void efx_farch_handle_generated_event(struct efx_channel *channel,
efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
struct efx_rx_queue *rx_queue =
efx_channel_has_rx_queue(channel) ?
efx_channel_get_rx_queue(channel) : NULL;
unsigned magic, code;
magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
code = _EFX_CHANNEL_MAGIC_CODE(magic);
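/* TEST, FILL and RX_DRAIN are matched against the full per-queue
* magic value; TX_DRAIN is matched on the code alone, since the
* handler only needs to know that some TX queue on this channel
* has drained.
*/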
if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
channel->event_test_cpu = raw_smp_processor_id();
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
* queue. Refill it here */
efx_fast_push_rx_descriptors(rx_queue);
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
efx_farch_handle_drain_event(channel);
} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
efx_farch_handle_drain_event(channel);
} else {
netif_dbg(efx, hw, efx->net_dev, "channel %d received "
"generated event "EFX_QWORD_FMT"\n",
channel->channel, EFX_QWORD_VAL(*event));
}
}
static void
efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
unsigned int ev_sub_code;
unsigned int ev_sub_data;
ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
switch (ev_sub_code) {
case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_tx_flush_done(efx, event);
efx_sriov_tx_flush_done(efx, event);
break;
case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_farch_handle_rx_flush_done(efx, event);
efx_sriov_rx_flush_done(efx, event);
break;
case FSE_AZ_EVQ_INIT_DONE_EV:
netif_dbg(efx, hw, efx->net_dev,
"channel %d EVQ %d initialised\n",
channel->channel, ev_sub_data);
break;
case FSE_AZ_SRM_UPD_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev,
"channel %d SRAM update done\n", channel->channel);
break;
case FSE_AZ_WAKE_UP_EV:
netif_vdbg(efx, hw, efx->net_dev,
"channel %d RXQ %d wakeup event\n",
channel->channel, ev_sub_data);
break;
case FSE_AZ_TIMER_EV:
netif_vdbg(efx, hw, efx->net_dev,
"channel %d RX queue %d timer expired\n",
channel->channel, ev_sub_data);
break;
case FSE_AA_RX_RECOVER_EV:
netif_err(efx, rx_err, efx->net_dev,
"channel %d seen DRIVER RX_RESET event. "
"Resetting.\n", channel->channel);
atomic_inc(&efx->rx_reset);
efx_schedule_reset(efx,
EFX_WORKAROUND_6555(efx) ?
RESET_TYPE_RX_RECOVERY :
RESET_TYPE_DISABLE);
break;
case FSE_BZ_RX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
netif_err(efx, rx_err, efx->net_dev,
"RX DMA Q %d reports descriptor fetch error."
" RX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
} else
efx_sriov_desc_fetch_err(efx, ev_sub_data);
break;
case FSE_BZ_TX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
netif_err(efx, tx_err, efx->net_dev,
"TX DMA Q %d reports descriptor fetch error."
" TX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
} else
efx_sriov_desc_fetch_err(efx, ev_sub_data);
break;
default:
netif_vdbg(efx, hw, efx->net_dev,
"channel %d unknown driver event code %d "
"data %04x\n", channel->channel, ev_sub_code,
ev_sub_data);
break;
}
}
int efx_farch_ev_process(struct efx_channel *channel, int budget)
{
struct efx_nic *efx = channel->efx;
unsigned int read_ptr;
efx_qword_t event, *p_event;
int ev_code;
int tx_packets = 0;
int spent = 0;
read_ptr = channel->eventq_read_ptr;
for (;;) {
p_event = efx_event(channel, read_ptr);
event = *p_event;
if (!efx_event_present(&event))
/* End of events */
break;
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
"channel %d event is "EFX_QWORD_FMT"\n",
channel->channel, EFX_QWORD_VAL(event));
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
++read_ptr;
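/* read_ptr is free-running; efx_event() masks it with eventq_mask,
* so wrap-around needs no special handling here.
*/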
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
switch (ev_code) {
case FSE_AZ_EV_CODE_RX_EV:
efx_farch_handle_rx_event(channel, &event);
if (++spent == budget)
goto out;
break;
case FSE_AZ_EV_CODE_TX_EV:
tx_packets += efx_farch_handle_tx_event(channel,
&event);
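/* TX completions are batched: one event may cover many packets, so
* the budget is enforced on completed packets rather than on events.
*/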
if (tx_packets > efx->txq_entries) {
spent = budget;
goto out;
}
break;
case FSE_AZ_EV_CODE_DRV_GEN_EV:
efx_farch_handle_generated_event(channel, &event);
break;
case FSE_AZ_EV_CODE_DRIVER_EV:
efx_farch_handle_driver_event(channel, &event);
break;
case FSE_CZ_EV_CODE_USER_EV:
efx_sriov_event(channel, &event);
break;
case FSE_CZ_EV_CODE_MCDI_EV:
efx_mcdi_process_event(channel, &event);
break;
case FSE_AZ_EV_CODE_GLOBAL_EV:
if (efx->type->handle_global_event &&
efx->type->handle_global_event(channel, &event))
break;
/* else fall through */
default:
netif_err(channel->efx, hw, channel->efx->net_dev,
"channel %d unknown event type %d (data "
EFX_QWORD_FMT ")\n", channel->channel,
ev_code, EFX_QWORD_VAL(event));
}
}
out:
channel->eventq_read_ptr = read_ptr;
return spent;
}
/* Allocate buffer table entries for event queue */
int efx_farch_ev_probe(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
unsigned entries;
entries = channel->eventq_mask + 1;
return efx_alloc_special_buffer(efx, &channel->eventq,
entries * sizeof(efx_qword_t));
}
void efx_farch_ev_init(struct efx_channel *channel)
{
efx_oword_t reg;
struct efx_nic *efx = channel->efx;
netif_dbg(efx, hw, efx->net_dev,
"channel %d event queue in special buffers %d-%d\n",
channel->channel, channel->eventq.index,
channel->eventq.index + channel->eventq.entries - 1);
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
EFX_POPULATE_OWORD_3(reg,
FRF_CZ_TIMER_Q_EN, 1,
FRF_CZ_HOST_NOTIFY_MODE, 0,
FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
}
/* Pin event queue buffer */
efx_init_special_buffer(efx, &channel->eventq);
/* Fill event queue with all ones (i.e. empty events) */
memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
/* Push event queue to card */
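/* The EVQ_SIZE field is programmed with log2 of the entry count;
* eventq.entries is always a power of two, so __ffs() yields exactly
* that value.
*/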
EFX_POPULATE_OWORD_3(reg,
FRF_AZ_EVQ_EN, 1,
FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
channel->channel);
efx->type->push_irq_moderation(channel);
}
void efx_farch_ev_fini(struct efx_channel *channel)
{
efx_oword_t reg;
struct efx_nic *efx = channel->efx;
/* Remove event queue from card */
EFX_ZERO_OWORD(reg);
efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
channel->channel);
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
/* Unpin event queue */
efx_fini_special_buffer(efx, &channel->eventq);
}
/* Free buffers backing event queue */
void efx_farch_ev_remove(struct efx_channel *channel)
{
efx_free_special_buffer(channel->efx, &channel->eventq);
}
void efx_farch_ev_test_generate(struct efx_channel *channel)
{
efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}
void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
EFX_CHANNEL_MAGIC_FILL(rx_queue));
}
/**************************************************************************
*
* Hardware interrupts
* The hardware interrupt handler does very little work; all the event
* queue processing is carried out by per-channel NAPI poll handlers.
*
**************************************************************************/
/* Enable/disable/generate interrupts */
static inline void efx_farch_interrupts(struct efx_nic *efx,
bool enabled, bool force)
{
efx_oword_t int_en_reg_ker;
EFX_POPULATE_OWORD_3(int_en_reg_ker,
FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
FRF_AZ_KER_INT_KER, force,
FRF_AZ_DRV_INT_EN_KER, enabled);
efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}
void efx_farch_irq_enable_master(struct efx_nic *efx)
{
EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
efx_farch_interrupts(efx, true, false);
}
void efx_farch_irq_disable_master(struct efx_nic *efx)
{
/* Disable interrupts */
efx_farch_interrupts(efx, false, false);
}
/* Generate a test interrupt
* Interrupts must already have been enabled, otherwise nasty things
* may happen.
*/
void efx_farch_irq_test_generate(struct efx_nic *efx)
{
efx_farch_interrupts(efx, true, true);
}
/* Process a fatal interrupt
* Disable bus mastering ASAP and schedule a reset
*/
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t *int_ker = efx->irq_status.addr;
efx_oword_t fatal_intr;
int error, mem_perr;
efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
EFX_OWORD_VAL(fatal_intr),
error ? "disabling bus mastering" : "no recognised error");
/* If this is a memory parity error dump which blocks are offending */
mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
if (mem_perr) {
efx_oword_t reg;
efx_reado(efx, &reg, FR_AZ_MEM_STAT);
netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
EFX_OWORD_VAL(reg));
}
/* Disable both devices */
pci_clear_master(efx->pci_dev);
if (efx_nic_is_dual_func(efx))
pci_clear_master(nic_data->pci_dev2);
efx_farch_irq_disable_master(efx);
/* Count errors and reset or disable the NIC accordingly */
if (efx->int_error_count == 0 ||
time_after(jiffies, efx->int_error_expire)) {
efx->int_error_count = 0;
efx->int_error_expire =
jiffies + EFX_INT_ERROR_EXPIRE * HZ;
}
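/* Allow up to EFX_MAX_INT_ERRORS resets within one
* EFX_INT_ERROR_EXPIRE-second window; beyond that the NIC is
* disabled rather than reset yet again.
*/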
if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR - reset scheduled\n");
efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
} else {
netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR - max number of errors seen."
"NIC will be disabled\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}
return IRQ_HANDLED;
}
/* Handle a legacy interrupt
* Acknowledges the interrupt and schedules event queue processing.
*/
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
efx_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct efx_channel *channel;
efx_dword_t reg;
u32 queues;
int syserr;
/* Read the ISR which also ACKs the interrupts */
efx_readd(efx, &reg, FR_BZ_INT_ISR0);
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
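/* Bit n of the ISR corresponds to event queue n; the bit at
* efx->irq_level flags non-event-queue sources such as fatal errors.
*/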
/* Legacy interrupts are disabled too late by the EEH kernel
* code. Disable them earlier.
* If an EEH error occurred, the read will have returned all ones.
*/
if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
!efx->eeh_disabled_legacy_irq) {
disable_irq_nosync(efx->legacy_irq);
efx->eeh_disabled_legacy_irq = true;
}
/* Handle non-event-queue sources */
if (queues & (1U << efx->irq_level) && soft_enabled) {
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
return efx_farch_fatal_interrupt(efx);
efx->last_irq_cpu = raw_smp_processor_id();
}
if (queues != 0) {
if (EFX_WORKAROUND_15783(efx))
efx->irq_zero_count = 0;
/* Schedule processing of any interrupting queues */
if (likely(soft_enabled)) {
efx_for_each_channel(channel, efx) {
if (queues & 1)
efx_schedule_channel_irq(channel);
queues >>= 1;
}
}
result = IRQ_HANDLED;
} else if (EFX_WORKAROUND_15783(efx)) {
efx_qword_t *event;
/* We can't return IRQ_HANDLED more than once on seeing ISR=0
* because this might be a shared interrupt. */
if (efx->irq_zero_count++ == 0)
result = IRQ_HANDLED;
/* Ensure we schedule or rearm all event queues */
if (likely(soft_enabled)) {
efx_for_each_channel(channel, efx) {
event = efx_event(channel,
channel->eventq_read_ptr);
if (efx_event_present(event))
efx_schedule_channel_irq(channel);
else
efx_farch_ev_read_ack(channel);
}
}
}
if (result == IRQ_HANDLED)
netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
return result;
}
/* Handle an MSI interrupt
*
* Handle an MSI hardware interrupt. This routine schedules event
* queue processing. No interrupt acknowledgement cycle is necessary.
* Also, we never need to check that the interrupt is for us, since
* MSI interrupts cannot be shared.
*/
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
{
struct efx_msi_context *context = dev_id;
struct efx_nic *efx = context->efx;
efx_oword_t *int_ker = efx->irq_status.addr;
int syserr;
netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
return IRQ_HANDLED;
/* Handle non-event-queue sources */
if (context->index == efx->irq_level) {
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
return efx_farch_fatal_interrupt(efx);
efx->last_irq_cpu = raw_smp_processor_id();
}
/* Schedule processing of the channel */
efx_schedule_channel_irq(efx->channel[context->index]);
return IRQ_HANDLED;
}
/* Set up the RSS indirection table.
* This maps the hash value of a received packet to an RX queue index.
*/
void efx_farch_rx_push_indir_table(struct efx_nic *efx)
{
size_t i = 0;
efx_dword_t dword;
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
return;
BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
FR_BZ_RX_INDIRECTION_TBL_ROWS);
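/* Each row maps one RSS hash bucket to an RX queue index; the table
* only exists on Falcon B0 and later, hence the revision check above.
*/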
for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
efx->rx_indir_table[i]);
efx_writed(efx, &dword,
FR_BZ_RX_INDIRECTION_TBL +
FR_BZ_RX_INDIRECTION_TBL_STEP * i);
}
}
/* Looks at available SRAM resources and works out how many queues we
* can support, and where things like descriptor caches should live.
*
* SRAM is split up as follows:
* 0:                     buftbl entries for channels
* efx->vf_buftbl_base:   buftbl entries for SR-IOV
* efx->rx_dc_base:       RX descriptor caches
* efx->tx_dc_base:       TX descriptor caches
*/
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
unsigned vi_count, buftbl_min;
/* Account for the buffer table entries backing the datapath channels
* and the descriptor caches for those channels.
*/
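/* Each descriptor or event ring occupies (entries * 8) bytes and
* needs one buffer table entry per EFX_BUF_SIZE bytes of that memory.
*/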
buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
efx->n_channels * EFX_MAX_EVQ_SIZE)
* sizeof(efx_qword_t) / EFX_BUF_SIZE);
vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
#ifdef CONFIG_SFC_SRIOV
if (efx_sriov_wanted(efx)) {
unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
efx->vf_buftbl_base = buftbl_min;
vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
vi_count = max(vi_count, EFX_VI_BASE);
buftbl_free = (sram_lim_qw - buftbl_min -
vi_count * vi_dc_entries);
entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
efx_vf_size(efx));
vf_limit = min(buftbl_free / entries_per_vf,
(1024U - EFX_VI_BASE) >> efx->vi_scale);
if (efx->vf_count > vf_limit) {
netif_err(efx, probe, efx->net_dev,
"Reducing VF count from from %d to %d\n",
efx->vf_count, vf_limit);
efx->vf_count = vf_limit;
}
vi_count += efx->vf_count * efx_vf_size(efx);
}
#endif
efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}
u32 efx_farch_fpga_ver(struct efx_nic *efx)
{
efx_oword_t altera_build;
efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}
void efx_farch_init_common(struct efx_nic *efx)
{
efx_oword_t temp;
/* Set positions of descriptor caches in SRAM. */
EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
/* Set TX descriptor cache size. */
BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
/* Set RX descriptor cache size. Set low watermark to size-8, as
* this allows most efficient prefetching.
*/
BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
/* Program INT_KER address */
EFX_POPULATE_OWORD_2(temp,
FRF_AZ_NORM_INT_VEC_DIS_KER,
EFX_INT_MODE_USE_MSI(efx),
FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
/* Use an interrupt level unused by event queues */
efx->irq_level = 0x1f;
else
/* Use a valid MSI-X vector */
efx->irq_level = 0;
/* Enable all the genuinely fatal interrupts. (They are still
* masked by the overall interrupt mask, controlled by
* efx_farch_interrupts()).
*
* Note: All other fatal interrupts are enabled
*/
EFX_POPULATE_OWORD_3(temp,
FRF_AZ_ILL_ADR_INT_KER_EN, 1,
FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
efx_farch_rx_push_indir_table(efx);
/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
* controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
*/
efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
/* Enable SW_EV to inherit in char driver - assume harmless here */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
/* Prefetch threshold 2 => fetch when descriptor cache half empty */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
/* Disable hardware watchdog which can misfire */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
/* Squash TX of packets of 16 bytes or less */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
EFX_POPULATE_OWORD_4(temp,
/* Default values */
FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
FRF_BZ_TX_PACE_SB_AF, 0xb,
FRF_BZ_TX_PACE_FB_BASE, 0,
/* Allow large pace values in the
* fast bin. */
FRF_BZ_TX_PACE_BIN_TH,
FFE_BZ_TX_PACE_RESERVED);
efx_writeo(efx, &temp, FR_BZ_TX_PACE);
}
}
...@@ -8,8 +8,8 @@ ...@@ -8,8 +8,8 @@
* by the Free Software Foundation, incorporated herein by reference. * by the Free Software Foundation, incorporated herein by reference.
*/ */
#ifndef EFX_REGS_H #ifndef EFX_FARCH_REGS_H
#define EFX_REGS_H #define EFX_FARCH_REGS_H
/* /*
* Falcon hardware architecture definitions have a name prefix following * Falcon hardware architecture definitions have a name prefix following
...@@ -2925,4 +2925,4 @@ ...@@ -2925,4 +2925,4 @@
#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0 #define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32 #define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
#endif /* EFX_REGS_H */ #endif /* EFX_FARCH_REGS_H */
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
#include "filter.h" #include "filter.h"
#include "io.h" #include "io.h"
#include "nic.h" #include "nic.h"
#include "regs.h" #include "farch_regs.h"
/* "Fudge factors" - difference between programmed value and actual depth. /* "Fudge factors" - difference between programmed value and actual depth.
* Due to pipelined implementation we need to program H/W with a value that * Due to pipelined implementation we need to program H/W with a value that
......
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
#include "net_driver.h" #include "net_driver.h"
#include "nic.h" #include "nic.h"
#include "io.h" #include "io.h"
#include "regs.h" #include "farch_regs.h"
#include "mcdi_pcol.h" #include "mcdi_pcol.h"
#include "phy.h" #include "phy.h"
...@@ -24,13 +24,6 @@ ...@@ -24,13 +24,6 @@
#define MCDI_RPC_TIMEOUT (10 * HZ) #define MCDI_RPC_TIMEOUT (10 * HZ)
#define MCDI_PDU(efx) \
(efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
#define MCDI_DOORBELL(efx) \
(efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
#define MCDI_STATUS(efx) \
(efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
/* A reboot/assertion causes the MCDI status word to be set after the /* A reboot/assertion causes the MCDI status word to be set after the
* command word is set or a REBOOT event is sent. If we notice a reboot * command word is set or a REBOOT event is sent. If we notice a reboot
* via these mechanisms then wait 10ms for the status word to be set. */ * via these mechanisms then wait 10ms for the status word to be set. */
...@@ -44,16 +37,18 @@ ...@@ -44,16 +37,18 @@
static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{ {
struct siena_nic_data *nic_data; EFX_BUG_ON_PARANOID(!efx->mcdi);
EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); return &efx->mcdi->iface;
nic_data = efx->nic_data;
return &nic_data->mcdi;
} }
int efx_mcdi_init(struct efx_nic *efx) int efx_mcdi_init(struct efx_nic *efx)
{ {
struct efx_mcdi_iface *mcdi; struct efx_mcdi_iface *mcdi;
efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
if (!efx->mcdi)
return -ENOMEM;
mcdi = efx_mcdi(efx); mcdi = efx_mcdi(efx);
init_waitqueue_head(&mcdi->wq); init_waitqueue_head(&mcdi->wq);
spin_lock_init(&mcdi->iface_lock); spin_lock_init(&mcdi->iface_lock);
...@@ -66,73 +61,142 @@ int efx_mcdi_init(struct efx_nic *efx) ...@@ -66,73 +61,142 @@ int efx_mcdi_init(struct efx_nic *efx)
return efx_mcdi_handle_assertion(efx); return efx_mcdi_handle_assertion(efx);
} }
void efx_mcdi_fini(struct efx_nic *efx)
{
BUG_ON(efx->mcdi &&
atomic_read(&efx->mcdi->iface.state) != MCDI_STATE_QUIESCENT);
kfree(efx->mcdi);
}
static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen) const efx_dword_t *inbuf, size_t inlen)
{ {
struct efx_mcdi_iface *mcdi = efx_mcdi(efx); struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); efx_dword_t hdr[2];
unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); size_t hdr_len;
unsigned int i;
efx_dword_t hdr;
u32 xflags, seqno; u32 xflags, seqno;
unsigned int inlen_dw = DIV_ROUND_UP(inlen, 4);
BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V1);
seqno = mcdi->seqno & SEQ_MASK; seqno = mcdi->seqno & SEQ_MASK;
xflags = 0; xflags = 0;
if (mcdi->mode == MCDI_MODE_EVENTS) if (mcdi->mode == MCDI_MODE_EVENTS)
xflags |= MCDI_HEADER_XFLAGS_EVREQ; xflags |= MCDI_HEADER_XFLAGS_EVREQ;
EFX_POPULATE_DWORD_6(hdr, if (efx->type->mcdi_max_ver == 1) {
MCDI_HEADER_RESPONSE, 0, /* MCDI v1 */
MCDI_HEADER_RESYNC, 1, EFX_POPULATE_DWORD_6(hdr[0],
MCDI_HEADER_CODE, cmd, MCDI_HEADER_RESPONSE, 0,
MCDI_HEADER_DATALEN, inlen, MCDI_HEADER_RESYNC, 1,
MCDI_HEADER_SEQ, seqno, MCDI_HEADER_CODE, cmd,
MCDI_HEADER_XFLAGS, xflags); MCDI_HEADER_DATALEN, inlen,
MCDI_HEADER_SEQ, seqno,
efx_writed(efx, &hdr, pdu); MCDI_HEADER_XFLAGS, xflags);
hdr_len = 4;
for (i = 0; i < inlen_dw; i++) } else {
efx_writed(efx, &inbuf[i], pdu + 4 + 4 * i); /* MCDI v2 */
BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
EFX_POPULATE_DWORD_6(hdr[0],
MCDI_HEADER_RESPONSE, 0,
MCDI_HEADER_RESYNC, 1,
MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
MCDI_HEADER_DATALEN, 0,
MCDI_HEADER_SEQ, seqno,
MCDI_HEADER_XFLAGS, xflags);
EFX_POPULATE_DWORD_2(hdr[1],
MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
hdr_len = 8;
}
/* Ensure the payload is written out before the header */ efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
wmb(); }
/* ring the doorbell with a distinctive value */ static int efx_mcdi_errno(unsigned int mcdi_err)
_efx_writed(efx, (__force __le32) 0x45789abc, doorbell); {
switch (mcdi_err) {
case 0:
return 0;
#define TRANSLATE_ERROR(name) \
case MC_CMD_ERR_ ## name: \
return -name;
TRANSLATE_ERROR(EPERM);
TRANSLATE_ERROR(ENOENT);
TRANSLATE_ERROR(EINTR);
TRANSLATE_ERROR(EAGAIN);
TRANSLATE_ERROR(EACCES);
TRANSLATE_ERROR(EBUSY);
TRANSLATE_ERROR(EINVAL);
TRANSLATE_ERROR(EDEADLK);
TRANSLATE_ERROR(ENOSYS);
TRANSLATE_ERROR(ETIME);
TRANSLATE_ERROR(EALREADY);
TRANSLATE_ERROR(ENOSPC);
#undef TRANSLATE_ERROR
case MC_CMD_ERR_ALLOC_FAIL:
return -ENOBUFS;
case MC_CMD_ERR_MAC_EXIST:
return -EADDRINUSE;
default:
return -EPROTO;
}
} }
static void static void efx_mcdi_read_response_header(struct efx_nic *efx)
efx_mcdi_copyout(struct efx_nic *efx, efx_dword_t *outbuf, size_t outlen)
{ {
struct efx_mcdi_iface *mcdi = efx_mcdi(efx); struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); unsigned int respseq, respcmd, error;
unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4); efx_dword_t hdr;
int i;
BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); efx->type->mcdi_read_response(efx, &hdr, 0, 4);
BUG_ON(outlen > MCDI_CTL_SDU_LEN_MAX_V1); respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);
if (respcmd != MC_CMD_V2_EXTN) {
mcdi->resp_hdr_len = 4;
mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
} else {
efx->type->mcdi_read_response(efx, &hdr, 4, 4);
mcdi->resp_hdr_len = 8;
mcdi->resp_data_len =
EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
}
for (i = 0; i < outlen_dw; i++) if (error && mcdi->resp_data_len == 0) {
efx_readd(efx, &outbuf[i], pdu + 4 + 4 * i); netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
mcdi->resprc = -EIO;
} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
netif_err(efx, hw, efx->net_dev,
"MC response mismatch tx seq 0x%x rx seq 0x%x\n",
respseq, mcdi->seqno);
mcdi->resprc = -EIO;
} else if (error) {
efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
mcdi->resprc =
efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
} else {
mcdi->resprc = 0;
}
} }
static int efx_mcdi_poll(struct efx_nic *efx) static int efx_mcdi_poll(struct efx_nic *efx)
{ {
struct efx_mcdi_iface *mcdi = efx_mcdi(efx); struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned long time, finish; unsigned long time, finish;
unsigned int respseq, respcmd, error; unsigned int spins;
unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); int rc;
unsigned int rc, spins;
efx_dword_t reg;
/* Check for a reboot atomically with respect to efx_mcdi_copyout() */ /* Check for a reboot atomically with respect to efx_mcdi_copyout() */
rc = -efx_mcdi_poll_reboot(efx); rc = efx_mcdi_poll_reboot(efx);
if (rc) if (rc) {
goto out; spin_lock_bh(&mcdi->iface_lock);
mcdi->resprc = rc;
mcdi->resp_hdr_len = 0;
mcdi->resp_data_len = 0;
spin_unlock_bh(&mcdi->iface_lock);
return 0;
}
/* Poll for completion. Poll quickly (once a us) for the 1st jiffy, /* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
* because generally mcdi responses are fast. After that, back off * because generally mcdi responses are fast. After that, back off
...@@ -152,59 +216,16 @@ static int efx_mcdi_poll(struct efx_nic *efx) ...@@ -152,59 +216,16 @@ static int efx_mcdi_poll(struct efx_nic *efx)
time = jiffies; time = jiffies;
rmb(); rmb();
efx_readd(efx, &reg, pdu); if (efx->type->mcdi_poll_response(efx))
/* All 1's indicates that shared memory is in reset (and is
* not a valid header). Wait for it to come out reset before
* completing the command */
if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
break; break;
if (time_after(time, finish)) if (time_after(time, finish))
return -ETIMEDOUT; return -ETIMEDOUT;
} }
mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN); spin_lock_bh(&mcdi->iface_lock);
respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ); efx_mcdi_read_response_header(efx);
respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE); spin_unlock_bh(&mcdi->iface_lock);
error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
if (error && mcdi->resplen == 0) {
netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
rc = EIO;
} else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
netif_err(efx, hw, efx->net_dev,
"MC response mismatch tx seq 0x%x rx seq 0x%x\n",
respseq, mcdi->seqno);
rc = EIO;
} else if (error) {
efx_readd(efx, &reg, pdu + 4);
switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
#define TRANSLATE_ERROR(name) \
case MC_CMD_ERR_ ## name: \
rc = name; \
break
TRANSLATE_ERROR(ENOENT);
TRANSLATE_ERROR(EINTR);
TRANSLATE_ERROR(EACCES);
TRANSLATE_ERROR(EBUSY);
TRANSLATE_ERROR(EINVAL);
TRANSLATE_ERROR(EDEADLK);
TRANSLATE_ERROR(ENOSYS);
TRANSLATE_ERROR(ETIME);
#undef TRANSLATE_ERROR
default:
rc = EIO;
break;
}
} else
rc = 0;
out:
mcdi->resprc = rc;
if (rc)
mcdi->resplen = 0;
/* Return rc=0 like wait_event_timeout() */ /* Return rc=0 like wait_event_timeout() */
return 0; return 0;
...@@ -215,17 +236,13 @@ static int efx_mcdi_poll(struct efx_nic *efx) ...@@ -215,17 +236,13 @@ static int efx_mcdi_poll(struct efx_nic *efx)
*/ */
int efx_mcdi_poll_reboot(struct efx_nic *efx) int efx_mcdi_poll_reboot(struct efx_nic *efx)
{ {
unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx); int rc;
efx_dword_t reg;
uint32_t value;
if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
return false;
efx_readd(efx, &reg, addr); if (!efx->mcdi)
value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); return 0;
if (value == 0) rc = efx->type->mcdi_poll_reboot(efx);
if (!rc)
return 0; return 0;
/* MAC statistics have been cleared on the NIC; clear our copy /* MAC statistics have been cleared on the NIC; clear our copy
...@@ -233,13 +250,7 @@ int efx_mcdi_poll_reboot(struct efx_nic *efx) ...@@ -233,13 +250,7 @@ int efx_mcdi_poll_reboot(struct efx_nic *efx)
*/ */
memset(&efx->mac_stats, 0, sizeof(efx->mac_stats)); memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
EFX_ZERO_DWORD(reg); return rc;
efx_writed(efx, &reg, addr);
if (value == MC_STATUS_DWORD_ASSERT)
return -EINTR;
else
return -EIO;
} }
static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
...@@ -302,7 +313,7 @@ static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) ...@@ -302,7 +313,7 @@ static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
} }
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
unsigned int datalen, unsigned int errno) unsigned int datalen, unsigned int mcdi_err)
{ {
struct efx_mcdi_iface *mcdi = efx_mcdi(efx); struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
bool wake = false; bool wake = false;
...@@ -318,8 +329,14 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, ...@@ -318,8 +329,14 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
"MC response mismatch tx seq 0x%x rx " "MC response mismatch tx seq 0x%x rx "
"seq 0x%x\n", seqno, mcdi->seqno); "seq 0x%x\n", seqno, mcdi->seqno);
} else { } else {
mcdi->resprc = errno; if (efx->type->mcdi_max_ver >= 2) {
mcdi->resplen = datalen; /* MCDI v2 responses don't fit in an event */
efx_mcdi_read_response_header(efx);
} else {
mcdi->resprc = efx_mcdi_errno(mcdi_err);
mcdi->resp_hdr_len = 4;
mcdi->resp_data_len = datalen;
}
wake = true; wake = true;
} }
...@@ -335,17 +352,29 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, ...@@ -335,17 +352,29 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
efx_dword_t *outbuf, size_t outlen, efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual) size_t *outlen_actual)
{ {
efx_mcdi_rpc_start(efx, cmd, inbuf, inlen); int rc;
rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
if (rc)
return rc;
return efx_mcdi_rpc_finish(efx, cmd, inlen, return efx_mcdi_rpc_finish(efx, cmd, inlen,
outbuf, outlen, outlen_actual); outbuf, outlen, outlen_actual);
} }
void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen) const efx_dword_t *inbuf, size_t inlen)
{ {
struct efx_mcdi_iface *mcdi = efx_mcdi(efx); struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); if (efx->type->mcdi_max_ver < 0 ||
(efx->type->mcdi_max_ver < 2 &&
cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
return -EINVAL;
if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
(efx->type->mcdi_max_ver < 2 &&
inlen > MCDI_CTL_SDU_LEN_MAX_V1))
return -EMSGSIZE;
efx_mcdi_acquire(mcdi); efx_mcdi_acquire(mcdi);
...@@ -355,6 +384,7 @@ void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, ...@@ -355,6 +384,7 @@ void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
spin_unlock_bh(&mcdi->iface_lock); spin_unlock_bh(&mcdi->iface_lock);
efx_mcdi_copyin(efx, cmd, inbuf, inlen); efx_mcdi_copyin(efx, cmd, inbuf, inlen);
return 0;
} }
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
...@@ -364,8 +394,6 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, ...@@ -364,8 +394,6 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
struct efx_mcdi_iface *mcdi = efx_mcdi(efx); struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
int rc; int rc;
BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
if (mcdi->mode == MCDI_MODE_POLL) if (mcdi->mode == MCDI_MODE_POLL)
rc = efx_mcdi_poll(efx); rc = efx_mcdi_poll(efx);
else else
...@@ -385,22 +413,25 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, ...@@ -385,22 +413,25 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
"MC command 0x%x inlen %d mode %d timed out\n", "MC command 0x%x inlen %d mode %d timed out\n",
cmd, (int)inlen, mcdi->mode); cmd, (int)inlen, mcdi->mode);
} else { } else {
size_t resplen; size_t hdr_len, data_len;
/* At the very least we need a memory barrier here to ensure /* At the very least we need a memory barrier here to ensure
* we pick up changes from efx_mcdi_ev_cpl(). Protect against * we pick up changes from efx_mcdi_ev_cpl(). Protect against
* a spurious efx_mcdi_ev_cpl() running concurrently by * a spurious efx_mcdi_ev_cpl() running concurrently by
* acquiring the iface_lock. */ * acquiring the iface_lock. */
spin_lock_bh(&mcdi->iface_lock); spin_lock_bh(&mcdi->iface_lock);
rc = -mcdi->resprc; rc = mcdi->resprc;
resplen = mcdi->resplen; hdr_len = mcdi->resp_hdr_len;
data_len = mcdi->resp_data_len;
spin_unlock_bh(&mcdi->iface_lock); spin_unlock_bh(&mcdi->iface_lock);
BUG_ON(rc > 0);
if (rc == 0) { if (rc == 0) {
efx_mcdi_copyout(efx, outbuf, efx->type->mcdi_read_response(efx, outbuf, hdr_len,
min(outlen, mcdi->resplen)); min(outlen, data_len));
if (outlen_actual != NULL) if (outlen_actual != NULL)
*outlen_actual = resplen; *outlen_actual = data_len;
} else if (cmd == MC_CMD_REBOOT && rc == -EIO) } else if (cmd == MC_CMD_REBOOT && rc == -EIO)
; /* Don't reset if MC_CMD_REBOOT returns EIO */ ; /* Don't reset if MC_CMD_REBOOT returns EIO */
else if (rc == -EIO || rc == -EINTR) { else if (rc == -EIO || rc == -EINTR) {
...@@ -426,7 +457,7 @@ void efx_mcdi_mode_poll(struct efx_nic *efx) ...@@ -426,7 +457,7 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
{ {
struct efx_mcdi_iface *mcdi; struct efx_mcdi_iface *mcdi;
if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) if (!efx->mcdi)
return; return;
mcdi = efx_mcdi(efx); mcdi = efx_mcdi(efx);
...@@ -450,7 +481,7 @@ void efx_mcdi_mode_event(struct efx_nic *efx) ...@@ -450,7 +481,7 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
{ {
struct efx_mcdi_iface *mcdi; struct efx_mcdi_iface *mcdi;
if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) if (!efx->mcdi)
return; return;
mcdi = efx_mcdi(efx); mcdi = efx_mcdi(efx);
...@@ -494,7 +525,8 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) ...@@ -494,7 +525,8 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
if (efx_mcdi_complete(mcdi)) { if (efx_mcdi_complete(mcdi)) {
if (mcdi->mode == MCDI_MODE_EVENTS) { if (mcdi->mode == MCDI_MODE_EVENTS) {
mcdi->resprc = rc; mcdi->resprc = rc;
mcdi->resplen = 0; mcdi->resp_hdr_len = 0;
mcdi->resp_data_len = 0;
++mcdi->credits; ++mcdi->credits;
} }
} else { } else {
...@@ -526,7 +558,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, ...@@ -526,7 +558,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_BADSSERT: case MCDI_EVENT_CODE_BADSSERT:
netif_err(efx, hw, efx->net_dev, netif_err(efx, hw, efx->net_dev,
"MC watchdog or assertion failure at 0x%x\n", data); "MC watchdog or assertion failure at 0x%x\n", data);
efx_mcdi_ev_death(efx, EINTR); efx_mcdi_ev_death(efx, -EINTR);
break; break;
case MCDI_EVENT_CODE_PMNOTICE: case MCDI_EVENT_CODE_PMNOTICE:
...@@ -552,7 +584,7 @@ void efx_mcdi_process_event(struct efx_channel *channel, ...@@ -552,7 +584,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
break; break;
case MCDI_EVENT_CODE_REBOOT: case MCDI_EVENT_CODE_REBOOT:
netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
efx_mcdi_ev_death(efx, EIO); efx_mcdi_ev_death(efx, -EIO);
break; break;
case MCDI_EVENT_CODE_MAC_STATS_DMA: case MCDI_EVENT_CODE_MAC_STATS_DMA:
/* MAC stats are gathered lazily. We can ignore this. */ /* MAC stats are gathered lazily. We can ignore this. */
...@@ -620,6 +652,7 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, ...@@ -620,6 +652,7 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
driver_operating ? 1 : 0); driver_operating ? 1 : 0);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen); outbuf, sizeof(outbuf), &outlen);
......
...@@ -11,14 +11,14 @@ ...@@ -11,14 +11,14 @@
#define EFX_MCDI_H #define EFX_MCDI_H
/** /**
* enum efx_mcdi_state * enum efx_mcdi_state - MCDI request handling state
* @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
* mcdi_lock then they are able to move to MCDI_STATE_RUNNING * mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING
* @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that
* moved into this state is allowed to move out of it. * moved into this state is allowed to move out of it.
* @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
* has not yet consumed the result. For all other threads, equivalent to * has not yet consumed the result. For all other threads, equivalent to
* MCDI_STATE_RUNNING. * %MCDI_STATE_RUNNING.
*/ */
enum efx_mcdi_state { enum efx_mcdi_state {
MCDI_STATE_QUIESCENT, MCDI_STATE_QUIESCENT,
...@@ -32,28 +32,28 @@ enum efx_mcdi_mode { ...@@ -32,28 +32,28 @@ enum efx_mcdi_mode {
}; };
/** /**
* struct efx_mcdi_iface * struct efx_mcdi_iface - MCDI protocol context
* @state: Interface state. Waited for by mcdi_wq. * @state: Request handling state. Waited for by @wq.
* @wq: Wait queue for threads waiting for state != STATE_RUNNING
* @iface_lock: Protects @credits, @seqno, @resprc, @resplen
* @mode: Poll for mcdi completion, or wait for an mcdi_event. * @mode: Poll for mcdi completion, or wait for an mcdi_event.
* Serialised by @lock * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
* @iface_lock: Serialises access to all the following fields
* @seqno: The next sequence number to use for mcdi requests. * @seqno: The next sequence number to use for mcdi requests.
* Serialised by @lock
* @credits: Number of spurious MCDI completion events allowed before we * @credits: Number of spurious MCDI completion events allowed before we
* trigger a fatal error. Protected by @lock * trigger a fatal error
* @resprc: Returned MCDI completion * @resprc: Response error/success code (Linux numbering)
* @resplen: Returned payload length * @resp_hdr_len: Response header length
* @resp_data_len: Response data (SDU or error) length
*/ */
struct efx_mcdi_iface { struct efx_mcdi_iface {
atomic_t state; atomic_t state;
enum efx_mcdi_mode mode;
wait_queue_head_t wq; wait_queue_head_t wq;
spinlock_t iface_lock; spinlock_t iface_lock;
enum efx_mcdi_mode mode;
unsigned int credits; unsigned int credits;
unsigned int seqno; unsigned int seqno;
unsigned int resprc; int resprc;
size_t resplen; size_t resp_hdr_len;
size_t resp_data_len;
}; };
struct efx_mcdi_mon { struct efx_mcdi_mon {
...@@ -65,15 +65,36 @@ struct efx_mcdi_mon { ...@@ -65,15 +65,36 @@ struct efx_mcdi_mon {
unsigned int n_attrs; unsigned int n_attrs;
}; };
/**
* struct efx_mcdi_data - extra state for NICs that implement MCDI
* @iface: Interface/protocol state
* @hwmon: Hardware monitor state
*/
struct efx_mcdi_data {
struct efx_mcdi_iface iface;
#ifdef CONFIG_SFC_MCDI_MON
struct efx_mcdi_mon hwmon;
#endif
};
#ifdef CONFIG_SFC_MCDI_MON
static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
{
EFX_BUG_ON_PARANOID(!efx->mcdi);
return &efx->mcdi->hwmon;
}
#endif
extern int efx_mcdi_init(struct efx_nic *efx); extern int efx_mcdi_init(struct efx_nic *efx);
extern void efx_mcdi_fini(struct efx_nic *efx);
extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen, const efx_dword_t *inbuf, size_t inlen,
efx_dword_t *outbuf, size_t outlen, efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual); size_t *outlen_actual);
extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen); const efx_dword_t *inbuf, size_t inlen);
extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen, extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
efx_dword_t *outbuf, size_t outlen, efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual); size_t *outlen_actual);
......
...@@ -261,7 +261,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx) ...@@ -261,7 +261,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
return -EIO; return -EIO;
rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf, rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf,
4 * MC_CMD_SENSOR_ENTRY_MAXNUM); 4 * MC_CMD_SENSOR_ENTRY_MAXNUM, GFP_KERNEL);
if (rc) if (rc)
return rc; return rc;
......
This source diff could not be displayed because it is too large.
...@@ -111,7 +111,8 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) ...@@ -111,7 +111,8 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
if (rc) if (rc)
goto fail; goto fail;
if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) { if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
rc = -EIO; rc = -EIO;
goto fail; goto fail;
} }
...@@ -989,7 +990,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx) ...@@ -989,7 +990,7 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
/* Allocate buffer for stats */ /* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
MC_CMD_MAC_NSTATS * sizeof(u64)); MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
if (rc) if (rc)
return rc; return rc;
netif_dbg(efx, probe, efx->net_dev, netif_dbg(efx, probe, efx->net_dev,
......
...@@ -93,21 +93,36 @@ struct efx_ptp_data; ...@@ -93,21 +93,36 @@ struct efx_ptp_data;
struct efx_self_tests; struct efx_self_tests;
/** /**
* struct efx_special_buffer - An Efx special buffer * struct efx_buffer - A general-purpose DMA buffer
* @addr: CPU base address of the buffer * @addr: host base address of the buffer
* @dma_addr: DMA base address of the buffer * @dma_addr: DMA base address of the buffer
* @len: Buffer length, in bytes * @len: Buffer length, in bytes
* @index: Buffer index within controller's buffer table
* @entries: Number of buffer table entries
* *
* Special buffers are used for the event queues and the TX and RX * The NIC uses these buffers for its interrupt status registers and
* descriptor queues for each channel. They are *not* used for the * MAC stats dumps.
* actual transmit and receive buffers.
*/ */
struct efx_special_buffer { struct efx_buffer {
void *addr; void *addr;
dma_addr_t dma_addr; dma_addr_t dma_addr;
unsigned int len; unsigned int len;
};
/**
* struct efx_special_buffer - DMA buffer entered into buffer table
* @buf: Standard &struct efx_buffer
* @index: Buffer index within controller's buffer table
* @entries: Number of buffer table entries
*
* The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
* Event and descriptor rings are addressed via one or more buffer
* table entries (and so can be physically non-contiguous, although we
* currently do not take advantage of that). On Falcon and Siena we
* have to take care of allocating and initialising the entries
* ourselves. On later hardware this is managed by the firmware and
* @index and @entries are left as 0.
*/
struct efx_special_buffer {
struct efx_buffer buf;
unsigned int index; unsigned int index;
unsigned int entries; unsigned int entries;
}; };
...@@ -271,7 +286,7 @@ struct efx_rx_page_state { ...@@ -271,7 +286,7 @@ struct efx_rx_page_state {
* @buffer: The software buffer ring * @buffer: The software buffer ring
* @rxd: The hardware descriptor ring * @rxd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1. * @ptr_mask: The size of the ring minus 1.
* @enabled: Receive queue enabled indicator. * @refill_enabled: Enable refill whenever fill level is low
* @flush_pending: Set when a RX flush is pending. Has the same lifetime as * @flush_pending: Set when a RX flush is pending. Has the same lifetime as
* @rxq_flush_pending. * @rxq_flush_pending.
* @added_count: Number of buffers added to the receive queue. * @added_count: Number of buffers added to the receive queue.
...@@ -302,7 +317,7 @@ struct efx_rx_queue { ...@@ -302,7 +317,7 @@ struct efx_rx_queue {
struct efx_rx_buffer *buffer; struct efx_rx_buffer *buffer;
struct efx_special_buffer rxd; struct efx_special_buffer rxd;
unsigned int ptr_mask; unsigned int ptr_mask;
bool enabled; bool refill_enabled;
bool flush_pending; bool flush_pending;
unsigned int added_count; unsigned int added_count;
...@@ -325,22 +340,6 @@ struct efx_rx_queue { ...@@ -325,22 +340,6 @@ struct efx_rx_queue {
unsigned int slow_fill_count; unsigned int slow_fill_count;
}; };
/**
* struct efx_buffer - An Efx general-purpose buffer
* @addr: host base address of the buffer
* @dma_addr: DMA base address of the buffer
* @len: Buffer length, in bytes
*
* The NIC uses these buffers for its interrupt status registers and
* MAC stats dumps.
*/
struct efx_buffer {
void *addr;
dma_addr_t dma_addr;
unsigned int len;
};
enum efx_rx_alloc_method { enum efx_rx_alloc_method {
RX_ALLOC_METHOD_AUTO = 0, RX_ALLOC_METHOD_AUTO = 0,
RX_ALLOC_METHOD_SKB = 1, RX_ALLOC_METHOD_SKB = 1,
...@@ -362,7 +361,6 @@ enum efx_rx_alloc_method { ...@@ -362,7 +361,6 @@ enum efx_rx_alloc_method {
* @irq_moderation: IRQ moderation value (in hardware ticks) * @irq_moderation: IRQ moderation value (in hardware ticks)
* @napi_dev: Net device used with NAPI * @napi_dev: Net device used with NAPI
* @napi_str: NAPI control structure * @napi_str: NAPI control structure
* @work_pending: Is work pending via NAPI?
* @eventq: Event queue buffer * @eventq: Event queue buffer
* @eventq_mask: Event queue pointer mask * @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer * @eventq_read_ptr: Event queue read pointer
...@@ -394,7 +392,6 @@ struct efx_channel { ...@@ -394,7 +392,6 @@ struct efx_channel {
unsigned int irq_moderation; unsigned int irq_moderation;
struct net_device *napi_dev; struct net_device *napi_dev;
struct napi_struct napi_str; struct napi_struct napi_str;
bool work_pending;
struct efx_special_buffer eventq; struct efx_special_buffer eventq;
unsigned int eventq_mask; unsigned int eventq_mask;
unsigned int eventq_read_ptr; unsigned int eventq_read_ptr;
...@@ -422,6 +419,21 @@ struct efx_channel { ...@@ -422,6 +419,21 @@ struct efx_channel {
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
}; };
/**
* struct efx_msi_context - Context for each MSI
* @efx: The associated NIC
* @index: Index of the channel/IRQ
* @name: Name of the channel/IRQ
*
* Unlike &struct efx_channel, this is never reallocated and is always
* safe for the IRQ handler to access.
*/
struct efx_msi_context {
struct efx_nic *efx;
unsigned int index;
char name[IFNAMSIZ + 6];
};
/** /**
* struct efx_channel_type - distinguishes traffic and extra channels * struct efx_channel_type - distinguishes traffic and extra channels
* @handle_no_channel: Handle failure to allocate an extra channel * @handle_no_channel: Handle failure to allocate an extra channel
...@@ -672,7 +684,6 @@ struct vfdi_status; ...@@ -672,7 +684,6 @@ struct vfdi_status;
* @pci_dev: The PCI device * @pci_dev: The PCI device
* @type: Controller type attributes * @type: Controller type attributes
* @legacy_irq: IRQ number * @legacy_irq: IRQ number
* @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
* @workqueue: Workqueue for port reconfigures and the HW monitor. * @workqueue: Workqueue for port reconfigures and the HW monitor.
* Work items do not hold and must not acquire RTNL. * Work items do not hold and must not acquire RTNL.
* @workqueue_name: Name of workqueue * @workqueue_name: Name of workqueue
...@@ -689,7 +700,7 @@ struct vfdi_status; ...@@ -689,7 +700,7 @@ struct vfdi_status;
* @tx_queue: TX DMA queues * @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues * @rx_queue: RX DMA queues
* @channel: Channels * @channel: Channels
* @channel_name: Names for channels and their IRQs * @msi_context: Context for each MSI
* @extra_channel_types: Types of extra (non-traffic) channels that * @extra_channel_types: Types of extra (non-traffic) channels that
* should be allocated for this NIC * should be allocated for this NIC
* @rxq_entries: Size of receive queues requested by user. * @rxq_entries: Size of receive queues requested by user.
...@@ -712,12 +723,15 @@ struct vfdi_status; ...@@ -712,12 +723,15 @@ struct vfdi_status;
* @rx_scatter: Scatter mode enabled for receives * @rx_scatter: Scatter mode enabled for receives
* @int_error_count: Number of internal errors seen recently * @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired * @int_error_expire: Time at which error count will be expired
* @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
* acknowledge but do nothing else.
* @irq_status: Interrupt status buffer * @irq_status: Interrupt status buffer
* @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
* @irq_level: IRQ level/index for IRQs not triggered by an event queue * @irq_level: IRQ level/index for IRQs not triggered by an event queue
* @selftest_work: Work item for asynchronous self-test * @selftest_work: Work item for asynchronous self-test
* @mtd_list: List of MTDs attached to the NIC * @mtd_list: List of MTDs attached to the NIC
* @nic_data: Hardware dependent state * @nic_data: Hardware dependent state
* @mcdi: Management-Controller-to-Driver Interface state
* @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
* efx_monitor() and efx_reconfigure_port() * efx_monitor() and efx_reconfigure_port()
* @port_enabled: Port enabled indicator. * @port_enabled: Port enabled indicator.
...@@ -788,7 +802,6 @@ struct efx_nic { ...@@ -788,7 +802,6 @@ struct efx_nic {
unsigned int port_num; unsigned int port_num;
const struct efx_nic_type *type; const struct efx_nic_type *type;
int legacy_irq; int legacy_irq;
bool legacy_irq_enabled;
bool eeh_disabled_legacy_irq; bool eeh_disabled_legacy_irq;
struct workqueue_struct *workqueue; struct workqueue_struct *workqueue;
char workqueue_name[16]; char workqueue_name[16];
...@@ -806,7 +819,7 @@ struct efx_nic { ...@@ -806,7 +819,7 @@ struct efx_nic {
unsigned long reset_pending; unsigned long reset_pending;
struct efx_channel *channel[EFX_MAX_CHANNELS]; struct efx_channel *channel[EFX_MAX_CHANNELS];
char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6]; struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
const struct efx_channel_type * const struct efx_channel_type *
extra_channel_type[EFX_MAX_EXTRA_CHANNELS]; extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
...@@ -837,6 +850,7 @@ struct efx_nic { ...@@ -837,6 +850,7 @@ struct efx_nic {
unsigned int_error_count; unsigned int_error_count;
unsigned long int_error_expire; unsigned long int_error_expire;
bool irq_soft_enabled;
struct efx_buffer irq_status; struct efx_buffer irq_status;
unsigned irq_zero_count; unsigned irq_zero_count;
unsigned irq_level; unsigned irq_level;
...@@ -847,6 +861,7 @@ struct efx_nic { ...@@ -847,6 +861,7 @@ struct efx_nic {
#endif #endif
void *nic_data; void *nic_data;
struct efx_mcdi_data *mcdi;
struct mutex mac_lock; struct mutex mac_lock;
struct work_struct mac_work; struct work_struct mac_work;
...@@ -938,8 +953,11 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) ...@@ -938,8 +953,11 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @probe_port: Probe the MAC and PHY * @probe_port: Probe the MAC and PHY
* @remove_port: Free resources allocated by probe_port() * @remove_port: Free resources allocated by probe_port()
* @handle_global_event: Handle a "global" event (may be %NULL) * @handle_global_event: Handle a "global" event (may be %NULL)
* @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
* @prepare_flush: Prepare the hardware for flushing the DMA queues * @prepare_flush: Prepare the hardware for flushing the DMA queues
* @finish_flush: Clean up after flushing the DMA queues * (for Falcon architecture)
* @finish_flush: Clean up after flushing the DMA queues (for Falcon
* architecture)
* @update_stats: Update statistics not provided by event handling * @update_stats: Update statistics not provided by event handling
* @start_stats: Start the regular fetching of statistics * @start_stats: Start the regular fetching of statistics
* @stop_stats: Stop the regular fetching of statistics * @stop_stats: Stop the regular fetching of statistics
...@@ -953,9 +971,46 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) ...@@ -953,9 +971,46 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @get_wol: Get WoL configuration from driver state * @get_wol: Get WoL configuration from driver state
* @set_wol: Push WoL configuration to the NIC * @set_wol: Push WoL configuration to the NIC
* @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
* @test_chip: Test registers. Should use efx_nic_test_registers(), and is * @test_chip: Test registers. May use efx_farch_test_registers(), and is
* expected to reset the NIC. * expected to reset the NIC.
* @test_nvram: Test validity of NVRAM contents * @test_nvram: Test validity of NVRAM contents
* @mcdi_request: Send an MCDI request with the given header and SDU.
* The SDU length may be any value from 0 up to the protocol-
* defined maximum, but its buffer will be padded to a multiple
* of 4 bytes.
* @mcdi_poll_response: Test whether an MCDI response is available.
* @mcdi_read_response: Read the MCDI response PDU. The offset will
* be a multiple of 4. The length may not be, but the buffer
* will be padded so it is safe to round up.
* @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so,
* return an appropriate error code for aborting any current
* request; otherwise return 0.
* @irq_enable_master: Enable IRQs on the NIC. Each event queue must
* be separately enabled after this.
* @irq_test_generate: Generate a test IRQ
* @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event
* queue must be separately disabled before this.
* @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is
* a pointer to the &struct efx_msi_context for the channel.
* @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument
* is a pointer to the &struct efx_nic.
* @tx_probe: Allocate resources for TX queue
* @tx_init: Initialise TX queue on the NIC
* @tx_remove: Free resources for TX queue
* @tx_write: Write TX descriptors and doorbell
* @rx_push_indir_table: Write RSS indirection table to the NIC
* @rx_probe: Allocate resources for RX queue
* @rx_init: Initialise RX queue on the NIC
* @rx_remove: Free resources for RX queue
* @rx_write: Write RX descriptors and doorbell
* @rx_defer_refill: Generate a refill reminder event
* @ev_probe: Allocate resources for event queue
* @ev_init: Initialise event queue on the NIC
* @ev_fini: Deinitialise event queue on the NIC
* @ev_remove: Free resources for event queue
* @ev_process: Process events for a queue, up to the given NAPI quota
* @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
* @ev_test_generate: Generate a test event
* @revision: Hardware architecture revision * @revision: Hardware architecture revision
* @mem_map_size: Memory BAR mapped size * @mem_map_size: Memory BAR mapped size
* @txd_ptr_tbl_base: TX descriptor ring base address * @txd_ptr_tbl_base: TX descriptor ring base address
...@@ -974,6 +1029,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) ...@@ -974,6 +1029,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @timer_period_max: Maximum period of interrupt timer (in ticks) * @timer_period_max: Maximum period of interrupt timer (in ticks)
* @offload_features: net_device feature flags for protocol offload * @offload_features: net_device feature flags for protocol offload
* features implemented in hardware * features implemented in hardware
* @mcdi_max_ver: Maximum MCDI version supported
*/ */
struct efx_nic_type { struct efx_nic_type {
int (*probe)(struct efx_nic *efx); int (*probe)(struct efx_nic *efx);
...@@ -988,6 +1044,7 @@ struct efx_nic_type { ...@@ -988,6 +1044,7 @@ struct efx_nic_type {
int (*probe_port)(struct efx_nic *efx); int (*probe_port)(struct efx_nic *efx);
void (*remove_port)(struct efx_nic *efx); void (*remove_port)(struct efx_nic *efx);
bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
int (*fini_dmaq)(struct efx_nic *efx);
void (*prepare_flush)(struct efx_nic *efx); void (*prepare_flush)(struct efx_nic *efx);
void (*finish_flush)(struct efx_nic *efx); void (*finish_flush)(struct efx_nic *efx);
void (*update_stats)(struct efx_nic *efx); void (*update_stats)(struct efx_nic *efx);
...@@ -1004,6 +1061,35 @@ struct efx_nic_type { ...@@ -1004,6 +1061,35 @@ struct efx_nic_type {
void (*resume_wol)(struct efx_nic *efx); void (*resume_wol)(struct efx_nic *efx);
int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests); int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
int (*test_nvram)(struct efx_nic *efx); int (*test_nvram)(struct efx_nic *efx);
void (*mcdi_request)(struct efx_nic *efx,
const efx_dword_t *hdr, size_t hdr_len,
const efx_dword_t *sdu, size_t sdu_len);
bool (*mcdi_poll_response)(struct efx_nic *efx);
void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
size_t pdu_offset, size_t pdu_len);
int (*mcdi_poll_reboot)(struct efx_nic *efx);
void (*irq_enable_master)(struct efx_nic *efx);
void (*irq_test_generate)(struct efx_nic *efx);
void (*irq_disable_non_ev)(struct efx_nic *efx);
irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
int (*tx_probe)(struct efx_tx_queue *tx_queue);
void (*tx_init)(struct efx_tx_queue *tx_queue);
void (*tx_remove)(struct efx_tx_queue *tx_queue);
void (*tx_write)(struct efx_tx_queue *tx_queue);
void (*rx_push_indir_table)(struct efx_nic *efx);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
void (*rx_write)(struct efx_rx_queue *rx_queue);
void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
int (*ev_probe)(struct efx_channel *channel);
void (*ev_init)(struct efx_channel *channel);
void (*ev_fini)(struct efx_channel *channel);
void (*ev_remove)(struct efx_channel *channel);
int (*ev_process)(struct efx_channel *channel, int quota);
void (*ev_read_ack)(struct efx_channel *channel);
void (*ev_test_generate)(struct efx_channel *channel);
int revision; int revision;
unsigned int mem_map_size; unsigned int mem_map_size;
...@@ -1020,6 +1106,7 @@ struct efx_nic_type { ...@@ -1020,6 +1106,7 @@ struct efx_nic_type {
unsigned int phys_addr_channels; unsigned int phys_addr_channels;
unsigned int timer_period_max; unsigned int timer_period_max;
netdev_features_t offload_features; netdev_features_t offload_features;
int mcdi_max_ver;
}; };
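A hedged sketch of how the common MCDI code might drive these new hooks when polling for a completion after mcdi_request() has been issued (the function and variable names here are illustrative; only the efx_nic_type methods are real):
static int example_mcdi_poll(struct efx_nic *efx, efx_dword_t *outbuf,
			     size_t outlen)
{
	int rc;
	for (;;) {
		/* A non-zero reboot code aborts the outstanding request */
		rc = efx->type->mcdi_poll_reboot(efx);
		if (rc)
			return rc;
		if (efx->type->mcdi_poll_response(efx))
			break;
		udelay(10);
	}
	/* Offsets must be multiples of 4; the response buffer is padded
	 * so rounding the length up is safe.
	 */
	efx->type->mcdi_read_response(efx, outbuf, 0, outlen);
	return 0;
}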
/************************************************************************** /**************************************************************************
......
...@@ -19,282 +19,10 @@ ...@@ -19,282 +19,10 @@
#include "bitfield.h" #include "bitfield.h"
#include "efx.h" #include "efx.h"
#include "nic.h" #include "nic.h"
#include "regs.h" #include "farch_regs.h"
#include "io.h" #include "io.h"
#include "workarounds.h" #include "workarounds.h"
/**************************************************************************
*
* Configurable values
*
**************************************************************************
*/
/* This is set to 16 for a good reason. In summary, if larger than
* 16, the descriptor cache holds more than a default socket
* buffer's worth of packets (for UDP we can only have at most one
* socket buffer's worth outstanding). This combined with the fact
* that we only get 1 TX event per descriptor cache means the NIC
* goes idle.
*/
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1
#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
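To relate the two pairs of values: the *_ORDER constants appear to encode the entry counts as 8 << order (8 << 1 = 16, 8 << 3 = 64); this is an inference from the numbers above, not something stated in the patch.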
/* If EFX_MAX_INT_ERRORS internal errors occur within
* EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
* disable it.
*/
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5
/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4
/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST 0x000101
#define _EFX_CHANNEL_MAGIC_FILL 0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
#define EFX_CHANNEL_MAGIC_TEST(_channel) \
_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
(_tx_queue)->queue)
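To make the encoding concrete (illustrative values): a test event for channel 3 is _EFX_CHANNEL_MAGIC(0x000101, 3) = (0x000101 << 8) | 3 = 0x00010103, and _EFX_CHANNEL_MAGIC_CODE(0x00010103) = 0x00010103 >> 8 = 0x000101, which is how efx_handle_generated_event() below distinguishes the event classes while the full magic value still identifies the exact queue.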
static void efx_magic_event(struct efx_channel *channel, u32 magic);
/**************************************************************************
*
* Solarstorm hardware access
*
**************************************************************************/
static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
unsigned int index)
{
efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
value, index);
}
/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
unsigned int index)
{
return ((efx_qword_t *) (channel->eventq.addr)) +
(index & channel->eventq_mask);
}
/* See if an event is present
*
* We check both the high and low dword of the event for all ones. We
* wrote all ones when we cleared the event, and no valid event can
* have all ones in either its high or low dwords. This approach is
* robust against reordering.
*
* Note that using a single 64-bit comparison is incorrect; even
* though the CPU read will be atomic, the DMA write may not be.
*/
static inline int efx_event_present(efx_qword_t *event)
{
return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
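Concretely (a hedged reading of the code rather than anything stated in the patch): the queue is pre-filled with 0xff bytes in efx_nic_init_eventq() and each entry is set back to all ones in efx_nic_process_eventq() once consumed, so if the NIC's DMA write has landed only one of the two dwords, the other still reads as all ones and the entry stays "not yet present" until the write completes.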
static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
const efx_oword_t *mask)
{
return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
int efx_nic_test_registers(struct efx_nic *efx,
const struct efx_nic_register_test *regs,
size_t n_regs)
{
unsigned address = 0, i, j;
efx_oword_t mask, imask, original, reg, buf;
for (i = 0; i < n_regs; ++i) {
address = regs[i].address;
mask = imask = regs[i].mask;
EFX_INVERT_OWORD(imask);
efx_reado(efx, &original, address);
/* bit sweep on and off */
for (j = 0; j < 128; j++) {
if (!EFX_EXTRACT_OWORD32(mask, j, j))
continue;
/* Test this testable bit can be set in isolation */
EFX_AND_OWORD(reg, original, mask);
EFX_SET_OWORD32(reg, j, j, 1);
efx_writeo(efx, &reg, address);
efx_reado(efx, &buf, address);
if (efx_masked_compare_oword(&reg, &buf, &mask))
goto fail;
/* Test this testable bit can be cleared in isolation */
EFX_OR_OWORD(reg, original, mask);
EFX_SET_OWORD32(reg, j, j, 0);
efx_writeo(efx, &reg, address);
efx_reado(efx, &buf, address);
if (efx_masked_compare_oword(&reg, &buf, &mask))
goto fail;
}
efx_writeo(efx, &original, address);
}
return 0;
fail:
netif_err(efx, hw, efx->net_dev,
"wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
" at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
return -EIO;
}
/**************************************************************************
*
* Special buffer handling
* Special buffers are used for event queues and the TX and RX
* descriptor rings.
*
*************************************************************************/
/*
* Initialise a special buffer
*
* This will define a buffer (previously allocated via
* efx_alloc_special_buffer()) in the buffer table, allowing
* it to be used for event queues, descriptor rings etc.
*/
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
efx_qword_t buf_desc;
unsigned int index;
dma_addr_t dma_addr;
int i;
EFX_BUG_ON_PARANOID(!buffer->addr);
/* Write buffer descriptors to NIC */
for (i = 0; i < buffer->entries; i++) {
index = buffer->index + i;
dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
netif_dbg(efx, probe, efx->net_dev,
"mapping special buffer %d at %llx\n",
index, (unsigned long long)dma_addr);
EFX_POPULATE_QWORD_3(buf_desc,
FRF_AZ_BUF_ADR_REGION, 0,
FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
FRF_AZ_BUF_OWNER_ID_FBUF, 0);
efx_write_buf_tbl(efx, &buf_desc, index);
}
}
/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
efx_oword_t buf_tbl_upd;
unsigned int start = buffer->index;
unsigned int end = (buffer->index + buffer->entries - 1);
if (!buffer->entries)
return;
netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
buffer->index, buffer->index + buffer->entries - 1);
EFX_POPULATE_OWORD_4(buf_tbl_upd,
FRF_AZ_BUF_UPD_CMD, 0,
FRF_AZ_BUF_CLR_CMD, 1,
FRF_AZ_BUF_CLR_END_ID, end,
FRF_AZ_BUF_CLR_START_ID, start);
efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}
/*
* Allocate a new special buffer
*
* This allocates memory for a new buffer, clears it and allocates a
* new buffer ID range. It does not write into the buffer table.
*
* This call will allocate 4KB buffers, since 8KB buffers can't be
* used for event queues and descriptor rings.
*/
static int efx_alloc_special_buffer(struct efx_nic *efx,
struct efx_special_buffer *buffer,
unsigned int len)
{
len = ALIGN(len, EFX_BUF_SIZE);
buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
&buffer->dma_addr, GFP_KERNEL);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
buffer->entries = len / EFX_BUF_SIZE;
BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
/* Select new buffer ID */
buffer->index = efx->next_buffer_table;
efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
BUG_ON(efx_sriov_enabled(efx) &&
efx->vf_buftbl_base < efx->next_buffer_table);
#endif
netif_dbg(efx, probe, efx->net_dev,
"allocating special buffers %d-%d at %llx+%x "
"(virt %p phys %llx)\n", buffer->index,
buffer->index + buffer->entries - 1,
(u64)buffer->dma_addr, len,
buffer->addr, (u64)virt_to_phys(buffer->addr));
return 0;
}
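A worked example (illustrative numbers, assuming the 4KB EFX_BUF_SIZE mentioned above and an 8-byte efx_qword_t): a 4096-entry event queue needs 4096 * 8 = 32768 bytes, which is already 4KB-aligned, so it occupies eight 4KB buffers and consumes eight consecutive IDs from efx->next_buffer_table.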
static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
if (!buffer->addr)
return;
netif_dbg(efx, hw, efx->net_dev,
"deallocating special buffers %d-%d at %llx+%x "
"(virt %p phys %llx)\n", buffer->index,
buffer->index + buffer->entries - 1,
(u64)buffer->dma_addr, buffer->len,
buffer->addr, (u64)virt_to_phys(buffer->addr));
dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
buffer->dma_addr);
buffer->addr = NULL;
buffer->entries = 0;
}
/************************************************************************** /**************************************************************************
* *
* Generic buffer handling * Generic buffer handling
...@@ -303,11 +31,11 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) ...@@ -303,11 +31,11 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
**************************************************************************/ **************************************************************************/
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len) unsigned int len, gfp_t gfp_flags)
{ {
buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
&buffer->dma_addr, &buffer->dma_addr,
GFP_ATOMIC | __GFP_ZERO); gfp_flags | __GFP_ZERO);
if (!buffer->addr) if (!buffer->addr)
return -ENOMEM; return -ENOMEM;
buffer->len = len; buffer->len = len;
...@@ -323,1057 +51,6 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) ...@@ -323,1057 +51,6 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
} }
} }
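With the extra gfp_flags argument above, callers now choose the allocation context themselves; a hedged sketch of a caller (the function name and the 1024-byte length are made up for illustration):
static int example_alloc_dma_buffer(struct efx_nic *efx,
				    struct efx_buffer *buf)
{
	/* Process context, so a sleeping allocation is acceptable;
	 * atomic callers would pass GFP_ATOMIC instead.  The buffer is
	 * released later with efx_nic_free_buffer(efx, buf).
	 */
	return efx_nic_alloc_buffer(efx, buf, 1024, GFP_KERNEL);
}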
/**************************************************************************
*
* TX path
*
**************************************************************************/
/* Returns a pointer to the specified transmit descriptor in the TX
* descriptor queue belonging to the specified channel.
*/
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
}
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
unsigned write_ptr;
efx_dword_t reg;
write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
efx_writed_page(tx_queue->efx, &reg,
FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
const efx_qword_t *txd)
{
unsigned write_ptr;
efx_oword_t reg;
BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
FRF_AZ_TX_DESC_WPTR, write_ptr);
reg.qword[0] = *txd;
efx_writeo_page(tx_queue->efx, &reg,
FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
static inline bool
efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
tx_queue->empty_read_count = 0;
return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
&& tx_queue->write_count - write_count == 1;
}
/* For each entry inserted into the software descriptor ring, create a
* descriptor in the hardware TX descriptor ring (in host memory), and
* write a doorbell.
*/
void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer;
efx_qword_t *txd;
unsigned write_ptr;
unsigned old_write_count = tx_queue->write_count;
BUG_ON(tx_queue->write_count == tx_queue->insert_count);
do {
write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[write_ptr];
txd = efx_tx_desc(tx_queue, write_ptr);
++tx_queue->write_count;
/* Create TX descriptor ring entry */
BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
EFX_POPULATE_QWORD_4(*txd,
FSF_AZ_TX_KER_CONT,
buffer->flags & EFX_TX_BUF_CONT,
FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
FSF_AZ_TX_KER_BUF_REGION, 0,
FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
} while (tx_queue->write_count != tx_queue->insert_count);
wmb(); /* Ensure descriptors are written before they are fetched */
if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
txd = efx_tx_desc(tx_queue,
old_write_count & tx_queue->ptr_mask);
efx_push_tx_desc(tx_queue, txd);
++tx_queue->pushes;
} else {
efx_notify_tx_desc(tx_queue);
}
}
/* Allocate hardware resources for a TX queue */
int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
unsigned entries;
entries = tx_queue->ptr_mask + 1;
return efx_alloc_special_buffer(efx, &tx_queue->txd,
entries * sizeof(efx_qword_t));
}
void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
efx_oword_t reg;
/* Pin TX descriptor ring */
efx_init_special_buffer(efx, &tx_queue->txd);
/* Push TX descriptor ring to card */
EFX_POPULATE_OWORD_10(reg,
FRF_AZ_TX_DESCQ_EN, 1,
FRF_AZ_TX_ISCSI_DDIG_EN, 0,
FRF_AZ_TX_ISCSI_HDIG_EN, 0,
FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
FRF_AZ_TX_DESCQ_EVQ_ID,
tx_queue->channel->channel,
FRF_AZ_TX_DESCQ_OWNER_ID, 0,
FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
FRF_AZ_TX_DESCQ_SIZE,
__ffs(tx_queue->txd.entries),
FRF_AZ_TX_DESCQ_TYPE, 0,
FRF_BZ_TX_NON_IP_DROP_DIS, 1);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
!csum);
}
efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
tx_queue->queue);
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
/* Only 128 bits in this register */
BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
__clear_bit_le(tx_queue->queue, &reg);
else
__set_bit_le(tx_queue->queue, &reg);
efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
}
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
EFX_POPULATE_OWORD_1(reg,
FRF_BZ_TX_PACE,
(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
FFE_BZ_TX_PACE_OFF :
FFE_BZ_TX_PACE_RESERVED);
efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
tx_queue->queue);
}
}
static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
efx_oword_t tx_flush_descq;
WARN_ON(atomic_read(&tx_queue->flush_outstanding));
atomic_set(&tx_queue->flush_outstanding, 1);
EFX_POPULATE_OWORD_2(tx_flush_descq,
FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}
void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
{
struct efx_nic *efx = tx_queue->efx;
efx_oword_t tx_desc_ptr;
/* Remove TX descriptor ring from card */
EFX_ZERO_OWORD(tx_desc_ptr);
efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
tx_queue->queue);
/* Unpin TX descriptor ring */
efx_fini_special_buffer(efx, &tx_queue->txd);
}
/* Free buffers backing TX queue */
void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
*
* RX path
*
**************************************************************************/
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
}
/* This creates an entry in the RX descriptor queue */
static inline void
efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
struct efx_rx_buffer *rx_buf;
efx_qword_t *rxd;
rxd = efx_rx_desc(rx_queue, index);
rx_buf = efx_rx_buffer(rx_queue, index);
EFX_POPULATE_QWORD_3(*rxd,
FSF_AZ_RX_KER_BUF_SIZE,
rx_buf->len -
rx_queue->efx->type->rx_buffer_padding,
FSF_AZ_RX_KER_BUF_REGION, 0,
FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}
/* This writes to the RX_DESC_WPTR register for the specified receive
* descriptor ring.
*/
void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
efx_dword_t reg;
unsigned write_ptr;
while (rx_queue->notified_count != rx_queue->added_count) {
efx_build_rx_desc(
rx_queue,
rx_queue->notified_count & rx_queue->ptr_mask);
++rx_queue->notified_count;
}
wmb();
write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
efx_rx_queue_index(rx_queue));
}
int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
unsigned entries;
entries = rx_queue->ptr_mask + 1;
return efx_alloc_special_buffer(efx, &rx_queue->rxd,
entries * sizeof(efx_qword_t));
}
void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
{
efx_oword_t rx_desc_ptr;
struct efx_nic *efx = rx_queue->efx;
bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
bool iscsi_digest_en = is_b0;
bool jumbo_en;
/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
* DMA to continue after a PCIe page boundary (and scattering
* is not possible). In Falcon B0 and Siena, it enables
* scatter.
*/
jumbo_en = !is_b0 || efx->rx_scatter;
netif_dbg(efx, hw, efx->net_dev,
"RX queue %d ring in special buffers %d-%d\n",
efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
rx_queue->rxd.index + rx_queue->rxd.entries - 1);
rx_queue->scatter_n = 0;
/* Pin RX descriptor ring */
efx_init_special_buffer(efx, &rx_queue->rxd);
/* Push RX descriptor ring to card */
EFX_POPULATE_OWORD_10(rx_desc_ptr,
FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
FRF_AZ_RX_DESCQ_EVQ_ID,
efx_rx_queue_channel(rx_queue)->channel,
FRF_AZ_RX_DESCQ_OWNER_ID, 0,
FRF_AZ_RX_DESCQ_LABEL,
efx_rx_queue_index(rx_queue),
FRF_AZ_RX_DESCQ_SIZE,
__ffs(rx_queue->rxd.entries),
FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
FRF_AZ_RX_DESCQ_EN, 1);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
efx_rx_queue_index(rx_queue));
}
static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
struct efx_nic *efx = rx_queue->efx;
efx_oword_t rx_flush_descq;
EFX_POPULATE_OWORD_2(rx_flush_descq,
FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
FRF_AZ_RX_FLUSH_DESCQ,
efx_rx_queue_index(rx_queue));
efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}
void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
{
efx_oword_t rx_desc_ptr;
struct efx_nic *efx = rx_queue->efx;
/* Remove RX descriptor ring from card */
EFX_ZERO_OWORD(rx_desc_ptr);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
efx_rx_queue_index(rx_queue));
/* Unpin RX descriptor ring */
efx_fini_special_buffer(efx, &rx_queue->rxd);
}
/* Free buffers backing RX queue */
void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
*
* Flush handling
*
**************************************************************************/
/* efx_nic_flush_queues() must be woken up when all flushes are completed,
* or more RX flushes can be kicked off.
*/
static bool efx_flush_wake(struct efx_nic *efx)
{
/* Ensure that all updates are visible to efx_nic_flush_queues() */
smp_mb();
return (atomic_read(&efx->drain_pending) == 0 ||
(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
&& atomic_read(&efx->rxq_flush_pending) > 0));
}
static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
bool i = true;
efx_oword_t txd_ptr_tbl;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_reado_table(efx, &txd_ptr_tbl,
FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
if (EFX_OWORD_FIELD(txd_ptr_tbl,
FRF_AZ_TX_DESCQ_FLUSH) ||
EFX_OWORD_FIELD(txd_ptr_tbl,
FRF_AZ_TX_DESCQ_EN)) {
netif_dbg(efx, hw, efx->net_dev,
"flush did not complete on TXQ %d\n",
tx_queue->queue);
i = false;
} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
1, 0)) {
/* The flush is complete, but we didn't
* receive a flush completion event
*/
netif_dbg(efx, hw, efx->net_dev,
"flush complete on TXQ %d, so drain "
"the queue\n", tx_queue->queue);
/* Don't need to increment drain_pending as it
* has already been incremented for the queues
* which did not drain
*/
efx_magic_event(channel,
EFX_CHANNEL_MAGIC_TX_DRAIN(
tx_queue));
}
}
}
return i;
}
/* Flush all the transmit queues, and continue flushing receive queues until
* they're all flushed. Wait for the DRAIN events to be received so that there
* are no more RX and TX events left on any channel. */
int efx_nic_flush_queues(struct efx_nic *efx)
{
unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
struct efx_tx_queue *tx_queue;
int rc = 0;
efx->type->prepare_flush(efx);
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
atomic_inc(&efx->drain_pending);
efx_flush_tx_queue(tx_queue);
}
efx_for_each_channel_rx_queue(rx_queue, channel) {
atomic_inc(&efx->drain_pending);
rx_queue->flush_pending = true;
atomic_inc(&efx->rxq_flush_pending);
}
}
while (timeout && atomic_read(&efx->drain_pending) > 0) {
/* If SRIOV is enabled, then offload receive queue flushing to
* the firmware (though we will still have to poll for
* completion). If that fails, fall back to the old scheme.
*/
if (efx_sriov_enabled(efx)) {
rc = efx_mcdi_flush_rxqs(efx);
if (!rc)
goto wait;
}
/* The hardware supports four concurrent rx flushes, each of
* which may need to be retried if there is an outstanding
* descriptor fetch
*/
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel) {
if (atomic_read(&efx->rxq_flush_outstanding) >=
EFX_RX_FLUSH_COUNT)
break;
if (rx_queue->flush_pending) {
rx_queue->flush_pending = false;
atomic_dec(&efx->rxq_flush_pending);
atomic_inc(&efx->rxq_flush_outstanding);
efx_flush_rx_queue(rx_queue);
}
}
}
wait:
timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
timeout);
}
if (atomic_read(&efx->drain_pending) &&
!efx_check_tx_flush_complete(efx)) {
netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
"(rx %d+%d)\n", atomic_read(&efx->drain_pending),
atomic_read(&efx->rxq_flush_outstanding),
atomic_read(&efx->rxq_flush_pending));
rc = -ETIMEDOUT;
atomic_set(&efx->drain_pending, 0);
atomic_set(&efx->rxq_flush_pending, 0);
atomic_set(&efx->rxq_flush_outstanding, 0);
}
efx->type->finish_flush(efx);
return rc;
}
/**************************************************************************
*
* Event queue processing
* Event queues are processed by per-channel tasklets.
*
**************************************************************************/
/* Update a channel's event queue's read pointer (RPTR) register
*
* This writes the EVQ_RPTR_REG register for the specified channel's
* event queue.
*/
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
efx_dword_t reg;
struct efx_nic *efx = channel->efx;
EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
channel->eventq_read_ptr & channel->eventq_mask);
/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
* of 4 bytes, but it is really 16 bytes just like later revisions.
*/
efx_writed(efx, &reg,
efx->type->evq_rptr_tbl_base +
FR_BZ_EVQ_RPTR_STEP * channel->channel);
}
/* Use HW to insert a SW defined event */
void efx_generate_event(struct efx_nic *efx, unsigned int evq,
efx_qword_t *event)
{
efx_oword_t drv_ev_reg;
BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
FRF_AZ_DRV_EV_DATA_WIDTH != 64);
drv_ev_reg.u32[0] = event->u32[0];
drv_ev_reg.u32[1] = event->u32[1];
drv_ev_reg.u32[2] = 0;
drv_ev_reg.u32[3] = 0;
EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
static void efx_magic_event(struct efx_channel *channel, u32 magic)
{
efx_qword_t event;
EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
FSE_AZ_EV_CODE_DRV_GEN_EV,
FSF_AZ_DRV_GEN_EV_MAGIC, magic);
efx_generate_event(channel->efx, channel->channel, &event);
}
/* Handle a transmit completion event
*
* The NIC batches TX completion events; the message we receive is of
* the form "complete all TX events up to this index".
*/
static int
efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
unsigned int tx_ev_desc_ptr;
unsigned int tx_ev_q_label;
struct efx_tx_queue *tx_queue;
struct efx_nic *efx = channel->efx;
int tx_packets = 0;
if (unlikely(ACCESS_ONCE(efx->reset_pending)))
return 0;
if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
/* Transmit completion */
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = efx_channel_get_tx_queue(
channel, tx_ev_q_label % EFX_TXQ_TYPES);
tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
tx_queue->ptr_mask);
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = efx_channel_get_tx_queue(
channel, tx_ev_q_label % EFX_TXQ_TYPES);
netif_tx_lock(efx->net_dev);
efx_notify_tx_desc(tx_queue);
netif_tx_unlock(efx->net_dev);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
EFX_WORKAROUND_10727(efx)) {
efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
} else {
netif_err(efx, tx_err, efx->net_dev,
"channel %d unexpected TX event "
EFX_QWORD_FMT"\n", channel->channel,
EFX_QWORD_VAL(*event));
}
return tx_packets;
}
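To make the batched-completion arithmetic concrete (illustrative numbers only): with ptr_mask == 1023, read_count == 1000 and an event carrying tx_ev_desc_ptr == 10, the masked subtraction gives (10 - 1000) & 1023 == 34, a wrap-safe count of the descriptors covered by this single event.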
/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
const efx_qword_t *event)
{
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_nic *efx = rx_queue->efx;
bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
bool rx_ev_other_err, rx_ev_pause_frm;
bool rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned rx_ev_pkt_type;
rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
/* Every error apart from tobe_disc and pause_frm */
rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
/* Count errors that are not in MAC stats. Ignore expected
* checksum errors during self-test. */
if (rx_ev_frm_trunc)
++channel->n_rx_frm_trunc;
else if (rx_ev_tobe_disc)
++channel->n_rx_tobe_disc;
else if (!efx->loopback_selftest) {
if (rx_ev_ip_hdr_chksum_err)
++channel->n_rx_ip_hdr_chksum_err;
else if (rx_ev_tcp_udp_chksum_err)
++channel->n_rx_tcp_udp_chksum_err;
}
/* TOBE_DISC is expected on unicast mismatches; don't print out an
* error message. FRM_TRUNC indicates RXDP dropped the packet due
* to a FIFO overflow.
*/
#ifdef DEBUG
if (rx_ev_other_err && net_ratelimit()) {
netif_dbg(efx, rx_err, efx->net_dev,
" RX queue %d unexpected RX event "
EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
rx_ev_ip_hdr_chksum_err ?
" [IP_HDR_CHKSUM_ERR]" : "",
rx_ev_tcp_udp_chksum_err ?
" [TCP_UDP_CHKSUM_ERR]" : "",
rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
rx_ev_drib_nib ? " [DRIB_NIB]" : "",
rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
rx_ev_pause_frm ? " [PAUSE]" : "");
}
#endif
/* The frame must be discarded if any of these are true. */
return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
rx_ev_tobe_disc | rx_ev_pause_frm) ?
EFX_RX_PKT_DISCARD : 0;
}
/* Handle receive events that are not in-order. Return true if this
* can be handled as a partial packet discard, false if it's more
* serious.
*/
static bool
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_nic *efx = rx_queue->efx;
unsigned expected, dropped;
if (rx_queue->scatter_n &&
index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
rx_queue->ptr_mask)) {
++channel->n_rx_nodesc_trunc;
return true;
}
expected = rx_queue->removed_count & rx_queue->ptr_mask;
dropped = (index - expected) & rx_queue->ptr_mask;
netif_info(efx, rx_err, efx->net_dev,
"dropped %d events (index=%d expected=%d)\n",
dropped, index, expected);
efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
return false;
}
/* Handle a packet received event
*
* The NIC gives a "discard" flag if it's a unicast packet with the
* wrong destination address
* Also "is multicast" and "matches multicast filter" flags can be used to
* discard non-matching multicast packets.
*/
static void
efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned expected_ptr;
bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
u16 flags;
struct efx_rx_queue *rx_queue;
struct efx_nic *efx = channel->efx;
if (unlikely(ACCESS_ONCE(efx->reset_pending)))
return;
rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
channel->channel);
rx_queue = efx_channel_get_rx_queue(channel);
rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
rx_queue->ptr_mask);
/* Check for partial drops and other errors */
if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
if (rx_ev_desc_ptr != expected_ptr &&
!efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
return;
/* Discard all pending fragments */
if (rx_queue->scatter_n) {
efx_rx_packet(
rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
rx_queue->removed_count += rx_queue->scatter_n;
rx_queue->scatter_n = 0;
}
/* Return if there is no new fragment */
if (rx_ev_desc_ptr != expected_ptr)
return;
/* Discard new fragment if not SOP */
if (!rx_ev_sop) {
efx_rx_packet(
rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
1, 0, EFX_RX_PKT_DISCARD);
++rx_queue->removed_count;
return;
}
}
++rx_queue->scatter_n;
if (rx_ev_cont)
return;
rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
if (likely(rx_ev_pkt_ok)) {
/* If packet is marked as OK then we can rely on the
* hardware checksum and classification.
*/
flags = 0;
switch (rx_ev_hdr_type) {
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
flags |= EFX_RX_PKT_TCP;
/* fall through */
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
flags |= EFX_RX_PKT_CSUMMED;
/* fall through */
case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
break;
}
} else {
flags = efx_handle_rx_not_ok(rx_queue, event);
}
/* Detect multicast packets that didn't match the filter */
rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
if (rx_ev_mcast_pkt) {
unsigned int rx_ev_mcast_hash_match =
EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
if (unlikely(!rx_ev_mcast_hash_match)) {
++channel->n_rx_mcast_mismatch;
flags |= EFX_RX_PKT_DISCARD;
}
}
channel->irq_mod_score += 2;
/* Handle received packet */
efx_rx_packet(rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
rx_queue->scatter_n, rx_ev_byte_cnt, flags);
rx_queue->removed_count += rx_queue->scatter_n;
rx_queue->scatter_n = 0;
}
/* If this flush done event corresponds to a &struct efx_tx_queue, then
* send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
* of all transmit completions.
*/
static void
efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_tx_queue *tx_queue;
int qid;
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
qid % EFX_TXQ_TYPES);
if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
efx_magic_event(tx_queue->channel,
EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
}
}
}
/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
* was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
* the RX queue back to the mask of RX queues in need of flushing.
*/
static void
efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
int qid;
bool failed;
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
if (qid >= efx->n_channels)
return;
channel = efx_get_channel(efx, qid);
if (!efx_channel_has_rx_queue(channel))
return;
rx_queue = efx_channel_get_rx_queue(channel);
if (failed) {
netif_info(efx, hw, efx->net_dev,
"RXQ %d flush retry\n", qid);
rx_queue->flush_pending = true;
atomic_inc(&efx->rxq_flush_pending);
} else {
efx_magic_event(efx_rx_queue_channel(rx_queue),
EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
}
atomic_dec(&efx->rxq_flush_outstanding);
if (efx_flush_wake(efx))
wake_up(&efx->flush_wq);
}
static void
efx_handle_drain_event(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
WARN_ON(atomic_read(&efx->drain_pending) == 0);
atomic_dec(&efx->drain_pending);
if (efx_flush_wake(efx))
wake_up(&efx->flush_wq);
}
static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
struct efx_rx_queue *rx_queue =
efx_channel_has_rx_queue(channel) ?
efx_channel_get_rx_queue(channel) : NULL;
unsigned magic, code;
magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
code = _EFX_CHANNEL_MAGIC_CODE(magic);
if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
channel->event_test_cpu = raw_smp_processor_id();
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
/* The queue must be empty, so we won't receive any rx
* events and efx_process_channel() won't refill the
* queue. Refill it here. */
efx_fast_push_rx_descriptors(rx_queue);
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
rx_queue->enabled = false;
efx_handle_drain_event(channel);
} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
efx_handle_drain_event(channel);
} else {
netif_dbg(efx, hw, efx->net_dev, "channel %d received "
"generated event "EFX_QWORD_FMT"\n",
channel->channel, EFX_QWORD_VAL(*event));
}
}
static void
efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
struct efx_nic *efx = channel->efx;
unsigned int ev_sub_code;
unsigned int ev_sub_data;
ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
switch (ev_sub_code) {
case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_handle_tx_flush_done(efx, event);
efx_sriov_tx_flush_done(efx, event);
break;
case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
channel->channel, ev_sub_data);
efx_handle_rx_flush_done(efx, event);
efx_sriov_rx_flush_done(efx, event);
break;
case FSE_AZ_EVQ_INIT_DONE_EV:
netif_dbg(efx, hw, efx->net_dev,
"channel %d EVQ %d initialised\n",
channel->channel, ev_sub_data);
break;
case FSE_AZ_SRM_UPD_DONE_EV:
netif_vdbg(efx, hw, efx->net_dev,
"channel %d SRAM update done\n", channel->channel);
break;
case FSE_AZ_WAKE_UP_EV:
netif_vdbg(efx, hw, efx->net_dev,
"channel %d RXQ %d wakeup event\n",
channel->channel, ev_sub_data);
break;
case FSE_AZ_TIMER_EV:
netif_vdbg(efx, hw, efx->net_dev,
"channel %d RX queue %d timer expired\n",
channel->channel, ev_sub_data);
break;
case FSE_AA_RX_RECOVER_EV:
netif_err(efx, rx_err, efx->net_dev,
"channel %d seen DRIVER RX_RESET event. "
"Resetting.\n", channel->channel);
atomic_inc(&efx->rx_reset);
efx_schedule_reset(efx,
EFX_WORKAROUND_6555(efx) ?
RESET_TYPE_RX_RECOVERY :
RESET_TYPE_DISABLE);
break;
case FSE_BZ_RX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
netif_err(efx, rx_err, efx->net_dev,
"RX DMA Q %d reports descriptor fetch error."
" RX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
} else
efx_sriov_desc_fetch_err(efx, ev_sub_data);
break;
case FSE_BZ_TX_DSC_ERROR_EV:
if (ev_sub_data < EFX_VI_BASE) {
netif_err(efx, tx_err, efx->net_dev,
"TX DMA Q %d reports descriptor fetch error."
" TX Q %d is disabled.\n", ev_sub_data,
ev_sub_data);
efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
} else
efx_sriov_desc_fetch_err(efx, ev_sub_data);
break;
default:
netif_vdbg(efx, hw, efx->net_dev,
"channel %d unknown driver event code %d "
"data %04x\n", channel->channel, ev_sub_code,
ev_sub_data);
break;
}
}
int efx_nic_process_eventq(struct efx_channel *channel, int budget)
{
struct efx_nic *efx = channel->efx;
unsigned int read_ptr;
efx_qword_t event, *p_event;
int ev_code;
int tx_packets = 0;
int spent = 0;
read_ptr = channel->eventq_read_ptr;
for (;;) {
p_event = efx_event(channel, read_ptr);
event = *p_event;
if (!efx_event_present(&event))
/* End of events */
break;
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
"channel %d event is "EFX_QWORD_FMT"\n",
channel->channel, EFX_QWORD_VAL(event));
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
++read_ptr;
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
switch (ev_code) {
case FSE_AZ_EV_CODE_RX_EV:
efx_handle_rx_event(channel, &event);
if (++spent == budget)
goto out;
break;
case FSE_AZ_EV_CODE_TX_EV:
tx_packets += efx_handle_tx_event(channel, &event);
if (tx_packets > efx->txq_entries) {
spent = budget;
goto out;
}
break;
case FSE_AZ_EV_CODE_DRV_GEN_EV:
efx_handle_generated_event(channel, &event);
break;
case FSE_AZ_EV_CODE_DRIVER_EV:
efx_handle_driver_event(channel, &event);
break;
case FSE_CZ_EV_CODE_USER_EV:
efx_sriov_event(channel, &event);
break;
case FSE_CZ_EV_CODE_MCDI_EV:
efx_mcdi_process_event(channel, &event);
break;
case FSE_AZ_EV_CODE_GLOBAL_EV:
if (efx->type->handle_global_event &&
efx->type->handle_global_event(channel, &event))
break;
/* else fall through */
default:
netif_err(channel->efx, hw, channel->efx->net_dev,
"channel %d unknown event type %d (data "
EFX_QWORD_FMT ")\n", channel->channel,
ev_code, EFX_QWORD_VAL(event));
}
}
out:
channel->eventq_read_ptr = read_ptr;
return spent;
}
/* Check whether an event is present in the eventq at the current /* Check whether an event is present in the eventq at the current
* read pointer. Only useful for self-test. * read pointer. Only useful for self-test.
*/ */
...@@ -1382,323 +59,18 @@ bool efx_nic_event_present(struct efx_channel *channel) ...@@ -1382,323 +59,18 @@ bool efx_nic_event_present(struct efx_channel *channel)
return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
} }
/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
unsigned entries;
entries = channel->eventq_mask + 1;
return efx_alloc_special_buffer(efx, &channel->eventq,
entries * sizeof(efx_qword_t));
}
void efx_nic_init_eventq(struct efx_channel *channel)
{
efx_oword_t reg;
struct efx_nic *efx = channel->efx;
netif_dbg(efx, hw, efx->net_dev,
"channel %d event queue in special buffers %d-%d\n",
channel->channel, channel->eventq.index,
channel->eventq.index + channel->eventq.entries - 1);
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
EFX_POPULATE_OWORD_3(reg,
FRF_CZ_TIMER_Q_EN, 1,
FRF_CZ_HOST_NOTIFY_MODE, 0,
FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
}
/* Pin event queue buffer */
efx_init_special_buffer(efx, &channel->eventq);
/* Fill event queue with all ones (i.e. empty events) */
memset(channel->eventq.addr, 0xff, channel->eventq.len);
/* Push event queue to card */
EFX_POPULATE_OWORD_3(reg,
FRF_AZ_EVQ_EN, 1,
FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
channel->channel);
efx->type->push_irq_moderation(channel);
}
void efx_nic_fini_eventq(struct efx_channel *channel)
{
efx_oword_t reg;
struct efx_nic *efx = channel->efx;
/* Remove event queue from card */
EFX_ZERO_OWORD(reg);
efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
channel->channel);
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
/* Unpin event queue */
efx_fini_special_buffer(efx, &channel->eventq);
}
/* Free buffers backing event queue */
void efx_nic_remove_eventq(struct efx_channel *channel)
{
efx_free_special_buffer(channel->efx, &channel->eventq);
}
void efx_nic_event_test_start(struct efx_channel *channel) void efx_nic_event_test_start(struct efx_channel *channel)
{ {
channel->event_test_cpu = -1; channel->event_test_cpu = -1;
smp_wmb(); smp_wmb();
efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel)); channel->efx->type->ev_test_generate(channel);
}
void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
efx_magic_event(efx_rx_queue_channel(rx_queue),
EFX_CHANNEL_MAGIC_FILL(rx_queue));
}
/**************************************************************************
*
* Hardware interrupts
* The hardware interrupt handler does very little work; all the event
* queue processing is carried out by per-channel tasklets.
*
**************************************************************************/
/* Enable/disable/generate interrupts */
static inline void efx_nic_interrupts(struct efx_nic *efx,
bool enabled, bool force)
{
efx_oword_t int_en_reg_ker;
EFX_POPULATE_OWORD_3(int_en_reg_ker,
FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
FRF_AZ_KER_INT_KER, force,
FRF_AZ_DRV_INT_EN_KER, enabled);
efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
} }
void efx_nic_enable_interrupts(struct efx_nic *efx)
{
EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
efx_nic_interrupts(efx, true, false);
}
void efx_nic_disable_interrupts(struct efx_nic *efx)
{
/* Disable interrupts */
efx_nic_interrupts(efx, false, false);
}
/* Generate a test interrupt
* Interrupt must already have been enabled, otherwise nasty things
* may happen.
*/
void efx_nic_irq_test_start(struct efx_nic *efx) void efx_nic_irq_test_start(struct efx_nic *efx)
{ {
efx->last_irq_cpu = -1; efx->last_irq_cpu = -1;
smp_wmb(); smp_wmb();
efx_nic_interrupts(efx, true, true); efx->type->irq_test_generate(efx);
}
/* Process a fatal interrupt
* Disable bus mastering ASAP and schedule a reset
*/
irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data = efx->nic_data;
efx_oword_t *int_ker = efx->irq_status.addr;
efx_oword_t fatal_intr;
int error, mem_perr;
efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
EFX_OWORD_VAL(fatal_intr),
error ? "disabling bus mastering" : "no recognised error");
/* If this is a memory parity error, dump which blocks are offending */
mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
if (mem_perr) {
efx_oword_t reg;
efx_reado(efx, &reg, FR_AZ_MEM_STAT);
netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
EFX_OWORD_VAL(reg));
}
/* Disable both devices */
pci_clear_master(efx->pci_dev);
if (efx_nic_is_dual_func(efx))
pci_clear_master(nic_data->pci_dev2);
efx_nic_disable_interrupts(efx);
/* Count errors and reset or disable the NIC accordingly */
if (efx->int_error_count == 0 ||
time_after(jiffies, efx->int_error_expire)) {
efx->int_error_count = 0;
efx->int_error_expire =
jiffies + EFX_INT_ERROR_EXPIRE * HZ;
}
if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR - reset scheduled\n");
efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
} else {
netif_err(efx, hw, efx->net_dev,
"SYSTEM ERROR - max number of errors seen."
"NIC will be disabled\n");
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
}
return IRQ_HANDLED;
}
/* Handle a legacy interrupt
* Acknowledges the interrupt and schedule event queue processing.
*/
static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
efx_oword_t *int_ker = efx->irq_status.addr;
irqreturn_t result = IRQ_NONE;
struct efx_channel *channel;
efx_dword_t reg;
u32 queues;
int syserr;
/* Could this be ours? If interrupts are disabled then the
* channel state may not be valid.
*/
if (!efx->legacy_irq_enabled)
return result;
/* Read the ISR which also ACKs the interrupts */
efx_readd(efx, &reg, FR_BZ_INT_ISR0);
queues = EFX_EXTRACT_DWORD(reg, 0, 31);
/* Legacy interrupts are disabled too late by the EEH kernel
* code. Disable them earlier.
* If an EEH error occurred, the read will have returned all ones.
*/
if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
!efx->eeh_disabled_legacy_irq) {
disable_irq_nosync(efx->legacy_irq);
efx->eeh_disabled_legacy_irq = true;
}
/* Handle non-event-queue sources */
if (queues & (1U << efx->irq_level)) {
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
return efx_nic_fatal_interrupt(efx);
efx->last_irq_cpu = raw_smp_processor_id();
}
if (queues != 0) {
if (EFX_WORKAROUND_15783(efx))
efx->irq_zero_count = 0;
/* Schedule processing of any interrupting queues */
efx_for_each_channel(channel, efx) {
if (queues & 1)
efx_schedule_channel_irq(channel);
queues >>= 1;
}
result = IRQ_HANDLED;
} else if (EFX_WORKAROUND_15783(efx)) {
efx_qword_t *event;
/* We can't return IRQ_HANDLED more than once on seeing ISR=0
* because this might be a shared interrupt. */
if (efx->irq_zero_count++ == 0)
result = IRQ_HANDLED;
/* Ensure we schedule or rearm all event queues */
efx_for_each_channel(channel, efx) {
event = efx_event(channel, channel->eventq_read_ptr);
if (efx_event_present(event))
efx_schedule_channel_irq(channel);
else
efx_nic_eventq_read_ack(channel);
}
}
if (result == IRQ_HANDLED)
netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
return result;
}
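Each bit of the ISR value read above corresponds to one event queue (with bit efx->irq_level doubling as the fatal-interrupt source), and the loop simply walks that bitmap. A tiny standalone sketch of the walk, with the channel scheduling stubbed out:

#include <stdint.h>
#include <stdio.h>

static void schedule_channel_irq(unsigned int channel)
{
	printf("scheduling NAPI poll for channel %u\n", channel);
}

/* Walk a 32-bit interrupt-status bitmap, one channel per bit, the way
 * the legacy handler walks 'queues'.
 */
static void schedule_interrupting_channels(uint32_t queues,
					   unsigned int n_channels)
{
	for (unsigned int ch = 0; ch < n_channels && queues; ch++) {
		if (queues & 1)
			schedule_channel_irq(ch);
		queues >>= 1;
	}
}

int main(void)
{
	schedule_interrupting_channels(0x0000000du, 8); /* channels 0, 2, 3 */
	return 0;
}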
/* Handle an MSI interrupt
*
* Handle an MSI hardware interrupt. This routine schedules event
* queue processing. No interrupt acknowledgement cycle is necessary.
* Also, we never need to check that the interrupt is for us, since
* MSI interrupts cannot be shared.
*/
static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
{
struct efx_channel *channel = *(struct efx_channel **)dev_id;
struct efx_nic *efx = channel->efx;
efx_oword_t *int_ker = efx->irq_status.addr;
int syserr;
netif_vdbg(efx, intr, efx->net_dev,
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
/* Handle non-event-queue sources */
if (channel->channel == efx->irq_level) {
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
return efx_nic_fatal_interrupt(efx);
efx->last_irq_cpu = raw_smp_processor_id();
}
/* Schedule processing of the channel */
efx_schedule_channel_irq(channel);
return IRQ_HANDLED;
}
/* Setup RSS indirection table.
* This maps from the hash value of the packet to RXQ
*/
void efx_nic_push_rx_indir_table(struct efx_nic *efx)
{
size_t i = 0;
efx_dword_t dword;
if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
return;
BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
FR_BZ_RX_INDIRECTION_TBL_ROWS);
for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
efx->rx_indir_table[i]);
efx_writed(efx, &dword,
FR_BZ_RX_INDIRECTION_TBL +
FR_BZ_RX_INDIRECTION_TBL_STEP * i);
}
} }
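The indirection table written above is what turns a packet's RSS hash into an RX queue: the hardware indexes the table with low-order hash bits and delivers to the queue stored in that row. A hedged sketch of the lookup and of one plausible even spread across queues (the fill policy, modulo indexing and 128-row size are assumptions for illustration, not a statement of the exact hardware hashing):

#include <stdint.h>
#include <stdio.h>

#define INDIR_TBL_ROWS 128  /* assumed value of FR_BZ_RX_INDIRECTION_TBL_ROWS */

/* One plausible way to spread hash buckets evenly over the RX queues. */
static void fill_indir_table(uint8_t *table, unsigned int n_rx_queues)
{
	for (unsigned int i = 0; i < INDIR_TBL_ROWS; i++)
		table[i] = i % n_rx_queues;
}

/* The lookup the NIC performs: hash bits select a row, the row names
 * the destination RX queue.
 */
static unsigned int rxq_for_hash(const uint8_t *table, uint32_t rss_hash)
{
	return table[rss_hash % INDIR_TBL_ROWS];
}

int main(void)
{
	uint8_t table[INDIR_TBL_ROWS];

	fill_indir_table(table, 4);
	printf("hash 0x1234abcd -> RXQ %u\n", rxq_for_hash(table, 0x1234abcdu));
	return 0;
}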
/* Hook interrupt handler(s) /* Hook interrupt handler(s)
...@@ -1711,13 +83,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx) ...@@ -1711,13 +83,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
int rc; int rc;
if (!EFX_INT_MODE_USE_MSI(efx)) { if (!EFX_INT_MODE_USE_MSI(efx)) {
irq_handler_t handler; rc = request_irq(efx->legacy_irq,
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) efx->type->irq_handle_legacy, IRQF_SHARED,
handler = efx_legacy_interrupt;
else
handler = falcon_legacy_interrupt_a1;
rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
efx->name, efx); efx->name, efx);
if (rc) { if (rc) {
netif_err(efx, drv, efx->net_dev, netif_err(efx, drv, efx->net_dev,
...@@ -1742,10 +109,10 @@ int efx_nic_init_interrupt(struct efx_nic *efx) ...@@ -1742,10 +109,10 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
/* Hook MSI or MSI-X interrupt */ /* Hook MSI or MSI-X interrupt */
n_irqs = 0; n_irqs = 0;
efx_for_each_channel(channel, efx) { efx_for_each_channel(channel, efx) {
rc = request_irq(channel->irq, efx_msi_interrupt, rc = request_irq(channel->irq, efx->type->irq_handle_msi,
IRQF_PROBE_SHARED, /* Not shared */ IRQF_PROBE_SHARED, /* Not shared */
efx->channel_name[channel->channel], efx->msi_context[channel->channel].name,
&efx->channel[channel->channel]); &efx->msi_context[channel->channel]);
if (rc) { if (rc) {
netif_err(efx, drv, efx->net_dev, netif_err(efx, drv, efx->net_dev,
"failed to hook IRQ %d\n", channel->irq); "failed to hook IRQ %d\n", channel->irq);
...@@ -1774,7 +141,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx) ...@@ -1774,7 +141,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
efx_for_each_channel(channel, efx) { efx_for_each_channel(channel, efx) {
if (n_irqs-- == 0) if (n_irqs-- == 0)
break; break;
free_irq(channel->irq, &efx->channel[channel->channel]); free_irq(channel->irq, &efx->msi_context[channel->channel]);
} }
fail1: fail1:
return rc; return rc;
...@@ -1783,7 +150,6 @@ int efx_nic_init_interrupt(struct efx_nic *efx) ...@@ -1783,7 +150,6 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
void efx_nic_fini_interrupt(struct efx_nic *efx) void efx_nic_fini_interrupt(struct efx_nic *efx)
{ {
struct efx_channel *channel; struct efx_channel *channel;
efx_oword_t reg;
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
...@@ -1792,167 +158,13 @@ void efx_nic_fini_interrupt(struct efx_nic *efx) ...@@ -1792,167 +158,13 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
/* Disable MSI/MSI-X interrupts */ /* Disable MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx) efx_for_each_channel(channel, efx)
free_irq(channel->irq, &efx->channel[channel->channel]); free_irq(channel->irq, &efx->msi_context[channel->channel]);
/* ACK legacy interrupt */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
efx_reado(efx, &reg, FR_BZ_INT_ISR0);
else
falcon_irq_ack_a1(efx);
/* Disable legacy interrupt */ /* Disable legacy interrupt */
if (efx->legacy_irq) if (efx->legacy_irq)
free_irq(efx->legacy_irq, efx); free_irq(efx->legacy_irq, efx);
} }
/* Looks at available SRAM resources and works out how many queues we
* can support, and where things like descriptor caches should live.
*
* SRAM is split up as follows:
* 0 buftbl entries for channels
* efx->vf_buftbl_base buftbl entries for SR-IOV
* efx->rx_dc_base RX descriptor caches
* efx->tx_dc_base TX descriptor caches
*/
void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
unsigned vi_count, buftbl_min;
/* Account for the buffer table entries backing the datapath channels
* and the descriptor caches for those channels.
*/
buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
efx->n_channels * EFX_MAX_EVQ_SIZE)
* sizeof(efx_qword_t) / EFX_BUF_SIZE);
vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
#ifdef CONFIG_SFC_SRIOV
if (efx_sriov_wanted(efx)) {
unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
efx->vf_buftbl_base = buftbl_min;
vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
vi_count = max(vi_count, EFX_VI_BASE);
buftbl_free = (sram_lim_qw - buftbl_min -
vi_count * vi_dc_entries);
entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
efx_vf_size(efx));
vf_limit = min(buftbl_free / entries_per_vf,
(1024U - EFX_VI_BASE) >> efx->vi_scale);
if (efx->vf_count > vf_limit) {
netif_err(efx, probe, efx->net_dev,
"Reducing VF count from from %d to %d\n",
efx->vf_count, vf_limit);
efx->vf_count = vf_limit;
}
vi_count += efx->vf_count * efx_vf_size(efx);
}
#endif
efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}
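The layout described in the comment above boils down to carving the two descriptor caches from the top of SRAM and leaving everything below rx_dc_base to the buffer table (channel entries first, then SR-IOV). A compressed sketch of that carve-up; the cache sizes and SRAM limit used here are illustrative placeholders, not the driver's TX_DC_ENTRIES/RX_DC_ENTRIES values:

#include <stdio.h>

#define TX_DC_ENTRIES 16  /* placeholder size, per VI */
#define RX_DC_ENTRIES 64  /* placeholder size, per VI */

/* Descriptor caches sit at the top of SRAM; the buffer table owns
 * everything below rx_dc_base.
 */
static void dimension_resources(unsigned int sram_lim_qw, unsigned int vi_count,
				unsigned int *tx_dc_base, unsigned int *rx_dc_base)
{
	*tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	*rx_dc_base = *tx_dc_base - vi_count * RX_DC_ENTRIES;
}

int main(void)
{
	unsigned int tx_base, rx_base;

	dimension_resources(65536, 32, &tx_base, &rx_base);
	printf("tx_dc_base=%u rx_dc_base=%u (buffer table below %u)\n",
	       tx_base, rx_base, rx_base);
	return 0;
}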
u32 efx_nic_fpga_ver(struct efx_nic *efx)
{
efx_oword_t altera_build;
efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}
void efx_nic_init_common(struct efx_nic *efx)
{
efx_oword_t temp;
/* Set positions of descriptor caches in SRAM. */
EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
/* Set TX descriptor cache size. */
BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
/* Set RX descriptor cache size. Set low watermark to size-8, as
* this allows most efficient prefetching.
*/
BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
/* Program INT_KER address */
EFX_POPULATE_OWORD_2(temp,
FRF_AZ_NORM_INT_VEC_DIS_KER,
EFX_INT_MODE_USE_MSI(efx),
FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
/* Use an interrupt level unused by event queues */
efx->irq_level = 0x1f;
else
/* Use a valid MSI-X vector */
efx->irq_level = 0;
/* Enable all the genuinely fatal interrupts. (They are still
* masked by the overall interrupt mask, controlled by
* falcon_interrupts()).
*
* Note: All other fatal interrupts are enabled
*/
EFX_POPULATE_OWORD_3(temp,
FRF_AZ_ILL_ADR_INT_KER_EN, 1,
FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
EFX_INVERT_OWORD(temp);
efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
efx_nic_push_rx_indir_table(efx);
/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
* controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
*/
efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
/* Enable SW_EV to inherit in char driver - assume harmless here */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
/* Prefetch threshold 2 => fetch when descriptor cache half empty */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
/* Disable hardware watchdog which can misfire */
EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
/* Squash TX of packets of 16 bytes or less */
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
EFX_POPULATE_OWORD_4(temp,
/* Default values */
FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
FRF_BZ_TX_PACE_SB_AF, 0xb,
FRF_BZ_TX_PACE_FB_BASE, 0,
/* Allow large pace values in the
* fast bin. */
FRF_BZ_TX_PACE_BIN_TH,
FFE_BZ_TX_PACE_RESERVED);
efx_writeo(efx, &temp, FR_BZ_TX_PACE);
}
}
/* Register dump */ /* Register dump */
#define REGISTER_REVISION_A 1 #define REGISTER_REVISION_A 1
......
...@@ -34,7 +34,7 @@ static inline int efx_nic_rev(struct efx_nic *efx) ...@@ -34,7 +34,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
return efx->type->revision; return efx->type->revision;
} }
extern u32 efx_nic_fpga_ver(struct efx_nic *efx); extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
/* NIC has two interlinked PCI functions for the same port. */ /* NIC has two interlinked PCI functions for the same port. */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx) static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
...@@ -42,6 +42,65 @@ static inline bool efx_nic_is_dual_func(struct efx_nic *efx) ...@@ -42,6 +42,65 @@ static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
return efx_nic_rev(efx) < EFX_REV_FALCON_B0; return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
} }
/* Read the current event from the event queue */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
unsigned int index)
{
return ((efx_qword_t *) (channel->eventq.buf.addr)) +
(index & channel->eventq_mask);
}
/* See if an event is present
*
* We check both the high and low dword of the event for all ones. We
* wrote all ones when we cleared the event, and no valid event can
* have all ones in either its high or low dwords. This approach is
* robust against reordering.
*
* Note that using a single 64-bit comparison is incorrect; even
* though the CPU read will be atomic, the DMA write may not be.
*/
static inline int efx_event_present(efx_qword_t *event)
{
return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
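The trick documented above relies on the driver writing all ones back into each event slot as it consumes it, and on no valid event ever carrying an all-ones dword in either half; testing the two 32-bit halves separately stays correct even if the NIC's 8-byte DMA write becomes visible one dword at a time. A self-contained sketch of the same test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef union {
	uint64_t u64;
	uint32_t u32[2];
} qword_t;

/* A slot is "empty" once it has been overwritten with all ones; a real
 * event never has an all-ones dword in either half.
 */
static bool event_present(const qword_t *event)
{
	return !(event->u32[0] == 0xffffffffu ||
		 event->u32[1] == 0xffffffffu);
}

int main(void)
{
	qword_t cleared = { .u64 = ~0ull };
	qword_t ev = { .u64 = 0x0011223344556677ull };

	printf("cleared slot: %d, real event: %d\n",
	       event_present(&cleared), event_present(&ev));
	return 0;
}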
/* Returns a pointer to the specified transmit descriptor in the TX
* descriptor queue belonging to the specified channel.
*/
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
}
/* Decide whether to push a TX descriptor to the NIC vs merely writing
* the doorbell. This can reduce latency when we are adding a single
* descriptor to an empty queue, but is otherwise pointless. Further,
* Falcon and Siena have hardware bugs (SF bug 33851) that may be
* triggered if we don't check this.
*/
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
unsigned int write_count)
{
unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
if (empty_read_count == 0)
return false;
tx_queue->empty_read_count = 0;
return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
&& tx_queue->write_count - write_count == 1;
}
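The decision above only fires when the queue was last observed empty and exactly one new descriptor is being written; the EFX_EMPTY_COUNT_VALID flag folded into empty_read_count merely marks whether that "seen empty" snapshot is current. A simplified sketch that drops the flag encoding and uses a separate boolean instead (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct txq {
	unsigned int write_count;      /* descriptors written so far */
	unsigned int empty_read_count; /* write_count when last seen empty */
	bool empty_valid;              /* is that snapshot still current? */
};

/* Push the descriptor with the doorbell write only when adding a single
 * descriptor to a queue that was observed empty.
 */
static bool may_push_tx_desc(struct txq *q, unsigned int old_write_count)
{
	if (!q->empty_valid)
		return false;
	q->empty_valid = false;

	return q->empty_read_count == old_write_count &&
	       q->write_count - old_write_count == 1;
}

int main(void)
{
	struct txq q = { .write_count = 11, .empty_read_count = 10,
			 .empty_valid = true };

	/* One descriptor added to an empty queue: push is worthwhile. */
	printf("push? %d\n", may_push_tx_desc(&q, 10));
	return 0;
}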
/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
}
enum { enum {
PHY_TYPE_NONE = 0, PHY_TYPE_NONE = 0,
PHY_TYPE_TXC43128 = 1, PHY_TYPE_TXC43128 = 1,
...@@ -140,28 +199,12 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx) ...@@ -140,28 +199,12 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
/** /**
* struct siena_nic_data - Siena NIC state * struct siena_nic_data - Siena NIC state
* @mcdi: Management-Controller-to-Driver Interface
* @wol_filter_id: Wake-on-LAN packet filter id * @wol_filter_id: Wake-on-LAN packet filter id
* @hwmon: Hardware monitor state
*/ */
struct siena_nic_data { struct siena_nic_data {
struct efx_mcdi_iface mcdi;
int wol_filter_id; int wol_filter_id;
#ifdef CONFIG_SFC_MCDI_MON
struct efx_mcdi_mon hwmon;
#endif
}; };
#ifdef CONFIG_SFC_MCDI_MON
static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
{
struct siena_nic_data *nic_data;
EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
nic_data = efx->nic_data;
return &nic_data->hwmon;
}
#endif
/* /*
* On the SFC9000 family each port is associated with 1 PCI physical * On the SFC9000 family each port is associated with 1 PCI physical
* function (PF) handled by sfc and a configurable number of virtual * function (PF) handled by sfc and a configurable number of virtual
...@@ -274,27 +317,93 @@ extern const struct efx_nic_type siena_a0_nic_type; ...@@ -274,27 +317,93 @@ extern const struct efx_nic_type siena_a0_nic_type;
extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* TX data path */ /* TX data path */
extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue); {
extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue); return tx_queue->efx->type->tx_probe(tx_queue);
extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue); }
extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue); static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
{
tx_queue->efx->type->tx_init(tx_queue);
}
static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
{
tx_queue->efx->type->tx_remove(tx_queue);
}
static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
{
tx_queue->efx->type->tx_write(tx_queue);
}
/* RX data path */ /* RX data path */
extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue); static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue); {
extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue); return rx_queue->efx->type->rx_probe(rx_queue);
extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue); }
extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue); static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
extern void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue); {
rx_queue->efx->type->rx_init(rx_queue);
}
static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_remove(rx_queue);
}
static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_write(rx_queue);
}
static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
{
rx_queue->efx->type->rx_defer_refill(rx_queue);
}
/* Event data path */ /* Event data path */
extern int efx_nic_probe_eventq(struct efx_channel *channel); static inline int efx_nic_probe_eventq(struct efx_channel *channel)
extern void efx_nic_init_eventq(struct efx_channel *channel); {
extern void efx_nic_fini_eventq(struct efx_channel *channel); return channel->efx->type->ev_probe(channel);
extern void efx_nic_remove_eventq(struct efx_channel *channel); }
extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); static inline void efx_nic_init_eventq(struct efx_channel *channel)
extern void efx_nic_eventq_read_ack(struct efx_channel *channel); {
channel->efx->type->ev_init(channel);
}
static inline void efx_nic_fini_eventq(struct efx_channel *channel)
{
channel->efx->type->ev_fini(channel);
}
static inline void efx_nic_remove_eventq(struct efx_channel *channel)
{
channel->efx->type->ev_remove(channel);
}
static inline int
efx_nic_process_eventq(struct efx_channel *channel, int quota)
{
return channel->efx->type->ev_process(channel, quota);
}
static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
channel->efx->type->ev_read_ack(channel);
}
extern void efx_nic_event_test_start(struct efx_channel *channel);
/* Falcon/Siena queue operations */
extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
extern int efx_farch_ev_probe(struct efx_channel *channel);
extern void efx_farch_ev_init(struct efx_channel *channel);
extern void efx_farch_ev_fini(struct efx_channel *channel);
extern void efx_farch_ev_remove(struct efx_channel *channel);
extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
extern void efx_farch_ev_read_ack(struct efx_channel *channel);
extern void efx_farch_ev_test_generate(struct efx_channel *channel);
extern bool efx_nic_event_present(struct efx_channel *channel); extern bool efx_nic_event_present(struct efx_channel *channel);
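The header changes above are the heart of the refactor: the efx_nic_* queue and event helpers become thin inline wrappers that dispatch through method pointers in struct efx_nic_type, while the Falcon/Siena implementations are exported as efx_farch_* and wired into siena_a0_nic_type later in the diff. A stripped-down sketch of that ops-table style; the types and names here are invented for illustration, not the driver's real structures:

#include <stdio.h>

struct nic;

/* Per-hardware-generation method table, in the spirit of efx_nic_type. */
struct nic_type {
	int  (*tx_probe)(struct nic *nic);
	void (*tx_write)(struct nic *nic);
};

struct nic {
	const struct nic_type *type;
	const char *name;
};

/* Generic wrappers dispatch through the method table... */
static int nic_probe_tx(struct nic *nic)      { return nic->type->tx_probe(nic); }
static void nic_push_buffers(struct nic *nic) { nic->type->tx_write(nic); }

/* ...while one concrete implementation fills it in. */
static int farch_tx_probe(struct nic *nic)
{
	printf("%s: allocating TX ring\n", nic->name);
	return 0;
}

static void farch_tx_write(struct nic *nic)
{
	printf("%s: ringing TX doorbell\n", nic->name);
}

static const struct nic_type farch_nic_type = {
	.tx_probe = farch_tx_probe,
	.tx_write = farch_tx_write,
};

int main(void)
{
	struct nic nic = { .type = &farch_nic_type, .name = "siena0" };

	if (nic_probe_tx(&nic) == 0)
		nic_push_buffers(&nic);
	return 0;
}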
/* Some statistics are computed as A - B where A and B each increase /* Some statistics are computed as A - B where A and B each increase
...@@ -315,16 +424,18 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff) ...@@ -315,16 +424,18 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
*stat = diff; *stat = diff;
} }
/* Interrupts and test events */ /* Interrupts */
extern int efx_nic_init_interrupt(struct efx_nic *efx); extern int efx_nic_init_interrupt(struct efx_nic *efx);
extern void efx_nic_enable_interrupts(struct efx_nic *efx);
extern void efx_nic_event_test_start(struct efx_channel *channel);
extern void efx_nic_irq_test_start(struct efx_nic *efx); extern void efx_nic_irq_test_start(struct efx_nic *efx);
extern void efx_nic_disable_interrupts(struct efx_nic *efx);
extern void efx_nic_fini_interrupt(struct efx_nic *efx); extern void efx_nic_fini_interrupt(struct efx_nic *efx);
extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id); /* Falcon/Siena interrupts */
extern void falcon_irq_ack_a1(struct efx_nic *efx); extern void efx_farch_irq_enable_master(struct efx_nic *efx);
extern void efx_farch_irq_test_generate(struct efx_nic *efx);
extern void efx_farch_irq_disable_master(struct efx_nic *efx);
extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{ {
...@@ -338,34 +449,38 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) ...@@ -338,34 +449,38 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
/* Global Resources */ /* Global Resources */
extern int efx_nic_flush_queues(struct efx_nic *efx); extern int efx_nic_flush_queues(struct efx_nic *efx);
extern void siena_prepare_flush(struct efx_nic *efx); extern void siena_prepare_flush(struct efx_nic *efx);
extern int efx_farch_fini_dmaq(struct efx_nic *efx);
extern void siena_finish_flush(struct efx_nic *efx); extern void siena_finish_flush(struct efx_nic *efx);
extern void falcon_start_nic_stats(struct efx_nic *efx); extern void falcon_start_nic_stats(struct efx_nic *efx);
extern void falcon_stop_nic_stats(struct efx_nic *efx); extern void falcon_stop_nic_stats(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx); extern int falcon_reset_xaui(struct efx_nic *efx);
extern void extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw); extern void efx_farch_init_common(struct efx_nic *efx);
extern void efx_nic_init_common(struct efx_nic *efx); static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
extern void efx_nic_push_rx_indir_table(struct efx_nic *efx); {
efx->type->rx_push_indir_table(efx);
}
extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len); unsigned int len, gfp_t gfp_flags);
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
/* Tests */ /* Tests */
struct efx_nic_register_test { struct efx_farch_register_test {
unsigned address; unsigned address;
efx_oword_t mask; efx_oword_t mask;
}; };
extern int efx_nic_test_registers(struct efx_nic *efx, extern int efx_farch_test_registers(struct efx_nic *efx,
const struct efx_nic_register_test *regs, const struct efx_farch_register_test *regs,
size_t n_regs); size_t n_regs);
extern size_t efx_nic_get_regs_len(struct efx_nic *efx); extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
#define EFX_MAX_FLUSH_TIME 5000 #define EFX_MAX_FLUSH_TIME 5000
extern void efx_generate_event(struct efx_nic *efx, unsigned int evq, extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
efx_qword_t *event); efx_qword_t *event);
#endif /* EFX_NIC_H */ #endif /* EFX_NIC_H */
...@@ -46,7 +46,7 @@ ...@@ -46,7 +46,7 @@
#include "mcdi.h" #include "mcdi.h"
#include "mcdi_pcol.h" #include "mcdi_pcol.h"
#include "io.h" #include "io.h"
#include "regs.h" #include "farch_regs.h"
#include "nic.h" #include "nic.h"
/* Maximum number of events expected to make up a PTP event */ /* Maximum number of events expected to make up a PTP event */
...@@ -538,8 +538,9 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) ...@@ -538,8 +538,9 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
/* Clear flag that signals MC ready */ /* Clear flag that signals MC ready */
ACCESS_ONCE(*start) = 0; ACCESS_ONCE(*start) = 0;
efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
MC_CMD_PTP_IN_SYNCHRONIZE_LEN); MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
EFX_BUG_ON_PARANOID(rc);
/* Wait for start from MCDI (or timeout) */ /* Wait for start from MCDI (or timeout) */
timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
...@@ -875,7 +876,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel) ...@@ -875,7 +876,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
if (!efx->ptp_data) if (!efx->ptp_data)
return -ENOMEM; return -ENOMEM;
rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int)); rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
if (rc != 0) if (rc != 0)
goto fail1; goto fail1;
......
...@@ -326,6 +326,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) ...@@ -326,6 +326,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
unsigned int fill_level, batch_size; unsigned int fill_level, batch_size;
int space, rc = 0; int space, rc = 0;
if (!rx_queue->refill_enabled)
return;
/* Calculate current fill level, and exit if we don't need to fill */ /* Calculate current fill level, and exit if we don't need to fill */
fill_level = (rx_queue->added_count - rx_queue->removed_count); fill_level = (rx_queue->added_count - rx_queue->removed_count);
EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries); EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
...@@ -738,9 +741,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue) ...@@ -738,9 +741,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->max_fill = max_fill; rx_queue->max_fill = max_fill;
rx_queue->fast_fill_trigger = trigger; rx_queue->fast_fill_trigger = trigger;
rx_queue->refill_enabled = true;
/* Set up RX descriptor ring */ /* Set up RX descriptor ring */
rx_queue->enabled = true;
efx_nic_init_rx(rx_queue); efx_nic_init_rx(rx_queue);
} }
...@@ -753,11 +756,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) ...@@ -753,11 +756,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue)); "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
/* A flush failure might have left rx_queue->enabled */
rx_queue->enabled = false;
del_timer_sync(&rx_queue->slow_fill); del_timer_sync(&rx_queue->slow_fill);
efx_nic_fini_rx(rx_queue);
/* Release RX buffers from the current read ptr to the write ptr */ /* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) { if (rx_queue->buffer) {
......
...@@ -447,14 +447,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue) ...@@ -447,14 +447,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
static int efx_poll_loopback(struct efx_nic *efx) static int efx_poll_loopback(struct efx_nic *efx)
{ {
struct efx_loopback_state *state = efx->loopback_selftest; struct efx_loopback_state *state = efx->loopback_selftest;
struct efx_channel *channel;
/* NAPI polling is not enabled, so process channels
* synchronously */
efx_for_each_channel(channel, efx) {
if (channel->work_pending)
efx_process_channel_now(channel);
}
return atomic_read(&state->rx_good) == state->packet_count; return atomic_read(&state->rx_good) == state->packet_count;
} }
...@@ -586,10 +579,6 @@ static int efx_wait_for_link(struct efx_nic *efx) ...@@ -586,10 +579,6 @@ static int efx_wait_for_link(struct efx_nic *efx)
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
efx->type->monitor(efx); efx->type->monitor(efx);
mutex_unlock(&efx->mac_lock); mutex_unlock(&efx->mac_lock);
} else {
struct efx_channel *channel = efx_get_channel(efx, 0);
if (channel->work_pending)
efx_process_channel_now(channel);
} }
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include "efx.h" #include "efx.h"
#include "nic.h" #include "nic.h"
#include "spi.h" #include "spi.h"
#include "regs.h" #include "farch_regs.h"
#include "io.h" #include "io.h"
#include "phy.h" #include "phy.h"
#include "workarounds.h" #include "workarounds.h"
...@@ -63,7 +63,7 @@ void siena_finish_flush(struct efx_nic *efx) ...@@ -63,7 +63,7 @@ void siena_finish_flush(struct efx_nic *efx)
efx_mcdi_set_mac(efx); efx_mcdi_set_mac(efx);
} }
static const struct efx_nic_register_test siena_register_tests[] = { static const struct efx_farch_register_test siena_register_tests[] = {
{ FR_AZ_ADR_REGION, { FR_AZ_ADR_REGION,
EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_CZ_USR_EV_CFG, { FR_CZ_USR_EV_CFG,
...@@ -107,8 +107,8 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) ...@@ -107,8 +107,8 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
goto out; goto out;
tests->registers = tests->registers =
efx_nic_test_registers(efx, siena_register_tests, efx_farch_test_registers(efx, siena_register_tests,
ARRAY_SIZE(siena_register_tests)) ARRAY_SIZE(siena_register_tests))
? -1 : 1; ? -1 : 1;
rc = efx_mcdi_reset(efx, reset_method); rc = efx_mcdi_reset(efx, reset_method);
...@@ -184,7 +184,7 @@ static void siena_dimension_resources(struct efx_nic *efx) ...@@ -184,7 +184,7 @@ static void siena_dimension_resources(struct efx_nic *efx)
* the buffer table and descriptor caches. In theory we can * the buffer table and descriptor caches. In theory we can
* map both blocks to one port, but we don't. * map both blocks to one port, but we don't.
*/ */
efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
} }
static int siena_probe_nic(struct efx_nic *efx) static int siena_probe_nic(struct efx_nic *efx)
...@@ -200,7 +200,7 @@ static int siena_probe_nic(struct efx_nic *efx) ...@@ -200,7 +200,7 @@ static int siena_probe_nic(struct efx_nic *efx)
return -ENOMEM; return -ENOMEM;
efx->nic_data = nic_data; efx->nic_data = nic_data;
if (efx_nic_fpga_ver(efx) != 0) { if (efx_farch_fpga_ver(efx) != 0) {
netif_err(efx, probe, efx->net_dev, netif_err(efx, probe, efx->net_dev,
"Siena FPGA not supported\n"); "Siena FPGA not supported\n");
rc = -ENODEV; rc = -ENODEV;
...@@ -237,7 +237,8 @@ static int siena_probe_nic(struct efx_nic *efx) ...@@ -237,7 +237,8 @@ static int siena_probe_nic(struct efx_nic *efx)
siena_init_wol(efx); siena_init_wol(efx);
/* Allocate memory for INT_KER */ /* Allocate memory for INT_KER */
rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
GFP_KERNEL);
if (rc) if (rc)
goto fail4; goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f); BUG_ON(efx->irq_status.dma_addr & 0x0f);
...@@ -274,6 +275,7 @@ static int siena_probe_nic(struct efx_nic *efx) ...@@ -274,6 +275,7 @@ static int siena_probe_nic(struct efx_nic *efx)
fail3: fail3:
efx_mcdi_drv_attach(efx, false, NULL); efx_mcdi_drv_attach(efx, false, NULL);
fail2: fail2:
efx_mcdi_fini(efx);
fail1: fail1:
kfree(efx->nic_data); kfree(efx->nic_data);
return rc; return rc;
...@@ -349,7 +351,7 @@ static int siena_init_nic(struct efx_nic *efx) ...@@ -349,7 +351,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
efx_nic_init_common(efx); efx_farch_init_common(efx);
return 0; return 0;
} }
...@@ -367,6 +369,8 @@ static void siena_remove_nic(struct efx_nic *efx) ...@@ -367,6 +369,8 @@ static void siena_remove_nic(struct efx_nic *efx)
/* Tear down the private nic state */ /* Tear down the private nic state */
kfree(efx->nic_data); kfree(efx->nic_data);
efx->nic_data = NULL; efx->nic_data = NULL;
efx_mcdi_fini(efx);
} }
static int siena_try_update_nic_stats(struct efx_nic *efx) static int siena_try_update_nic_stats(struct efx_nic *efx)
...@@ -574,6 +578,89 @@ static void siena_init_wol(struct efx_nic *efx) ...@@ -574,6 +578,89 @@ static void siena_init_wol(struct efx_nic *efx)
} }
} }
/**************************************************************************
*
* MCDI
*
**************************************************************************
*/
#define MCDI_PDU(efx) \
(efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
#define MCDI_DOORBELL(efx) \
(efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
#define MCDI_STATUS(efx) \
(efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
static void siena_mcdi_request(struct efx_nic *efx,
const efx_dword_t *hdr, size_t hdr_len,
const efx_dword_t *sdu, size_t sdu_len)
{
unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
unsigned int i;
unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4);
EFX_BUG_ON_PARANOID(hdr_len != 4);
efx_writed(efx, hdr, pdu);
for (i = 0; i < inlen_dw; i++)
efx_writed(efx, &sdu[i], pdu + hdr_len + 4 * i);
/* Ensure the request is written out before the doorbell */
wmb();
/* ring the doorbell with a distinctive value */
_efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
}
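siena_mcdi_request writes the header and payload dwords into shared memory and only then rings the doorbell, with wmb() guaranteeing the MC cannot observe the distinctive doorbell value before the request body. The toy mailbox below illustrates that ordering with a C11 release store standing in for the barrier; it is not the MCDI layout, and all names are invented:

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

/* Toy request mailbox: payload dwords plus a doorbell the consumer polls. */
struct mailbox {
	uint32_t pdu[64];
	_Atomic uint32_t doorbell;
};

static void mailbox_post(struct mailbox *mbox,
			 const uint32_t *hdr, size_t hdr_dw,
			 const uint32_t *sdu, size_t sdu_dw)
{
	memcpy(mbox->pdu, hdr, hdr_dw * 4);
	memcpy(mbox->pdu + hdr_dw, sdu, sdu_dw * 4);

	/* Release ordering: the request body must be visible before the
	 * doorbell, which is what the driver's wmb() enforces on hardware.
	 */
	atomic_store_explicit(&mbox->doorbell, 0x45789abc, memory_order_release);
}

int main(void)
{
	static struct mailbox mbox;
	uint32_t hdr = 0x00000001u;                      /* illustrative header */
	uint32_t sdu[2] = { 0xdeadbeefu, 0xcafef00du };  /* illustrative payload */

	mailbox_post(&mbox, &hdr, 1, sdu, 2);
	return 0;
}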
static bool siena_mcdi_poll_response(struct efx_nic *efx)
{
unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
efx_dword_t hdr;
efx_readd(efx, &hdr, pdu);
/* All 1's indicates that shared memory is in reset (and is
* not a valid hdr). Wait for it to come out of reset before
* completing the command.
*/
return EFX_DWORD_FIELD(hdr, EFX_DWORD_0) != 0xffffffff &&
EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
}
static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
size_t offset, size_t outlen)
{
unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4);
int i;
for (i = 0; i < outlen_dw; i++)
efx_readd(efx, &outbuf[i], pdu + offset + 4 * i);
}
static int siena_mcdi_poll_reboot(struct efx_nic *efx)
{
unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
efx_dword_t reg;
u32 value;
efx_readd(efx, &reg, addr);
value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
if (value == 0)
return 0;
EFX_ZERO_DWORD(reg);
efx_writed(efx, &reg, addr);
if (value == MC_STATUS_DWORD_ASSERT)
return -EINTR;
else
return -EIO;
}
/************************************************************************** /**************************************************************************
* *
...@@ -598,6 +685,7 @@ const struct efx_nic_type siena_a0_nic_type = { ...@@ -598,6 +685,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.reset = efx_mcdi_reset, .reset = efx_mcdi_reset,
.probe_port = efx_mcdi_port_probe, .probe_port = efx_mcdi_port_probe,
.remove_port = efx_mcdi_port_remove, .remove_port = efx_mcdi_port_remove,
.fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = siena_prepare_flush, .prepare_flush = siena_prepare_flush,
.finish_flush = siena_finish_flush, .finish_flush = siena_finish_flush,
.update_stats = siena_update_nic_stats, .update_stats = siena_update_nic_stats,
...@@ -613,6 +701,32 @@ const struct efx_nic_type siena_a0_nic_type = { ...@@ -613,6 +701,32 @@ const struct efx_nic_type siena_a0_nic_type = {
.resume_wol = siena_init_wol, .resume_wol = siena_init_wol,
.test_chip = siena_test_chip, .test_chip = siena_test_chip,
.test_nvram = efx_mcdi_nvram_test_all, .test_nvram = efx_mcdi_nvram_test_all,
.mcdi_request = siena_mcdi_request,
.mcdi_poll_response = siena_mcdi_poll_response,
.mcdi_read_response = siena_mcdi_read_response,
.mcdi_poll_reboot = siena_mcdi_poll_reboot,
.irq_enable_master = efx_farch_irq_enable_master,
.irq_test_generate = efx_farch_irq_test_generate,
.irq_disable_non_ev = efx_farch_irq_disable_master,
.irq_handle_msi = efx_farch_msi_interrupt,
.irq_handle_legacy = efx_farch_legacy_interrupt,
.tx_probe = efx_farch_tx_probe,
.tx_init = efx_farch_tx_init,
.tx_remove = efx_farch_tx_remove,
.tx_write = efx_farch_tx_write,
.rx_push_indir_table = efx_farch_rx_push_indir_table,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
.rx_write = efx_farch_rx_write,
.rx_defer_refill = efx_farch_rx_defer_refill,
.ev_probe = efx_farch_ev_probe,
.ev_init = efx_farch_ev_init,
.ev_fini = efx_farch_ev_fini,
.ev_remove = efx_farch_ev_remove,
.ev_process = efx_farch_ev_process,
.ev_read_ack = efx_farch_ev_read_ack,
.ev_test_generate = efx_farch_ev_test_generate,
.revision = EFX_REV_SIENA_A0, .revision = EFX_REV_SIENA_A0,
.mem_map_size = (FR_CZ_MC_TREG_SMEM + .mem_map_size = (FR_CZ_MC_TREG_SMEM +
...@@ -633,4 +747,5 @@ const struct efx_nic_type siena_a0_nic_type = { ...@@ -633,4 +747,5 @@ const struct efx_nic_type siena_a0_nic_type = {
.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH, .timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXHASH | NETIF_F_NTUPLE), NETIF_F_RXHASH | NETIF_F_NTUPLE),
.mcdi_max_ver = 1,
}; };
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
#include "mcdi.h" #include "mcdi.h"
#include "filter.h" #include "filter.h"
#include "mcdi_pcol.h" #include "mcdi_pcol.h"
#include "regs.h" #include "farch_regs.h"
#include "vfdi.h" #include "vfdi.h"
/* Number of longs required to track all the VIs in a VF */ /* Number of longs required to track all the VIs in a VF */
...@@ -464,8 +464,9 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf) ...@@ -464,8 +464,9 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
VFDI_EV_SEQ, (vf->msg_seqno & 0xff), VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS); VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
++vf->msg_seqno; ++vf->msg_seqno;
efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx), efx_farch_generate_event(efx,
&event); EFX_VI_BASE + vf->index * efx_vf_size(efx),
&event);
} }
static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset, static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
...@@ -997,7 +998,7 @@ static void efx_sriov_reset_vf_work(struct work_struct *work) ...@@ -997,7 +998,7 @@ static void efx_sriov_reset_vf_work(struct work_struct *work)
struct efx_nic *efx = vf->efx; struct efx_nic *efx = vf->efx;
struct efx_buffer buf; struct efx_buffer buf;
if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) { if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
efx_sriov_reset_vf(vf, &buf); efx_sriov_reset_vf(vf, &buf);
efx_nic_free_buffer(efx, &buf); efx_nic_free_buffer(efx, &buf);
} }
...@@ -1241,7 +1242,8 @@ static int efx_sriov_vfs_init(struct efx_nic *efx) ...@@ -1241,7 +1242,8 @@ static int efx_sriov_vfs_init(struct efx_nic *efx)
pci_domain_nr(pci_dev->bus), pci_dev->bus->number, pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
PCI_SLOT(devfn), PCI_FUNC(devfn)); PCI_SLOT(devfn), PCI_FUNC(devfn));
rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE); rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
GFP_KERNEL);
if (rc) if (rc)
goto fail; goto fail;
...@@ -1273,7 +1275,8 @@ int efx_sriov_init(struct efx_nic *efx) ...@@ -1273,7 +1275,8 @@ int efx_sriov_init(struct efx_nic *efx)
if (rc) if (rc)
goto fail_cmd; goto fail_cmd;
rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status)); rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status),
GFP_KERNEL);
if (rc) if (rc)
goto fail_status; goto fail_status;
vfdi_status = efx->vfdi_status.addr; vfdi_status = efx->vfdi_status.addr;
...@@ -1528,7 +1531,7 @@ void efx_sriov_reset(struct efx_nic *efx) ...@@ -1528,7 +1531,7 @@ void efx_sriov_reset(struct efx_nic *efx)
efx_sriov_usrev(efx, true); efx_sriov_usrev(efx, true);
(void)efx_sriov_cmd(efx, true, NULL, NULL); (void)efx_sriov_cmd(efx, true, NULL, NULL);
if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
return; return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) { for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
......
...@@ -543,10 +543,13 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue) ...@@ -543,10 +543,13 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->initialised = true; tx_queue->initialised = true;
} }
void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{ {
struct efx_tx_buffer *buffer; struct efx_tx_buffer *buffer;
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"shutting down TX queue %d\n", tx_queue->queue);
if (!tx_queue->buffer) if (!tx_queue->buffer)
return; return;
...@@ -561,22 +564,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) ...@@ -561,22 +564,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
netdev_tx_reset_queue(tx_queue->core_txq); netdev_tx_reset_queue(tx_queue->core_txq);
} }
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
if (!tx_queue->initialised)
return;
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
"shutting down TX queue %d\n", tx_queue->queue);
tx_queue->initialised = false;
/* Flush TX queue, remove descriptor ring */
efx_nic_fini_tx(tx_queue);
efx_release_tx_buffers(tx_queue);
}
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{ {
int i; int i;
...@@ -708,7 +695,8 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue, ...@@ -708,7 +695,8 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET; TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
if (unlikely(!page_buf->addr) && if (unlikely(!page_buf->addr) &&
efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE)) efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
GFP_ATOMIC))
return NULL; return NULL;
result = (u8 *)page_buf->addr + offset; result = (u8 *)page_buf->addr + offset;
......