Commit f878b995 authored by David S. Miller
parents 29e1846a 94b274bf
@@ -673,7 +673,7 @@ static void efx_fini_channels(struct efx_nic *efx)
                 efx_for_each_channel_rx_queue(rx_queue, channel)
                         efx_fini_rx_queue(rx_queue);
-                efx_for_each_channel_tx_queue(tx_queue, channel)
+                efx_for_each_possible_channel_tx_queue(tx_queue, channel)
                         efx_fini_tx_queue(tx_queue);
                 efx_fini_eventq(channel);
         }
@@ -689,7 +689,7 @@ static void efx_remove_channel(struct efx_channel *channel)
         efx_for_each_channel_rx_queue(rx_queue, channel)
                 efx_remove_rx_queue(rx_queue);
-        efx_for_each_channel_tx_queue(tx_queue, channel)
+        efx_for_each_possible_channel_tx_queue(tx_queue, channel)
                 efx_remove_tx_queue(tx_queue);
         efx_remove_eventq(channel);
 }
@@ -1271,21 +1271,8 @@ static void efx_remove_interrupts(struct efx_nic *efx)
 static void efx_set_channels(struct efx_nic *efx)
 {
-        struct efx_channel *channel;
-        struct efx_tx_queue *tx_queue;
         efx->tx_channel_offset =
                 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
-
-        /* Channel pointers were set in efx_init_struct() but we now
-         * need to clear them for TX queues in any RX-only channels. */
-        efx_for_each_channel(channel, efx) {
-                if (channel->channel - efx->tx_channel_offset >=
-                    efx->n_tx_channels) {
-                        efx_for_each_channel_tx_queue(tx_queue, channel)
-                                tx_queue->channel = NULL;
-                }
-        }
 }
 static int efx_probe_nic(struct efx_nic *efx)
@@ -1531,9 +1518,9 @@ void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
         efx->irq_rx_adaptive = rx_adaptive;
         efx->irq_rx_moderation = rx_ticks;
         efx_for_each_channel(channel, efx) {
-                if (efx_channel_get_rx_queue(channel))
+                if (efx_channel_has_rx_queue(channel))
                         channel->irq_moderation = rx_ticks;
-                else if (efx_channel_get_tx_queue(channel, 0))
+                else if (efx_channel_has_tx_queues(channel))
                         channel->irq_moderation = tx_ticks;
         }
 }
@@ -1849,6 +1836,7 @@ static const struct net_device_ops efx_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
         .ndo_poll_controller = efx_netpoll,
 #endif
+        .ndo_setup_tc = efx_setup_tc,
 };
 static void efx_update_name(struct efx_nic *efx)
@@ -1910,10 +1898,8 @@ static int efx_register_netdev(struct efx_nic *efx)
         efx_for_each_channel(channel, efx) {
                 struct efx_tx_queue *tx_queue;
-                efx_for_each_channel_tx_queue(tx_queue, channel) {
-                        tx_queue->core_txq = netdev_get_tx_queue(
-                                efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
-                }
+                efx_for_each_channel_tx_queue(tx_queue, channel)
+                        efx_init_tx_queue_core_txq(tx_queue);
         }
         /* Always start with carrier off; PHY events will detect the link */
@@ -2401,7 +2387,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
         int i, rc;
         /* Allocate and initialise a struct net_device and struct efx_nic */
-        net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
+        net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
+                                     EFX_MAX_RX_QUEUES);
         if (!net_dev)
                 return -ENOMEM;
         net_dev->features |= (type->offload_features | NETIF_F_SG |
...
@@ -29,6 +29,7 @@
 extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
 extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
 extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
 extern netdev_tx_t
@@ -36,6 +37,7 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
 extern netdev_tx_t
 efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
 /* RX */
 extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
...
@@ -631,7 +631,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
         /* Find lowest IRQ moderation across all used TX queues */
         coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
         efx_for_each_channel(channel, efx) {
-                if (!efx_channel_get_tx_queue(channel, 0))
+                if (!efx_channel_has_tx_queues(channel))
                         continue;
                 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
                         if (channel->channel < efx->n_rx_channels)
@@ -676,8 +676,8 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
         /* If the channel is shared only allow RX parameters to be set */
         efx_for_each_channel(channel, efx) {
-                if (efx_channel_get_rx_queue(channel) &&
-                    efx_channel_get_tx_queue(channel, 0) &&
+                if (efx_channel_has_rx_queue(channel) &&
+                    efx_channel_has_tx_queues(channel) &&
                     tx_usecs) {
                         netif_err(efx, drv, efx->net_dev, "Channel is shared. "
                                   "Only RX coalescing may be set\n");
...
@@ -63,10 +63,12 @@
 /* Checksum generation is a per-queue option in hardware, so each
  * queue visible to the networking core is backed by two hardware TX
  * queues. */
-#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS
-#define EFX_TXQ_TYPE_OFFLOAD 1
-#define EFX_TXQ_TYPES 2
-#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
+#define EFX_MAX_TX_TC 2
+#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
+#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
+#define EFX_TXQ_TYPES 4
+#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
 /**
  * struct efx_special_buffer - An Efx special buffer
@@ -140,6 +142,7 @@ struct efx_tx_buffer {
  * @buffer: The software buffer ring
  * @txd: The hardware descriptor ring
  * @ptr_mask: The size of the ring minus 1.
+ * @initialised: Has hardware queue been initialised?
  * @flushed: Used when handling queue flushing
  * @read_count: Current read pointer.
  *      This is the number of buffers that have been removed from both rings.
@@ -182,6 +185,7 @@ struct efx_tx_queue {
         struct efx_tx_buffer *buffer;
         struct efx_special_buffer txd;
         unsigned int ptr_mask;
+        bool initialised;
         enum efx_flush_state flushed;
         /* Members used mainly on the completion path */
@@ -377,7 +381,7 @@ struct efx_channel {
         bool rx_pkt_csummed;
         struct efx_rx_queue rx_queue;
-        struct efx_tx_queue tx_queue[2];
+        struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
 };
 enum efx_led_mode {
@@ -938,18 +942,40 @@ efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
         return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
 }
+static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
+{
+        return channel->channel - channel->efx->tx_channel_offset <
+                channel->efx->n_tx_channels;
+}
 static inline struct efx_tx_queue *
 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
 {
-        struct efx_tx_queue *tx_queue = channel->tx_queue;
-        EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES);
-        return tx_queue->channel ? tx_queue + type : NULL;
+        EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
+                            type >= EFX_TXQ_TYPES);
+        return &channel->tx_queue[type];
+}
+static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
+{
+        return !(tx_queue->efx->net_dev->num_tc < 2 &&
+                 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
 }
 /* Iterate over all TX queues belonging to a channel */
 #define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
-        for (_tx_queue = efx_channel_get_tx_queue(channel, 0); \
-             _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
+        if (!efx_channel_has_tx_queues(_channel)) \
+                ; \
+        else \
+                for (_tx_queue = (_channel)->tx_queue; \
+                     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
+                     efx_tx_queue_used(_tx_queue); \
+                     _tx_queue++)
+/* Iterate over all possible TX queues belonging to a channel */
+#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \
+        for (_tx_queue = (_channel)->tx_queue; \
+             _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
             _tx_queue++)
 static inline struct efx_rx_queue *
@@ -959,18 +985,26 @@ efx_get_rx_queue(struct efx_nic *efx, unsigned index)
         return &efx->channel[index]->rx_queue;
 }
+static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
+{
+        return channel->channel < channel->efx->n_rx_channels;
+}
 static inline struct efx_rx_queue *
 efx_channel_get_rx_queue(struct efx_channel *channel)
 {
-        return channel->channel < channel->efx->n_rx_channels ?
-                &channel->rx_queue : NULL;
+        EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
+        return &channel->rx_queue;
 }
 /* Iterate over all RX queues belonging to a channel */
 #define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
-        for (_rx_queue = efx_channel_get_rx_queue(channel); \
-             _rx_queue; \
-             _rx_queue = NULL)
+        if (!efx_channel_has_rx_queue(_channel)) \
+                ; \
+        else \
+                for (_rx_queue = &(_channel)->rx_queue; \
+                     _rx_queue; \
+                     _rx_queue = NULL)
 static inline struct efx_channel *
 efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
...
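Note (reviewer sketch, not part of the commit): with EFX_TXQ_TYPES raised to 4, a hardware TX queue number carries the channel's TX index in its upper bits and the two type flags (EFX_TXQ_TYPE_OFFLOAD, EFX_TXQ_TYPE_HIGHPRI) in its low two bits. A minimal stand-alone C illustration of that decomposition, using only the flag values from the hunk above; the loop bound and printout are illustrative, not driver code:

```c
/* Illustrative only -- not driver code.  Decompose a hardware TX queue
 * number into (channel TX index, checksum-offload flag, high-pri flag). */
#include <stdio.h>

#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
#define EFX_TXQ_TYPES        4

int main(void)
{
        unsigned queue;

        for (queue = 0; queue < 2 * EFX_TXQ_TYPES; queue++)
                printf("hw queue %u -> tx channel %u, csum offload %u, high-pri %u\n",
                       queue, queue / EFX_TXQ_TYPES,
                       !!(queue & EFX_TXQ_TYPE_OFFLOAD),
                       !!(queue & EFX_TXQ_TYPE_HIGHPRI));
        return 0;
}
```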
@@ -445,8 +445,8 @@ int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
 void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
 {
-        efx_oword_t tx_desc_ptr;
         struct efx_nic *efx = tx_queue->efx;
+        efx_oword_t reg;
         tx_queue->flushed = FLUSH_NONE;
@@ -454,7 +454,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
         efx_init_special_buffer(efx, &tx_queue->txd);
         /* Push TX descriptor ring to card */
-        EFX_POPULATE_OWORD_10(tx_desc_ptr,
+        EFX_POPULATE_OWORD_10(reg,
                               FRF_AZ_TX_DESCQ_EN, 1,
                               FRF_AZ_TX_ISCSI_DDIG_EN, 0,
                               FRF_AZ_TX_ISCSI_HDIG_EN, 0,
@@ -470,17 +470,15 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
         if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
                 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
-                EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
-                EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
+                EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+                EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
                                     !csum);
         }
-        efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+        efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
                          tx_queue->queue);
         if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
-                efx_oword_t reg;
                 /* Only 128 bits in this register */
                 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
@@ -491,6 +489,16 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
                         set_bit_le(tx_queue->queue, (void *)&reg);
                 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
         }
+        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+                EFX_POPULATE_OWORD_1(reg,
+                                     FRF_BZ_TX_PACE,
+                                     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+                                     FFE_BZ_TX_PACE_OFF :
+                                     FFE_BZ_TX_PACE_RESERVED);
+                efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+                                 tx_queue->queue);
+        }
 }
 static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1238,8 +1246,10 @@ int efx_nic_flush_queues(struct efx_nic *efx)
         /* Flush all tx queues in parallel */
         efx_for_each_channel(channel, efx) {
-                efx_for_each_channel_tx_queue(tx_queue, channel)
-                        efx_flush_tx_queue(tx_queue);
+                efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+                        if (tx_queue->initialised)
+                                efx_flush_tx_queue(tx_queue);
+                }
         }
         /* The hardware supports four concurrent rx flushes, each of which may
@@ -1262,8 +1272,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
                                         ++rx_pending;
                                 }
                         }
-                        efx_for_each_channel_tx_queue(tx_queue, channel) {
-                                if (tx_queue->flushed != FLUSH_DONE)
+                        efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+                                if (tx_queue->initialised &&
+                                    tx_queue->flushed != FLUSH_DONE)
                                         ++tx_pending;
                         }
                 }
@@ -1278,8 +1289,9 @@ int efx_nic_flush_queues(struct efx_nic *efx)
         /* Mark the queues as all flushed. We're going to return failure
          * leading to a reset, or fake up success anyway */
         efx_for_each_channel(channel, efx) {
-                efx_for_each_channel_tx_queue(tx_queue, channel) {
-                        if (tx_queue->flushed != FLUSH_DONE)
+                efx_for_each_possible_channel_tx_queue(tx_queue, channel) {
+                        if (tx_queue->initialised &&
+                            tx_queue->flushed != FLUSH_DONE)
                                 netif_err(efx, hw, efx->net_dev,
                                           "tx queue %d flush command timed out\n",
                                           tx_queue->queue);
@@ -1682,6 +1694,19 @@ void efx_nic_init_common(struct efx_nic *efx)
         if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
                 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
         efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+                EFX_POPULATE_OWORD_4(temp,
+                                     /* Default values */
+                                     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+                                     FRF_BZ_TX_PACE_SB_AF, 0xb,
+                                     FRF_BZ_TX_PACE_FB_BASE, 0,
+                                     /* Allow large pace values in the
+                                      * fast bin. */
+                                     FRF_BZ_TX_PACE_BIN_TH,
+                                     FFE_BZ_TX_PACE_RESERVED);
+                efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+        }
 }
 /* Register dump */
...
@@ -2907,6 +2907,12 @@
 #define FRF_CZ_TMFT_SRC_MAC_HI_LBN 44
 #define FRF_CZ_TMFT_SRC_MAC_HI_WIDTH 16
+/* TX_PACE_TBL */
+/* Values >20 are documented as reserved, but will result in a queue going
+ * into the fast bin with a pace value of zero. */
+#define FFE_BZ_TX_PACE_OFF 0
+#define FFE_BZ_TX_PACE_RESERVED 21
 /* DRIVER_EV */
 /* Sub-fields of an RX flush completion event */
 #define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
...
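Note (reviewer sketch, not part of the commit): efx_nic_init_tx() above now writes one TX_PACE_TBL entry per hardware queue; high-priority queues are written with FFE_BZ_TX_PACE_OFF and all other queues with the nominally reserved value 21, which the new comment says lands the queue in the fast bin with a pace of zero. A tiny self-contained C sketch of that selection, with the loop and printout added only for illustration:

```c
/* Sketch only (not driver code): mirrors the per-queue TX_PACE_TBL value
 * selection added to efx_nic_init_tx() in this commit. */
#include <stdio.h>

#define EFX_TXQ_TYPE_HIGHPRI    2  /* flag */
#define FFE_BZ_TX_PACE_OFF      0
#define FFE_BZ_TX_PACE_RESERVED 21 /* reserved; behaves as fast bin, pace 0 */

static unsigned tx_pace_value(unsigned queue)
{
        return (queue & EFX_TXQ_TYPE_HIGHPRI) ? FFE_BZ_TX_PACE_OFF
                                              : FFE_BZ_TX_PACE_RESERVED;
}

int main(void)
{
        unsigned queue;

        for (queue = 0; queue < 4; queue++)
                printf("hw queue %u -> TX_PACE_TBL value %u\n",
                       queue, tx_pace_value(queue));
        return 0;
}
```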
@@ -644,7 +644,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
                         goto out;
                 }
-                /* Test both types of TX queue */
+                /* Test all enabled types of TX queue */
                 efx_for_each_channel_tx_queue(tx_queue, channel) {
                         state->offload_csum = (tx_queue->queue &
                                                EFX_TXQ_TYPE_OFFLOAD);
...
@@ -336,17 +336,91 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
 {
         struct efx_nic *efx = netdev_priv(net_dev);
         struct efx_tx_queue *tx_queue;
+        unsigned index, type;
         if (unlikely(efx->port_inhibited))
                 return NETDEV_TX_BUSY;
-        tx_queue = efx_get_tx_queue(efx, skb_get_queue_mapping(skb),
-                                    skb->ip_summed == CHECKSUM_PARTIAL ?
-                                    EFX_TXQ_TYPE_OFFLOAD : 0);
+        index = skb_get_queue_mapping(skb);
+        type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
+        if (index >= efx->n_tx_channels) {
+                index -= efx->n_tx_channels;
+                type |= EFX_TXQ_TYPE_HIGHPRI;
+        }
+        tx_queue = efx_get_tx_queue(efx, index, type);
         return efx_enqueue_skb(tx_queue, skb);
 }
+void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
+{
+        struct efx_nic *efx = tx_queue->efx;
+        /* Must be inverse of queue lookup in efx_hard_start_xmit() */
+        tx_queue->core_txq =
+                netdev_get_tx_queue(efx->net_dev,
+                                    tx_queue->queue / EFX_TXQ_TYPES +
+                                    ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+                                     efx->n_tx_channels : 0));
+}
+int efx_setup_tc(struct net_device *net_dev, u8 num_tc)
+{
+        struct efx_nic *efx = netdev_priv(net_dev);
+        struct efx_channel *channel;
+        struct efx_tx_queue *tx_queue;
+        unsigned tc;
+        int rc;
+        if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC)
+                return -EINVAL;
+        if (num_tc == net_dev->num_tc)
+                return 0;
+        for (tc = 0; tc < num_tc; tc++) {
+                net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
+                net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
+        }
+        if (num_tc > net_dev->num_tc) {
+                /* Initialise high-priority queues as necessary */
+                efx_for_each_channel(channel, efx) {
+                        efx_for_each_possible_channel_tx_queue(tx_queue,
+                                                               channel) {
+                                if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
+                                        continue;
+                                if (!tx_queue->buffer) {
+                                        rc = efx_probe_tx_queue(tx_queue);
+                                        if (rc)
+                                                return rc;
+                                }
+                                if (!tx_queue->initialised)
+                                        efx_init_tx_queue(tx_queue);
+                                efx_init_tx_queue_core_txq(tx_queue);
+                        }
+                }
+        } else {
+                /* Reduce number of classes before number of queues */
+                net_dev->num_tc = num_tc;
+        }
+        rc = netif_set_real_num_tx_queues(net_dev,
+                                          max_t(int, num_tc, 1) *
+                                          efx->n_tx_channels);
+        if (rc)
+                return rc;
+        /* Do not destroy high-priority queues when they become
+         * unused.  We would have to flush them first, and it is
+         * fairly difficult to flush a subset of TX queues.  Leave
+         * it to efx_fini_channels().
+         */
+        net_dev->num_tc = num_tc;
+        return 0;
+}
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
         unsigned fill_level;
@@ -430,6 +504,8 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
         /* Set up TX descriptor ring */
         efx_nic_init_tx(tx_queue);
+        tx_queue->initialised = true;
 }
 void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
@@ -452,9 +528,14 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 {
+        if (!tx_queue->initialised)
+                return;
         netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                   "shutting down TX queue %d\n", tx_queue->queue);
+        tx_queue->initialised = false;
         /* Flush TX queue, remove descriptor ring */
         efx_nic_fini_tx(tx_queue);
@@ -466,6 +547,9 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
 {
+        if (!tx_queue->buffer)
+                return;
         netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                   "destroying TX queue %d\n", tx_queue->queue);
         efx_nic_remove_tx(tx_queue);
...
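Note (reviewer sketch, not part of the commit): efx_hard_start_xmit() now treats core TX queue indices n_tx_channels .. 2*n_tx_channels-1 as the high-priority traffic class, and efx_init_tx_queue_core_txq() is documented as its inverse. A stand-alone check that the two mappings round-trip, assuming the default layout where TX queues start at channel 0 (tx_channel_offset == 0) and an example n_tx_channels of 4; both assumptions are for illustration only:

```c
/* Sketch only (not driver code): verify that the core-queue -> hardware-queue
 * mapping in efx_hard_start_xmit() and the inverse in
 * efx_init_tx_queue_core_txq() round-trip.  Assumes tx_channel_offset == 0
 * and n_tx_channels == 4 as example values. */
#include <assert.h>
#include <stdio.h>

#define EFX_TXQ_TYPE_OFFLOAD 1
#define EFX_TXQ_TYPE_HIGHPRI 2
#define EFX_TXQ_TYPES        4

int main(void)
{
        const unsigned n_tx_channels = 4; /* assumed for illustration */
        unsigned core, csum;

        for (core = 0; core < 2 * n_tx_channels; core++) {
                for (csum = 0; csum <= 1; csum++) {
                        /* Forward: core queue index + checksum flag -> hw queue */
                        unsigned index = core;
                        unsigned type = csum ? EFX_TXQ_TYPE_OFFLOAD : 0;
                        unsigned hw_queue, back;

                        if (index >= n_tx_channels) {
                                index -= n_tx_channels;
                                type |= EFX_TXQ_TYPE_HIGHPRI;
                        }
                        hw_queue = index * EFX_TXQ_TYPES + type;

                        /* Inverse: hw queue -> core queue index */
                        back = hw_queue / EFX_TXQ_TYPES +
                               ((hw_queue & EFX_TXQ_TYPE_HIGHPRI) ?
                                n_tx_channels : 0);
                        assert(back == core);
                }
        }
        printf("core<->hw TX queue mapping round-trips\n");
        return 0;
}
```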
@@ -1648,7 +1648,8 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
         if (txq < 1 || txq > dev->num_tx_queues)
                 return -EINVAL;
-        if (dev->reg_state == NETREG_REGISTERED) {
+        if (dev->reg_state == NETREG_REGISTERED ||
+            dev->reg_state == NETREG_UNREGISTERING) {
                 ASSERT_RTNL();
                 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
...
@@ -29,18 +29,18 @@ static void mqprio_destroy(struct Qdisc *sch)
         struct mqprio_sched *priv = qdisc_priv(sch);
         unsigned int ntx;
-        if (!priv->qdiscs)
-                return;
-        for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
-                qdisc_destroy(priv->qdiscs[ntx]);
+        if (priv->qdiscs) {
+                for (ntx = 0;
+                     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
+                     ntx++)
+                        qdisc_destroy(priv->qdiscs[ntx]);
+                kfree(priv->qdiscs);
+        }
         if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc)
                 dev->netdev_ops->ndo_setup_tc(dev, 0);
         else
                 netdev_set_num_tc(dev, 0);
-        kfree(priv->qdiscs);
 }
 static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
...