Commit a81dcd85 authored by Edward Cree, committed by David S. Miller

sfc: assign TXQs without gaps

Since we only allocate VIs for the number of TXQs we actually need, we
 cannot naively use "channel * TXQ_TYPES + txq" for the TXQ number, as
 this has gaps (when efx->tx_queues_per_channel < EFX_TXQ_TYPES) and
 thus overruns the driver's VI allocations, causing the firmware to
 reject the MC_CMD_INIT_TXQ based on INSTANCE.
Thus, we distinguish INSTANCE (stored in tx_queue->queue) from LABEL
 (tx_queue->label); the former is allocated starting from 0 in
 efx_set_channels(), while the latter is simply the txq type (index in
 channel->tx_queue array).
To simplify things, rather than changing tx_queues_per_channel after
 setting up TXQs, make Siena always probe its HIGHPRI queues at start
 of day, rather than deferring it until tc mqprio enables them.
Signed-off-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 69a70496
...@@ -2244,7 +2244,7 @@ static u32 efx_ef10_tso_versions(struct efx_nic *efx) ...@@ -2244,7 +2244,7 @@ static u32 efx_ef10_tso_versions(struct efx_nic *efx)
static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue) static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{ {
bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
struct efx_channel *channel = tx_queue->channel; struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx; struct efx_nic *efx = tx_queue->efx;
struct efx_ef10_nic_data *nic_data; struct efx_ef10_nic_data *nic_data;
......
...@@ -524,7 +524,8 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) ...@@ -524,7 +524,8 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
for (j = 0; j < EFX_TXQ_TYPES; j++) { for (j = 0; j < EFX_TXQ_TYPES; j++) {
tx_queue = &channel->tx_queue[j]; tx_queue = &channel->tx_queue[j];
tx_queue->efx = efx; tx_queue->efx = efx;
tx_queue->queue = i * EFX_TXQ_TYPES + j; tx_queue->queue = -1;
tx_queue->label = j;
tx_queue->channel = channel; tx_queue->channel = channel;
} }
...@@ -853,8 +854,9 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) ...@@ -853,8 +854,9 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
int efx_set_channels(struct efx_nic *efx) int efx_set_channels(struct efx_nic *efx)
{ {
struct efx_channel *channel;
struct efx_tx_queue *tx_queue; struct efx_tx_queue *tx_queue;
struct efx_channel *channel;
unsigned int next_queue = 0;
int xdp_queue_number; int xdp_queue_number;
int rc; int rc;
...@@ -884,14 +886,30 @@ int efx_set_channels(struct efx_nic *efx) ...@@ -884,14 +886,30 @@ int efx_set_channels(struct efx_nic *efx)
else else
channel->rx_queue.core_index = -1; channel->rx_queue.core_index = -1;
efx_for_each_channel_tx_queue(tx_queue, channel) { if (channel->channel >= efx->tx_channel_offset) {
tx_queue->queue -= (efx->tx_channel_offset * if (efx_channel_is_xdp_tx(channel)) {
EFX_TXQ_TYPES); efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->queue = next_queue++;
if (efx_channel_is_xdp_tx(channel) && netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
xdp_queue_number < efx->xdp_tx_queue_count) { channel->channel, tx_queue->label,
efx->xdp_tx_queues[xdp_queue_number] = tx_queue; xdp_queue_number, tx_queue->queue);
xdp_queue_number++; /* We may have a few left-over XDP TX
* queues owing to xdp_tx_queue_count
* not dividing evenly by EFX_TXQ_TYPES.
* We still allocate and probe those
* TXQs, but never use them.
*/
if (xdp_queue_number < efx->xdp_tx_queue_count)
efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
xdp_queue_number++;
}
} else {
efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->queue = next_queue++;
netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
channel->channel, tx_queue->label,
tx_queue->queue);
}
} }
} }
} }
......
...@@ -287,8 +287,7 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data, ...@@ -287,8 +287,7 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
} }
#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel #define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->label
#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
#define EFX_LOOPBACK_NAME(_mode, _counter) \ #define EFX_LOOPBACK_NAME(_mode, _counter) \
"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode) "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
...@@ -316,11 +315,11 @@ static int efx_fill_loopback_test(struct efx_nic *efx, ...@@ -316,11 +315,11 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
efx_for_each_channel_tx_queue(tx_queue, channel) { efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_fill_test(test_index++, strings, data, efx_fill_test(test_index++, strings, data,
&lb_tests->tx_sent[tx_queue->queue], &lb_tests->tx_sent[tx_queue->label],
EFX_TX_QUEUE_NAME(tx_queue), EFX_TX_QUEUE_NAME(tx_queue),
EFX_LOOPBACK_NAME(mode, "tx_sent")); EFX_LOOPBACK_NAME(mode, "tx_sent"));
efx_fill_test(test_index++, strings, data, efx_fill_test(test_index++, strings, data,
&lb_tests->tx_done[tx_queue->queue], &lb_tests->tx_done[tx_queue->label],
EFX_TX_QUEUE_NAME(tx_queue), EFX_TX_QUEUE_NAME(tx_queue),
EFX_LOOPBACK_NAME(mode, "tx_done")); EFX_LOOPBACK_NAME(mode, "tx_done"));
} }
......
...@@ -379,7 +379,7 @@ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue) ...@@ -379,7 +379,7 @@ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
void efx_farch_tx_init(struct efx_tx_queue *tx_queue) void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{ {
int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; int csum = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
struct efx_nic *efx = tx_queue->efx; struct efx_nic *efx = tx_queue->efx;
efx_oword_t reg; efx_oword_t reg;
...@@ -395,7 +395,7 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue) ...@@ -395,7 +395,7 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
FRF_AZ_TX_DESCQ_EVQ_ID, FRF_AZ_TX_DESCQ_EVQ_ID,
tx_queue->channel->channel, tx_queue->channel->channel,
FRF_AZ_TX_DESCQ_OWNER_ID, 0, FRF_AZ_TX_DESCQ_OWNER_ID, 0,
FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, FRF_AZ_TX_DESCQ_LABEL, tx_queue->label,
FRF_AZ_TX_DESCQ_SIZE, FRF_AZ_TX_DESCQ_SIZE,
__ffs(tx_queue->txd.entries), __ffs(tx_queue->txd.entries),
FRF_AZ_TX_DESCQ_TYPE, 0, FRF_AZ_TX_DESCQ_TYPE, 0,
...@@ -409,7 +409,7 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue) ...@@ -409,7 +409,7 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
EFX_POPULATE_OWORD_1(reg, EFX_POPULATE_OWORD_1(reg,
FRF_BZ_TX_PACE, FRF_BZ_TX_PACE,
(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? (tx_queue->label & EFX_TXQ_TYPE_HIGHPRI) ?
FFE_BZ_TX_PACE_OFF : FFE_BZ_TX_PACE_OFF :
FFE_BZ_TX_PACE_RESERVED); FFE_BZ_TX_PACE_RESERVED);
efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue); efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);
......
...@@ -164,7 +164,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2) ...@@ -164,7 +164,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
{ {
MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
EFX_BUF_SIZE)); EFX_BUF_SIZE));
bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE; size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel; struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx; struct efx_nic *efx = tx_queue->efx;
...@@ -176,7 +176,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2) ...@@ -176,7 +176,7 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1); MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel); MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue); MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->label);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue); MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0); MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id); MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id);
......
...@@ -189,6 +189,8 @@ struct efx_tx_buffer { ...@@ -189,6 +189,8 @@ struct efx_tx_buffer {
* *
* @efx: The associated Efx NIC * @efx: The associated Efx NIC
* @queue: DMA queue number * @queue: DMA queue number
* @label: Label for TX completion events.
* Is our index within @channel->tx_queue array.
* @tso_version: Version of TSO in use for this queue. * @tso_version: Version of TSO in use for this queue.
* @channel: The associated channel * @channel: The associated channel
* @core_txq: The networking core TX queue structure * @core_txq: The networking core TX queue structure
...@@ -250,7 +252,8 @@ struct efx_tx_buffer { ...@@ -250,7 +252,8 @@ struct efx_tx_buffer {
struct efx_tx_queue { struct efx_tx_queue {
/* Members which don't change on the fast path */ /* Members which don't change on the fast path */
struct efx_nic *efx ____cacheline_aligned_in_smp; struct efx_nic *efx ____cacheline_aligned_in_smp;
unsigned queue; unsigned int queue;
unsigned int label;
unsigned int tso_version; unsigned int tso_version;
struct efx_channel *channel; struct efx_channel *channel;
struct netdev_queue *core_txq; struct netdev_queue *core_txq;
......
...@@ -90,7 +90,7 @@ static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue) ...@@ -90,7 +90,7 @@ static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
/* XXX is this a thing on EF100? */ /* XXX is this a thing on EF100? */
static inline struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) static inline struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
{ {
if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) if (tx_queue->label & EFX_TXQ_TYPE_OFFLOAD)
return tx_queue - EFX_TXQ_TYPE_OFFLOAD; return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
else else
return tx_queue + EFX_TXQ_TYPE_OFFLOAD; return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
......
...@@ -445,7 +445,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue) ...@@ -445,7 +445,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
if (rc != NETDEV_TX_OK) { if (rc != NETDEV_TX_OK) {
netif_err(efx, drv, efx->net_dev, netif_err(efx, drv, efx->net_dev,
"TX queue %d could not transmit packet %d of " "TX queue %d could not transmit packet %d of "
"%d in %s loopback test\n", tx_queue->queue, "%d in %s loopback test\n", tx_queue->label,
i + 1, state->packet_count, i + 1, state->packet_count,
LOOPBACK_MODE(efx)); LOOPBACK_MODE(efx));
...@@ -497,7 +497,7 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue, ...@@ -497,7 +497,7 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
netif_err(efx, drv, efx->net_dev, netif_err(efx, drv, efx->net_dev,
"TX queue %d saw only %d out of an expected %d " "TX queue %d saw only %d out of an expected %d "
"TX completion events in %s loopback test\n", "TX completion events in %s loopback test\n",
tx_queue->queue, tx_done, state->packet_count, tx_queue->label, tx_done, state->packet_count,
LOOPBACK_MODE(efx)); LOOPBACK_MODE(efx));
rc = -ETIMEDOUT; rc = -ETIMEDOUT;
/* Allow to fall through so we see the RX errors as well */ /* Allow to fall through so we see the RX errors as well */
...@@ -508,15 +508,15 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue, ...@@ -508,15 +508,15 @@ static int efx_end_loopback(struct efx_tx_queue *tx_queue,
netif_dbg(efx, drv, efx->net_dev, netif_dbg(efx, drv, efx->net_dev,
"TX queue %d saw only %d out of an expected %d " "TX queue %d saw only %d out of an expected %d "
"received packets in %s loopback test\n", "received packets in %s loopback test\n",
tx_queue->queue, rx_good, state->packet_count, tx_queue->label, rx_good, state->packet_count,
LOOPBACK_MODE(efx)); LOOPBACK_MODE(efx));
rc = -ETIMEDOUT; rc = -ETIMEDOUT;
/* Fall through */ /* Fall through */
} }
/* Update loopback test structure */ /* Update loopback test structure */
lb_tests->tx_sent[tx_queue->queue] += state->packet_count; lb_tests->tx_sent[tx_queue->label] += state->packet_count;
lb_tests->tx_done[tx_queue->queue] += tx_done; lb_tests->tx_done[tx_queue->label] += tx_done;
lb_tests->rx_good += rx_good; lb_tests->rx_good += rx_good;
lb_tests->rx_bad += rx_bad; lb_tests->rx_bad += rx_bad;
...@@ -542,8 +542,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, ...@@ -542,8 +542,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
state->flush = false; state->flush = false;
netif_dbg(efx, drv, efx->net_dev, netif_dbg(efx, drv, efx->net_dev,
"TX queue %d testing %s loopback with %d packets\n", "TX queue %d (hw %d) testing %s loopback with %d packets\n",
tx_queue->queue, LOOPBACK_MODE(efx), tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),
state->packet_count); state->packet_count);
efx_iterate_state(efx); efx_iterate_state(efx);
...@@ -570,7 +570,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, ...@@ -570,7 +570,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
netif_dbg(efx, drv, efx->net_dev, netif_dbg(efx, drv, efx->net_dev,
"TX queue %d passed %s loopback test with a burst length " "TX queue %d passed %s loopback test with a burst length "
"of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), "of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx),
state->packet_count); state->packet_count);
return 0; return 0;
...@@ -660,7 +660,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, ...@@ -660,7 +660,7 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
/* Test all enabled types of TX queue */ /* Test all enabled types of TX queue */
efx_for_each_channel_tx_queue(tx_queue, channel) { efx_for_each_channel_tx_queue(tx_queue, channel) {
state->offload_csum = (tx_queue->queue & state->offload_csum = (tx_queue->label &
EFX_TXQ_TYPE_OFFLOAD); EFX_TXQ_TYPE_OFFLOAD);
rc = efx_test_loopback(tx_queue, rc = efx_test_loopback(tx_queue,
&tests->loopback[mode]); &tests->loopback[mode]);
......
...@@ -279,7 +279,7 @@ static int siena_probe_nic(struct efx_nic *efx) ...@@ -279,7 +279,7 @@ static int siena_probe_nic(struct efx_nic *efx)
efx->max_channels = EFX_MAX_CHANNELS; efx->max_channels = EFX_MAX_CHANNELS;
efx->max_vis = EFX_MAX_CHANNELS; efx->max_vis = EFX_MAX_CHANNELS;
efx->max_tx_channels = EFX_MAX_CHANNELS; efx->max_tx_channels = EFX_MAX_CHANNELS;
efx->tx_queues_per_channel = 2; efx->tx_queues_per_channel = 4;
efx_reado(efx, &reg, FR_AZ_CS_DEBUG); efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
......
...@@ -551,8 +551,8 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) ...@@ -551,8 +551,8 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
/* Must be inverse of queue lookup in efx_hard_start_xmit() */ /* Must be inverse of queue lookup in efx_hard_start_xmit() */
tx_queue->core_txq = tx_queue->core_txq =
netdev_get_tx_queue(efx->net_dev, netdev_get_tx_queue(efx->net_dev,
tx_queue->queue / EFX_TXQ_TYPES + tx_queue->channel->channel +
((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? ((tx_queue->label & EFX_TXQ_TYPE_HIGHPRI) ?
efx->n_tx_channels : 0)); efx->n_tx_channels : 0));
} }
...@@ -561,10 +561,7 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, ...@@ -561,10 +561,7 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
{ {
struct efx_nic *efx = netdev_priv(net_dev); struct efx_nic *efx = netdev_priv(net_dev);
struct tc_mqprio_qopt *mqprio = type_data; struct tc_mqprio_qopt *mqprio = type_data;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
unsigned tc, num_tc; unsigned tc, num_tc;
int rc;
if (type != TC_SETUP_QDISC_MQPRIO) if (type != TC_SETUP_QDISC_MQPRIO)
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -588,40 +585,9 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type, ...@@ -588,40 +585,9 @@ int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
net_dev->tc_to_txq[tc].count = efx->n_tx_channels; net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
} }
if (num_tc > net_dev->num_tc) {
efx->tx_queues_per_channel = 4;
/* Initialise high-priority queues as necessary */
efx_for_each_channel(channel, efx) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI))
continue;
if (!tx_queue->buffer) {
rc = efx_probe_tx_queue(tx_queue);
if (rc)
return rc;
}
if (!tx_queue->initialised)
efx_init_tx_queue(tx_queue);
efx_init_tx_queue_core_txq(tx_queue);
}
}
} else {
/* Reduce number of classes before number of queues */
net_dev->num_tc = num_tc;
}
rc = netif_set_real_num_tx_queues(net_dev,
max_t(int, num_tc, 1) *
efx->n_tx_channels);
if (rc)
return rc;
/* Do not destroy high-priority queues when they become
* unused. We would have to flush them first, and it is
* fairly difficult to flush a subset of TX queues. Leave
* it to efx_fini_channels().
*/
net_dev->num_tc = num_tc; net_dev->num_tc = num_tc;
return 0;
return netif_set_real_num_tx_queues(net_dev,
max_t(int, num_tc, 1) *
efx->n_tx_channels);
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment