Commit 64235187 authored by David Riddoch, committed by Ben Hutchings

sfc: By default refill RX rings as soon as space for a batch

Previously we refilled with much larger batches, which caused large latency
spikes.  We now have many more, much smaller spikes!
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
parent da9ca505
@@ -76,7 +76,7 @@ static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
 /* This is the percentage fill level below which new RX descriptors
  * will be added to the RX descriptor ring.
  */
-static unsigned int rx_refill_threshold = 90;
+static unsigned int rx_refill_threshold;
 
 /*
  * RX maximum head room required.
@@ -363,8 +363,7 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
         }
 
         space = rx_queue->max_fill - fill_level;
-        if (space < EFX_RX_BATCH)
-                goto out;
+        EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);
 
         netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
                    "RX queue %d fast-filling descriptor ring from"
@@ -676,7 +675,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
 void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 {
         struct efx_nic *efx = rx_queue->efx;
-        unsigned int max_fill, trigger;
+        unsigned int max_fill, trigger, max_trigger;
 
         netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
                   "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
@@ -689,7 +688,14 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
 
         /* Initialise limit fields */
         max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
-        trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+        max_trigger = max_fill - EFX_RX_BATCH;
+        if (rx_refill_threshold != 0) {
+                trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+                if (trigger > max_trigger)
+                        trigger = max_trigger;
+        } else {
+                trigger = max_trigger;
+        }
 
         rx_queue->max_fill = max_fill;
         rx_queue->fast_fill_trigger = trigger;
@@ -739,5 +745,5 @@ MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
 
 module_param(rx_refill_threshold, uint, 0444);
 MODULE_PARM_DESC(rx_refill_threshold,
-                 "RX descriptor ring fast/slow fill threshold (%)");
+                 "RX descriptor ring refill threshold (%)");