Commit e679198b authored by David S. Miller

Merge branch 'gve-improvements'

Jeroen de Borst says:

====================
gve: minor code and performance improvements

This patchset contains a number of independent minor code and performance
improvements.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ce8bd03c 1b4d1c9b
@@ -30,7 +30,7 @@
 #define GVE_MIN_MSIX 3
 
 /* Numbers of gve tx/rx stats in stats report. */
-#define GVE_TX_STATS_REPORT_NUM	5
+#define GVE_TX_STATS_REPORT_NUM	6
 #define GVE_RX_STATS_REPORT_NUM	2
 
 /* Interval to schedule a stats report update, 20000ms. */
@@ -341,8 +341,8 @@ struct gve_tx_ring {
 	union {
 		/* GQI fields */
 		struct {
-			/* NIC tail pointer */
-			__be32 last_nic_done;
+			/* Spinlock for when cleanup in progress */
+			spinlock_t clean_lock;
 		};
 
 		/* DQO fields. */
@@ -413,7 +413,9 @@ struct gve_tx_ring {
 	u32 q_num ____cacheline_aligned; /* queue idx */
 	u32 stop_queue; /* count of queue stops */
 	u32 wake_queue; /* count of queue wakes */
+	u32 queue_timeout; /* count of queue timeouts */
 	u32 ntfy_id; /* notification block index */
+	u32 last_kick_msec; /* Last time the queue was kicked */
 	dma_addr_t bus; /* dma address of the descr ring */
 	dma_addr_t q_resources_bus; /* dma address of the queue resources */
 	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
@@ -821,15 +823,15 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
 bool gve_tx_poll(struct gve_notify_block *block, int budget);
 int gve_tx_alloc_rings(struct gve_priv *priv);
 void gve_tx_free_rings_gqi(struct gve_priv *priv);
-__be32 gve_tx_load_event_counter(struct gve_priv *priv,
-				 struct gve_tx_ring *tx);
+u32 gve_tx_load_event_counter(struct gve_priv *priv,
+			      struct gve_tx_ring *tx);
+bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
 /* rx handling */
 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
-bool gve_rx_poll(struct gve_notify_block *block, int budget);
+int gve_rx_poll(struct gve_notify_block *block, int budget);
+bool gve_rx_work_pending(struct gve_rx_ring *rx);
 int gve_rx_alloc_rings(struct gve_priv *priv);
 void gve_rx_free_rings_gqi(struct gve_priv *priv);
-bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
-		       netdev_features_t feat);
 /* Reset */
 void gve_schedule_reset(struct gve_priv *priv);
 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
......
@@ -270,6 +270,7 @@ enum gve_stat_names {
 	TX_LAST_COMPLETION_PROCESSED	= 5,
 	RX_NEXT_EXPECTED_SEQUENCE	= 6,
 	RX_BUFFERS_POSTED		= 7,
+	TX_TIMEOUT_CNT			= 8,
 	// stats from NIC
 	RX_QUEUE_DROP_CNT		= 65,
 	RX_NO_BUFFERS_POSTED		= 66,
......
@@ -330,8 +330,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
 			data[i++] = tmp_tx_bytes;
 			data[i++] = tx->wake_queue;
 			data[i++] = tx->stop_queue;
-			data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
-									  tx));
+			data[i++] = gve_tx_load_event_counter(priv, tx);
 			data[i++] = tx->dma_mapping_error;
 			/* stats from NIC */
 			if (skip_nic_stats) {
......
@@ -24,6 +24,9 @@
 #define GVE_VERSION		"1.0.0"
 #define GVE_VERSION_PREFIX	"GVE-"
 
+// Minimum amount of time between queue kicks in msec (10 seconds)
+#define MIN_TX_TIMEOUT_GAP (1000 * 10)
+
 const char gve_version_str[] = GVE_VERSION;
 static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
@@ -192,34 +195,40 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
 	__be32 __iomem *irq_doorbell;
 	bool reschedule = false;
 	struct gve_priv *priv;
+	int work_done = 0;
 
 	block = container_of(napi, struct gve_notify_block, napi);
 	priv = block->priv;
 
 	if (block->tx)
 		reschedule |= gve_tx_poll(block, budget);
-	if (block->rx)
-		reschedule |= gve_rx_poll(block, budget);
+	if (block->rx) {
+		work_done = gve_rx_poll(block, budget);
+		reschedule |= work_done == budget;
+	}
 
 	if (reschedule)
 		return budget;
 
-	napi_complete(napi);
-	irq_doorbell = gve_irq_doorbell(priv, block);
-	iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
+	/* Complete processing - don't unmask irq if busy polling is enabled */
+	if (likely(napi_complete_done(napi, work_done))) {
+		irq_doorbell = gve_irq_doorbell(priv, block);
+		iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
 
-	/* Double check we have no extra work.
-	 * Ensure unmask synchronizes with checking for work.
-	 */
-	mb();
-	if (block->tx)
-		reschedule |= gve_tx_poll(block, -1);
-	if (block->rx)
-		reschedule |= gve_rx_poll(block, -1);
-	if (reschedule && napi_reschedule(napi))
-		iowrite32be(GVE_IRQ_MASK, irq_doorbell);
+		/* Ensure IRQ ACK is visible before we check pending work.
+		 * If queue had issued updates, it would be truly visible.
+		 */
+		mb();
 
-	return 0;
+		if (block->tx)
+			reschedule |= gve_tx_clean_pending(priv, block->tx);
+		if (block->rx)
+			reschedule |= gve_rx_work_pending(block->rx);
+
+		if (reschedule && napi_reschedule(napi))
+			iowrite32be(GVE_IRQ_MASK, irq_doorbell);
+	}
+	return work_done;
 }
 
 static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
@@ -1115,9 +1124,47 @@ static void gve_turnup(struct gve_priv *priv)
 static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
 {
-	struct gve_priv *priv = netdev_priv(dev);
+	struct gve_notify_block *block;
+	struct gve_tx_ring *tx = NULL;
+	struct gve_priv *priv;
+	u32 last_nic_done;
+	u32 current_time;
+	u32 ntfy_idx;
+
+	netdev_info(dev, "Timeout on tx queue, %d", txqueue);
+	priv = netdev_priv(dev);
+	if (txqueue > priv->tx_cfg.num_queues)
+		goto reset;
+
+	ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
+	if (ntfy_idx > priv->num_ntfy_blks)
+		goto reset;
+
+	block = &priv->ntfy_blocks[ntfy_idx];
+	tx = block->tx;
 
+	current_time = jiffies_to_msecs(jiffies);
+	if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
+		goto reset;
+
+	/* Check to see if there are missed completions, which will allow us to
+	 * kick the queue.
+	 */
+	last_nic_done = gve_tx_load_event_counter(priv, tx);
+	if (last_nic_done - tx->done) {
+		netdev_info(dev, "Kicking queue %d", txqueue);
+		iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
+		napi_schedule(&block->napi);
+		tx->last_kick_msec = current_time;
+		goto out;
+	} // Else reset.
+
+reset:
 	gve_schedule_reset(priv);
+
+out:
+	if (tx)
+		tx->queue_timeout++;
 	priv->tx_timeo_cnt++;
 }
@@ -1246,6 +1293,11 @@ void gve_handle_report_stats(struct gve_priv *priv)
 				.value = cpu_to_be64(last_completion),
 				.queue_id = cpu_to_be32(idx),
 			};
+			stats[stats_idx++] = (struct stats) {
+				.stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
+				.value = cpu_to_be64(priv->tx[idx].queue_timeout),
+				.queue_id = cpu_to_be32(idx),
+			};
 		}
 	}
 	/* rx stats */
......
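The timeout handler above decides between kicking the queue and scheduling a reset by subtracting free-running u32 counters (last_nic_done - tx->done). The following is a minimal userspace sketch, not part of the patch and with made-up values, of why that subtraction stays correct even after the NIC's event counter wraps:

/* Standalone sketch (not driver code): free-running u32 counters can be
 * compared by subtraction even across wraparound, which is what the
 * "last_nic_done - tx->done" check in gve_tx_timeout() relies on.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t done = 0xfffffff0u;     /* completions the driver has processed */
	uint32_t nic_done = 0x00000005u; /* NIC event counter, already wrapped   */

	uint32_t outstanding = nic_done - done; /* 21, despite the wrap */

	if (outstanding)
		printf("missed completions: %" PRIu32 " -> kick the queue\n",
		       outstanding);
	else
		printf("nothing pending -> schedule a reset\n");
	return 0;
}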
@@ -16,19 +16,23 @@ static void gve_rx_free_buffer(struct device *dev,
 	dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) &
 				      GVE_DATA_SLOT_ADDR_PAGE_MASK);
 
+	page_ref_sub(page_info->page, page_info->pagecnt_bias - 1);
 	gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
 }
 
 static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
 {
-	if (rx->data.raw_addressing) {
-		u32 slots = rx->mask + 1;
-		int i;
+	u32 slots = rx->mask + 1;
+	int i;
 
+	if (rx->data.raw_addressing) {
 		for (i = 0; i < slots; i++)
 			gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
 					   &rx->data.data_ring[i]);
 	} else {
+		for (i = 0; i < slots; i++)
+			page_ref_sub(rx->data.page_info[i].page,
+				     rx->data.page_info[i].pagecnt_bias - 1);
 		gve_unassign_qpl(priv, rx->data.qpl->id);
 		rx->data.qpl = NULL;
 	}
@@ -69,6 +73,9 @@ static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
 	page_info->page_offset = 0;
 	page_info->page_address = page_address(page);
 	*slot_addr = cpu_to_be64(addr);
+	/* The page already has 1 ref */
+	page_ref_add(page, INT_MAX - 1);
+	page_info->pagecnt_bias = INT_MAX;
 }
 
 static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
@@ -295,21 +302,22 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *sl
 static bool gve_rx_can_flip_buffers(struct net_device *netdev)
 {
-	return PAGE_SIZE == 4096
+	return PAGE_SIZE >= 4096
 		? netdev->mtu + GVE_RX_PAD + ETH_HLEN <= PAGE_SIZE / 2 : false;
 }
 
-static int gve_rx_can_recycle_buffer(struct page *page)
+static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info)
 {
-	int pagecount = page_count(page);
+	int pagecount = page_count(page_info->page);
 
 	/* This page is not being used by any SKBs - reuse */
-	if (pagecount == 1)
+	if (pagecount == page_info->pagecnt_bias)
 		return 1;
 	/* This page is still being used by an SKB - we can't reuse */
-	else if (pagecount >= 2)
+	else if (pagecount > page_info->pagecnt_bias)
 		return 0;
-	WARN(pagecount < 1, "Pagecount should never be < 1");
+	WARN(pagecount < page_info->pagecnt_bias,
+	     "Pagecount should never be less than the bias.");
 	return -1;
 }
@@ -325,11 +333,11 @@ gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
 	if (!skb)
 		return NULL;
 
-	/* Optimistically stop the kernel from freeing the page by increasing
-	 * the page bias. We will check the refcount in refill to determine if
-	 * we need to alloc a new page.
+	/* Optimistically stop the kernel from freeing the page.
+	 * We will check again in refill to determine if we need to alloc a
+	 * new page.
 	 */
-	get_page(page_info->page);
+	gve_dec_pagecnt_bias(page_info);
 
 	return skb;
 }
@@ -352,7 +360,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
 		/* No point in recycling if we didn't get the skb */
 		if (skb) {
 			/* Make sure that the page isn't freed. */
-			get_page(page_info->page);
+			gve_dec_pagecnt_bias(page_info);
 			gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
 		}
 	} else {
@@ -376,8 +384,18 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 	union gve_rx_data_slot *data_slot;
 	struct sk_buff *skb = NULL;
 	dma_addr_t page_bus;
+	void *va;
 	u16 len;
 
+	/* Prefetch two packet pages ahead, we will need it soon. */
+	page_info = &rx->data.page_info[(idx + 2) & rx->mask];
+	va = page_info->page_address + GVE_RX_PAD +
+		page_info->page_offset;
+
+	prefetch(page_info->page); /* Kernel page struct. */
+	prefetch(va);              /* Packet header. */
+	prefetch(va + 64);         /* Next cacheline too. */
+
 	/* drop this packet */
 	if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) {
 		u64_stats_update_begin(&rx->statss);
@@ -408,7 +426,7 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 		int recycle = 0;
 
 		if (can_flip) {
-			recycle = gve_rx_can_recycle_buffer(page_info->page);
+			recycle = gve_rx_can_recycle_buffer(page_info);
 			if (recycle < 0) {
 				if (!rx->data.raw_addressing)
 					gve_schedule_reset(priv);
@@ -456,7 +474,7 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 	return true;
 }
 
-static bool gve_rx_work_pending(struct gve_rx_ring *rx)
+bool gve_rx_work_pending(struct gve_rx_ring *rx)
 {
 	struct gve_rx_desc *desc;
 	__be16 flags_seq;
@@ -499,7 +517,7 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
 			 * owns half the page it is impossible to tell which half. Either
 			 * the whole page is free or it needs to be replaced.
 			 */
-			int recycle = gve_rx_can_recycle_buffer(page_info->page);
+			int recycle = gve_rx_can_recycle_buffer(page_info);
 
 			if (recycle < 0) {
 				if (!rx->data.raw_addressing)
@@ -514,8 +532,13 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
 				gve_rx_free_buffer(dev, page_info, data_slot);
 				page_info->page = NULL;
-				if (gve_rx_alloc_buffer(priv, dev, page_info, data_slot))
+				if (gve_rx_alloc_buffer(priv, dev, page_info,
+							data_slot)) {
+					u64_stats_update_begin(&rx->statss);
+					rx->rx_buf_alloc_fail++;
+					u64_stats_update_end(&rx->statss);
 					break;
+				}
 			}
 		}
 		fill_cnt++;
@@ -524,8 +547,8 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
 	return true;
 }
 
-bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
-		       netdev_features_t feat)
+static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
+			     netdev_features_t feat)
 {
 	struct gve_priv *priv = rx->gve;
 	u32 work_done = 0, packets = 0;
@@ -546,6 +569,10 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
 			   "[%d] seqno=%d rx->desc.seqno=%d\n",
 			   rx->q_num, GVE_SEQNO(desc->flags_seq),
 			   rx->desc.seqno);
+
+		/* prefetch two descriptors ahead */
+		prefetch(rx->desc.desc_ring + ((cnt + 2) & rx->mask));
+
 		dropped = !gve_rx(rx, desc, feat, idx);
 		if (!dropped) {
 			bytes += be16_to_cpu(desc->len) - GVE_RX_PAD;
@@ -559,13 +586,15 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
 	}
 
 	if (!work_done && rx->fill_cnt - cnt > rx->db_threshold)
-		return false;
+		return 0;
 
-	u64_stats_update_begin(&rx->statss);
-	rx->rpackets += packets;
-	rx->rbytes += bytes;
-	u64_stats_update_end(&rx->statss);
-	rx->cnt = cnt;
+	if (work_done) {
+		u64_stats_update_begin(&rx->statss);
+		rx->rpackets += packets;
+		rx->rbytes += bytes;
+		u64_stats_update_end(&rx->statss);
+		rx->cnt = cnt;
+	}
 
 	/* restock ring slots */
 	if (!rx->data.raw_addressing) {
@@ -576,26 +605,26 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
 		 * falls below a threshold.
 		 */
 		if (!gve_rx_refill_buffers(priv, rx))
-			return false;
+			return 0;
 
 		/* If we were not able to completely refill buffers, we'll want
 		 * to schedule this queue for work again to refill buffers.
 		 */
 		if (rx->fill_cnt - cnt <= rx->db_threshold) {
 			gve_rx_write_doorbell(priv, rx);
-			return true;
+			return budget;
 		}
 	}
 
 	gve_rx_write_doorbell(priv, rx);
-	return gve_rx_work_pending(rx);
+	return work_done;
 }
 
-bool gve_rx_poll(struct gve_notify_block *block, int budget)
+int gve_rx_poll(struct gve_notify_block *block, int budget)
 {
 	struct gve_rx_ring *rx = block->rx;
 	netdev_features_t feat;
-	bool repoll = false;
+	int work_done = 0;
 
 	feat = block->napi.dev->features;
@@ -604,8 +633,7 @@ bool gve_rx_poll(struct gve_notify_block *block, int budget)
 		budget = INT_MAX;
 
 	if (budget > 0)
-		repoll |= gve_clean_rx_done(rx, budget, feat);
-	else
-		repoll |= gve_rx_work_pending(rx);
-	return repoll;
+		work_done = gve_clean_rx_done(rx, budget, feat);
+
+	return work_done;
 }
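The RX changes above replace the per-buffer get_page() call with a single up-front page_ref_add(page, INT_MAX - 1) plus a driver-private pagecnt_bias: the driver hands references to the stack by decrementing its own bias, and a page is recyclable exactly when its refcount has dropped back to the bias. Below is a small userspace model of that accounting, not driver code; struct fake_page and the helper names are invented for illustration:

/* Userspace model (assumptions noted, not the driver) of the pagecnt_bias
 * recycling check added in gve_rx.c.
 */
#include <limits.h>
#include <stdio.h>

struct fake_page { int refcount; };   /* stands in for struct page */
struct slot_info { struct fake_page *page; int pagecnt_bias; };

static void setup_buffer(struct slot_info *s, struct fake_page *p)
{
	p->refcount = 1;             /* a fresh page comes with one reference  */
	p->refcount += INT_MAX - 1;  /* models page_ref_add(page, INT_MAX - 1) */
	s->page = p;
	s->pagecnt_bias = INT_MAX;
}

static void give_ref_to_stack(struct slot_info *s)
{
	s->pagecnt_bias--;           /* models gve_dec_pagecnt_bias()          */
}

static void stack_drops_ref(struct slot_info *s)
{
	s->page->refcount--;         /* models put_page() when the skb is freed */
}

static int can_recycle(const struct slot_info *s)
{
	if (s->page->refcount == s->pagecnt_bias)
		return 1;            /* no SKB still owns the page - reuse it  */
	if (s->page->refcount > s->pagecnt_bias)
		return 0;            /* an SKB still holds a reference         */
	return -1;                   /* refcount below the bias is a bug       */
}

int main(void)
{
	struct fake_page page;
	struct slot_info slot;

	setup_buffer(&slot, &page);
	give_ref_to_stack(&slot);    /* packet handed to the stack */
	printf("in flight: can_recycle=%d\n", can_recycle(&slot)); /* 0 */
	stack_drops_ref(&slot);      /* skb freed */
	printf("returned:  can_recycle=%d\n", can_recycle(&slot)); /* 1 */
	return 0;
}

The payoff of this pattern is that the hot path only touches the driver's plain integer bias instead of the page's shared atomic refcount.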
@@ -144,7 +144,7 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
 	gve_tx_remove_from_block(priv, idx);
 	slots = tx->mask + 1;
-	gve_clean_tx_done(priv, tx, tx->req, false);
+	gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
 	netdev_tx_reset_queue(tx->netdev_txq);
 
 	dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -176,6 +176,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
 	/* Make sure everything is zeroed to start */
 	memset(tx, 0, sizeof(*tx));
+	spin_lock_init(&tx->clean_lock);
 	tx->q_num = idx;
 
 	tx->mask = slots - 1;
@@ -328,10 +329,16 @@ static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
 	return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
 }
 
+static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);
+
 /* Stops the queue if the skb cannot be transmitted. */
-static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
+static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
+			     struct sk_buff *skb)
 {
 	int bytes_required = 0;
+	u32 nic_done;
+	u32 to_do;
+	int ret;
 
 	if (!tx->raw_addressing)
 		bytes_required = gve_skb_fifo_bytes_required(tx, skb);
@@ -339,29 +346,28 @@ static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
 	if (likely(gve_can_tx(tx, bytes_required)))
 		return 0;
 
-	/* No space, so stop the queue */
-	tx->stop_queue++;
-	netif_tx_stop_queue(tx->netdev_txq);
-	smp_mb();	/* sync with restarting queue in gve_clean_tx_done() */
-
-	/* Now check for resources again, in case gve_clean_tx_done() freed
-	 * resources after we checked and we stopped the queue after
-	 * gve_clean_tx_done() checked.
-	 *
-	 * gve_maybe_stop_tx()			 gve_clean_tx_done()
-	 *   nsegs/can_alloc test failed
-	 *					  gve_tx_free_fifo()
-	 *					  if (tx queue stopped)
-	 *					    netif_tx_queue_wake()
-	 * netif_tx_stop_queue()
-	 *   Need to check again for space here!
-	 */
-	if (likely(!gve_can_tx(tx, bytes_required)))
-		return -EBUSY;
+	ret = -EBUSY;
+	spin_lock(&tx->clean_lock);
+	nic_done = gve_tx_load_event_counter(priv, tx);
+	to_do = nic_done - tx->done;
 
-	netif_tx_start_queue(tx->netdev_txq);
-	tx->wake_queue++;
-	return 0;
+	/* Only try to clean if there is hope for TX */
+	if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
+		if (to_do > 0) {
+			to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
+			gve_clean_tx_done(priv, tx, to_do, false);
+		}
+		if (likely(gve_can_tx(tx, bytes_required)))
+			ret = 0;
+	}
+	if (ret) {
+		/* No space, so stop the queue */
+		tx->stop_queue++;
+		netif_tx_stop_queue(tx->netdev_txq);
+	}
+	spin_unlock(&tx->clean_lock);
+
+	return ret;
 }
 
 static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
@@ -576,7 +582,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
 	WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
 	     "skb queue index out of range");
 	tx = &priv->tx[skb_get_queue_mapping(skb)];
-	if (unlikely(gve_maybe_stop_tx(tx, skb))) {
+	if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
 		/* We need to ring the txq doorbell -- we have stopped the Tx
 		 * queue for want of resources, but prior calls to gve_tx()
 		 * may have added descriptors without ringing the doorbell.
@@ -672,19 +678,19 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
 	return pkts;
 }
 
-__be32 gve_tx_load_event_counter(struct gve_priv *priv,
-				 struct gve_tx_ring *tx)
+u32 gve_tx_load_event_counter(struct gve_priv *priv,
+			      struct gve_tx_ring *tx)
 {
-	u32 counter_index = be32_to_cpu((tx->q_resources->counter_index));
+	u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
+	__be32 counter = READ_ONCE(priv->counter_array[counter_index]);
 
-	return READ_ONCE(priv->counter_array[counter_index]);
+	return be32_to_cpu(counter);
 }
 
 bool gve_tx_poll(struct gve_notify_block *block, int budget)
 {
 	struct gve_priv *priv = block->priv;
 	struct gve_tx_ring *tx = block->tx;
-	bool repoll = false;
 	u32 nic_done;
 	u32 to_do;
@@ -692,17 +698,23 @@ bool gve_tx_poll(struct gve_notify_block *block, int budget)
 	if (budget == 0)
 		budget = INT_MAX;
 
+	/* In TX path, it may try to clean completed pkts in order to xmit,
+	 * to avoid cleaning conflict, use spin_lock(), it yields better
+	 * concurrency between xmit/clean than netif's lock.
+	 */
+	spin_lock(&tx->clean_lock);
 	/* Find out how much work there is to be done */
-	tx->last_nic_done = gve_tx_load_event_counter(priv, tx);
-	nic_done = be32_to_cpu(tx->last_nic_done);
-	if (budget > 0) {
-		/* Do as much work as we have that the budget will
-		 * allow
-		 */
-		to_do = min_t(u32, (nic_done - tx->done), budget);
-		gve_clean_tx_done(priv, tx, to_do, true);
-	}
+	nic_done = gve_tx_load_event_counter(priv, tx);
+	to_do = min_t(u32, (nic_done - tx->done), budget);
+	gve_clean_tx_done(priv, tx, to_do, true);
+	spin_unlock(&tx->clean_lock);
 	/* If we still have work we want to repoll */
-	repoll |= (nic_done != tx->done);
-	return repoll;
+	return nic_done != tx->done;
+}
+
+bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
+{
+	u32 nic_done = gve_tx_load_event_counter(priv, tx);
+
+	return nic_done != tx->done;
 }
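gve_maybe_stop_tx() above now tries to reclaim completed descriptors under tx->clean_lock before stopping the queue, and bounds that work with NAPI_POLL_WEIGHT so the xmit path never cleans unboundedly. The following is a hedged userspace sketch of that decision flow, not driver code: a pthread mutex stands in for the spinlock and all sizes and names are invented.

/* Standalone sketch of the lazy-cleanup decision: reclaim a bounded amount of
 * completed work under the ring's clean lock, and only report -EBUSY (stop the
 * queue) if that still leaves too little space.
 */
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE       256
#define MAX_DESC_NEEDED 18   /* stand-in for MAX_TX_DESC_NEEDED */
#define POLL_WEIGHT     64   /* stand-in for NAPI_POLL_WEIGHT   */

struct tx_ring {
	pthread_mutex_t clean_lock; /* models tx->clean_lock            */
	uint32_t req;               /* descriptors posted by the driver */
	uint32_t done;              /* descriptors already reclaimed    */
	uint32_t nic_done;          /* completions reported by the NIC  */
};

static uint32_t tx_avail(const struct tx_ring *tx)
{
	return RING_SIZE - (tx->req - tx->done);
}

/* Reclaim @to_do completed descriptors (the gve_clean_tx_done analogue). */
static void clean_tx_done(struct tx_ring *tx, uint32_t to_do)
{
	tx->done += to_do;
}

static int maybe_stop_tx(struct tx_ring *tx)
{
	uint32_t nic_done, to_do;
	int ret = -EBUSY;

	if (tx_avail(tx) >= MAX_DESC_NEEDED)
		return 0;                    /* fast path: enough room already */

	pthread_mutex_lock(&tx->clean_lock);
	nic_done = tx->nic_done;
	to_do = nic_done - tx->done;

	/* Only try to clean if that could actually free enough space. */
	if (to_do + tx_avail(tx) >= MAX_DESC_NEEDED) {
		if (to_do > POLL_WEIGHT)
			to_do = POLL_WEIGHT; /* bound work done in the xmit path */
		clean_tx_done(tx, to_do);
		if (tx_avail(tx) >= MAX_DESC_NEEDED)
			ret = 0;
	}
	pthread_mutex_unlock(&tx->clean_lock);
	return ret;                          /* -EBUSY -> caller stops the queue */
}

int main(void)
{
	struct tx_ring tx = { .req = 250, .done = 0, .nic_done = 240 };

	pthread_mutex_init(&tx.clean_lock, NULL);
	printf("avail before: %u\n", (unsigned)tx_avail(&tx));
	printf("maybe_stop_tx: %d\n", maybe_stop_tx(&tx));
	printf("avail after: %u\n", (unsigned)tx_avail(&tx));
	return 0;
}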
@@ -18,12 +18,16 @@ void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
 void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
 {
+	unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2,
+					 num_online_cpus());
 	int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
 	struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
 	struct gve_tx_ring *tx = &priv->tx[queue_idx];
 
 	block->tx = tx;
 	tx->ntfy_id = ntfy_idx;
+	netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus),
+			    queue_idx);
 }
 
 void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
......