Commit 644c6431 authored by Paolo Abeni

Merge branch 'ena-driver-changes'

David Arinzon says:

====================
ENA driver changes

From: David Arinzon <darinzon@amazon.com>

This patchset contains a set of minor and cosmetic
changes to the ENA driver.

Changes from v1:
- Address comments from Shannon Nelson
====================

Link: https://lore.kernel.org/r/20240130095353.2881-1-darinzon@amazon.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents a4053912 50613650
@@ -211,10 +211,16 @@ Documentation/networking/net_dim.rst

 RX copybreak
 ============
 The rx_copybreak is initialized by default to ENA_DEFAULT_RX_COPYBREAK
 and can be configured by the ETHTOOL_STUNABLE command of the
 SIOCETHTOOL ioctl.
+This option controls the maximum packet length for which the RX
+descriptor it was received on would be recycled. When a packet smaller
+than RX copybreak bytes is received, it is copied into a new memory
+buffer and the RX descriptor is returned to HW.

 Statistics
 ==========
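For reference (not part of this patch), the rx_copybreak tunable described above is driven from user space through the ETHTOOL_STUNABLE command of the SIOCETHTOOL ioctl, for example via ethtool --set-tunable <dev> rx-copybreak <bytes>. Below is a minimal userspace sketch of that call; the interface name ("eth0") and the 256-byte value are illustrative assumptions only, and the ioctl requires CAP_NET_ADMIN.

/*
 * Illustrative sketch only -- not part of this patch. It sets the
 * rx_copybreak tunable through the ETHTOOL_STUNABLE command of the
 * SIOCETHTOOL ioctl, as described in the documentation hunk above.
 * The interface name ("eth0") and the value (256 bytes) are example
 * assumptions; running it requires CAP_NET_ADMIN.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct {
		struct ethtool_tunable hdr;	/* ETHTOOL_STUNABLE header */
		__u32 value;			/* tunable payload follows the header */
	} tuna;
	struct ifreq ifr;
	int fd, ret = 0;

	memset(&tuna, 0, sizeof(tuna));
	tuna.hdr.cmd = ETHTOOL_STUNABLE;
	tuna.hdr.id = ETHTOOL_RX_COPYBREAK;
	tuna.hdr.type_id = ETHTOOL_TUNABLE_U32;
	tuna.hdr.len = sizeof(__u32);
	tuna.value = 256;			/* new rx_copybreak threshold, in bytes */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&tuna;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL(ETHTOOL_STUNABLE)");
		ret = 1;
	}
	close(fd);
	return ret;
}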
@@ -109,16 +109,13 @@ struct ena_com_io_cq {
 	/* Interrupt unmask register */
 	u32 __iomem *unmask_reg;

-	/* The completion queue head doorbell register */
-	u32 __iomem *cq_head_db_reg;
-
 	/* numa configuration register (for TPH) */
 	u32 __iomem *numa_node_cfg_reg;

 	/* The value to write to the above register to unmask
 	 * the interrupt of this queue
 	 */
-	u32 msix_vector;
+	u32 msix_vector ____cacheline_aligned;

 	enum queue_direction direction;
@@ -134,7 +131,6 @@ struct ena_com_io_cq {
 	/* Device queue index */
 	u16 idx;
 	u16 head;
-	u16 last_head_update;
 	u8 phase;
 	u8 cdesc_entry_size_in_bytes;
@@ -158,7 +154,6 @@ struct ena_com_io_sq {
 	struct ena_com_io_desc_addr desc_addr;

 	u32 __iomem *db_addr;
-	u8 __iomem *header_addr;

 	enum queue_direction direction;
 	enum ena_admin_placement_policy_type mem_queue_type;
@@ -18,8 +18,7 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
 	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
 			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

-	desc_phase = (READ_ONCE(cdesc->status) &
-		      ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
 		     ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

 	if (desc_phase != expected_phase)
@@ -65,8 +64,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
 		io_sq->entries_in_tx_burst_left--;
 		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
-			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
-			   io_sq->qid, io_sq->entries_in_tx_burst_left);
+			   "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
+			   io_sq->entries_in_tx_burst_left);
 	}

 	/* Make sure everything was written into the bounce buffer before
@@ -75,8 +74,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
 	wmb();

 	/* The line is completed. Copy it to dev */
-	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
-			 bounce_buffer, (llq_info->desc_list_entry_size) / 8);
+	__iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer,
+			 (llq_info->desc_list_entry_size) / 8);

 	io_sq->tail++;
@@ -102,16 +101,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
 	header_offset =
 		llq_info->descs_num_before_header * io_sq->desc_entry_size;

-	if (unlikely((header_offset + header_len) >
-		     llq_info->desc_list_entry_size)) {
+	if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
 		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
 			   "Trying to write header larger than llq entry can accommodate\n");
 		return -EFAULT;
 	}

 	if (unlikely(!bounce_buffer)) {
-		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
-			   "Bounce buffer is NULL\n");
+		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
 		return -EFAULT;
 	}
@@ -129,8 +126,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
 	bounce_buffer = pkt_ctrl->curr_bounce_buf;

 	if (unlikely(!bounce_buffer)) {
-		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
-			   "Bounce buffer is NULL\n");
+		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
 		return NULL;
 	}
@@ -247,8 +243,7 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
 		ena_com_cq_inc_head(io_cq);
 		count++;
-		last = (READ_ONCE(cdesc->status) &
-			ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+		last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
 		       ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
 	} while (!last);
@@ -369,9 +364,8 @@ static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
 	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
 		   "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
-		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
-		   ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
-		   ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+		   ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err,
+		   ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
 }

 /*****************************************************************************/
@@ -403,13 +397,12 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
 	if (unlikely(header_len > io_sq->tx_max_header_size)) {
 		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
-			   "Header size is too large %d max header: %d\n",
-			   header_len, io_sq->tx_max_header_size);
+			   "Header size is too large %d max header: %d\n", header_len,
+			   io_sq->tx_max_header_size);
 		return -EINVAL;
 	}

-	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
-		     !buffer_to_push)) {
+	if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) {
 		netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
 			   "Push header wasn't provided in LLQ mode\n");
 		return -EINVAL;
@@ -556,13 +549,11 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
 	}

 	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
-		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
-		   nb_hw_desc);
+		   "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);

 	if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
 		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
-			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
-			   ena_rx_ctx->max_bufs);
+			   "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs);
 		return -ENOSPC;
 	}
@@ -586,8 +577,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
 	io_sq->next_to_comp += nb_hw_desc;

 	netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
-		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
-		   io_sq->qid, io_sq->next_to_comp);
+		   "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
+		   io_sq->next_to_comp);

 	/* Get rx flags from the last pkt */
 	ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
@@ -624,8 +615,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 	desc->req_id = req_id;

 	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
-		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
-		   __func__, io_sq->qid, req_id);
+		   "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
+		   req_id);

 	desc->buff_addr_lo = (u32)ena_buf->paddr;
 	desc->buff_addr_hi =
@@ -8,8 +8,6 @@
 #include "ena_com.h"

-/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
-#define ENA_COMP_HEAD_THRESH 4
 /* we allow 2 DMA descriptors per LLQ entry */
 #define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE (2 * sizeof(struct ena_eth_io_tx_desc))
 #define ENA_LLQ_HEADER (128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
@@ -145,8 +143,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
 	}

 	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
-		   "Queue: %d num_descs: %d num_entries_needed: %d\n",
-		   io_sq->qid, num_descs, num_entries_needed);
+		   "Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs,
+		   num_entries_needed);

 	return num_entries_needed > io_sq->entries_in_tx_burst_left;
 }
@@ -157,43 +155,20 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
 	u16 tail = io_sq->tail;

 	netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
-		   "Write submission queue doorbell for queue: %d tail: %d\n",
-		   io_sq->qid, tail);
+		   "Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail);

 	writel(tail, io_sq->db_addr);

 	if (is_llq_max_tx_burst_exists(io_sq)) {
 		netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
-			   "Reset available entries in tx burst for queue %d to %d\n",
-			   io_sq->qid, max_entries_in_tx_burst);
+			   "Reset available entries in tx burst for queue %d to %d\n", io_sq->qid,
+			   max_entries_in_tx_burst);
 		io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
 	}

 	return 0;
 }

-static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
-{
-	u16 unreported_comp, head;
-	bool need_update;
-
-	if (unlikely(io_cq->cq_head_db_reg)) {
-		head = io_cq->head;
-		unreported_comp = head - io_cq->last_head_update;
-		need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
-
-		if (unlikely(need_update)) {
-			netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
-				   "Write completion queue doorbell for queue %d: head: %d\n",
-				   io_cq->qid, head);
-			writel(head, io_cq->cq_head_db_reg);
-			io_cq->last_head_update = head;
-		}
-	}
-
-	return 0;
-}
-
 static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
 					    u8 numa_node)
 {
@@ -248,8 +223,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
 	*req_id = READ_ONCE(cdesc->req_id);
 	if (unlikely(*req_id >= io_cq->q_depth)) {
-		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
-			   "Invalid req id %d\n", cdesc->req_id);
+		netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Invalid req id %d\n",
+			   cdesc->req_id);
 		return -EINVAL;
 	}
@@ -21,6 +21,7 @@ enum ena_regs_reset_reason_types {
 	ENA_REGS_RESET_USER_TRIGGER = 12,
 	ENA_REGS_RESET_GENERIC = 13,
 	ENA_REGS_RESET_MISS_INTERRUPT = 14,
+	ENA_REGS_RESET_SUSPECTED_POLL_STARVATION = 15,
 };

 /* ena_registers offsets */
@@ -412,7 +412,6 @@ static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
 	tx_ring->next_to_clean = next_to_clean;

 	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
-	ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);

 	netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
 		  "tx_poll: q %d done. total pkts: %d\n",