Commit e4ac382e authored by Shay Agroskin, committed by David S. Miller

net: ena: optimize data access in fast-path code

This patch tweaks several small places to improve data access in the
fast path:

* Remove duplicate copies of the first_interrupt flag and surround it
  with WRITE_ONCE/READ_ONCE macros:

  The flag is used to detect irregularities in the HW's interrupt
  communication with the driver. It is set when an interrupt is
  received and read in the health check function (ena_timer_service())
  to help it find such irregularities (a minimal sketch of this
  pattern follows the list).

* Reorder some fields in the ena_napi struct to take better advantage
  of the cache access pattern.

* Cache the XDP TX queue in a variable so its number is not
  recalculated for every packet.

* Use likely() in a condition to improve branch prediction.
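
As a rough illustration of the first bullet: first_interrupt is a
lockless single-writer flag, published by the MSI-X handler with
WRITE_ONCE() and polled by the timer service with READ_ONCE(). The
sketch below is illustrative only; the sketch_* names are made up and
this is not the driver's code.

#include <linux/compiler.h>
#include <linux/interrupt.h>
#include <linux/types.h>

/* Hypothetical stand-in for struct ena_napi; only the flag matters here. */
struct sketch_napi {
	u8 first_interrupt;	/* written by the ISR, read by the timer */
};

static irqreturn_t sketch_msix_handler(int irq, void *data)
{
	struct sketch_napi *n = data;

	/* WRITE_ONCE() emits a single, untorn store that the compiler
	 * cannot defer or elide, so the lockless reader always sees a
	 * consistent value.
	 */
	WRITE_ONCE(n->first_interrupt, true);
	return IRQ_HANDLED;
}

static bool sketch_timer_saw_interrupt(struct sketch_napi *n)
{
	/* READ_ONCE() pairs with the WRITE_ONCE() above; a one-way
	 * flag like this needs no lock in the health check path.
	 */
	return READ_ONCE(n->first_interrupt);
}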

The 'first_interrupt' and 'interrupts_masked' flags were moved to
reside in the same cache line as the first fields of the 'napi'
struct. This placement ensures that all memory accessed by the
upper-half handler resides in a single cache line
(napi_schedule_irqoff() only accesses the 'state' and 'poll_list'
fields, which sit at the beginning of napi_struct).
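
To make the cache line argument concrete, here is a minimal sketch of
the resulting layout. It assumes a 64-byte (or larger) cache line and
the current napi_struct layout (poll_list and state come first); the
struct name and the static_assert are illustrative and not part of
the patch.

#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/netdevice.h>
#include <linux/stddef.h>

struct ena_napi_sketch {
	u8 first_interrupt ____cacheline_aligned; /* starts its own cache line */
	u8 interrupts_masked;			  /* same cache line */
	struct napi_struct napi;		  /* head lands in that line too */
	/* ... tx_ring, rx_ring, xdp_ring, qid, dim follow ... */
};

/* Both flags plus napi's poll_list and state (all that
 * napi_schedule_irqoff() touches) fit in one cache line, so the
 * hard-IRQ path reads and dirties a single line.
 */
static_assert(offsetof(struct ena_napi_sketch, napi) +
	      offsetofend(struct napi_struct, state) <= SMP_CACHE_BYTES);
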
Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fa6d61e9
@@ -151,7 +151,7 @@ static int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
 		return 0;
 
 	/* bounce buffer was used, so write it and get a new one */
-	if (pkt_ctrl->idx) {
+	if (likely(pkt_ctrl->idx)) {
 		rc = ena_com_write_bounce_buffer_to_dev(io_sq,
 							pkt_ctrl->curr_bounce_buf);
 		if (unlikely(rc))
...
@@ -197,7 +197,6 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
 	int ret;
 
 	xdp_ring = ena_napi->xdp_ring;
-	xdp_ring->first_interrupt = ena_napi->first_interrupt;
 
 	xdp_budget = budget;
 
@@ -383,7 +382,6 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 	u32 verdict = XDP_PASS;
 	struct xdp_frame *xdpf;
 	u64 *xdp_stat;
-	int qid;
 
 	rcu_read_lock();
 	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
@@ -404,8 +402,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 	}
 
 	/* Find xmit queue */
-	qid = rx_ring->qid + rx_ring->adapter->num_io_queues;
-	xdp_ring = &rx_ring->adapter->tx_ring[qid];
+	xdp_ring = rx_ring->xdp_ring;
 
 	/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
 	spin_lock(&xdp_ring->xdp_tx_lock);
@@ -681,7 +678,6 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
 	ring->ena_dev = adapter->ena_dev;
 	ring->per_napi_packets = 0;
 	ring->cpu = 0;
-	ring->first_interrupt = false;
 	ring->no_interrupt_event_cnt = 0;
 	u64_stats_init(&ring->syncp);
 }
@@ -725,6 +721,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter,
 				ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
 			rxr->empty_rx_queue = 0;
 			adapter->ena_napi[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+			rxr->xdp_ring = &adapter->tx_ring[i + adapter->num_io_queues];
 		}
 	}
 }
@@ -1922,9 +1919,6 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 	tx_ring = ena_napi->tx_ring;
 	rx_ring = ena_napi->rx_ring;
 
-	tx_ring->first_interrupt = ena_napi->first_interrupt;
-	rx_ring->first_interrupt = ena_napi->first_interrupt;
-
 	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
 
 	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
@@ -2003,7 +1997,8 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
 {
 	struct ena_napi *ena_napi = data;
 
-	ena_napi->first_interrupt = true;
+	/* Used to check HW health */
+	WRITE_ONCE(ena_napi->first_interrupt, true);
 
 	WRITE_ONCE(ena_napi->interrupts_masked, true);
 	smp_wmb(); /* write interrupts_masked before calling napi */
@@ -3657,7 +3652,9 @@ static void ena_fw_reset_device(struct work_struct *work)
 static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
 					struct ena_ring *rx_ring)
 {
-	if (likely(rx_ring->first_interrupt))
+	struct ena_napi *ena_napi = container_of(rx_ring->napi, struct ena_napi, napi);
+
+	if (likely(READ_ONCE(ena_napi->first_interrupt)))
 		return 0;
 
 	if (ena_com_cq_empty(rx_ring->ena_com_io_cq))
@@ -3681,6 +3678,7 @@ static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
 static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
 					  struct ena_ring *tx_ring)
 {
+	struct ena_napi *ena_napi = container_of(tx_ring->napi, struct ena_napi, napi);
 	struct ena_tx_buffer *tx_buf;
 	unsigned long last_jiffies;
 	u32 missed_tx = 0;
@@ -3694,8 +3692,9 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
 			/* no pending Tx at this location */
 			continue;
 
-		if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies +
-			2 * adapter->missing_tx_completion_to))) {
+		if (unlikely(!READ_ONCE(ena_napi->first_interrupt) &&
+			     time_is_before_jiffies(last_jiffies + 2 *
+				     adapter->missing_tx_completion_to))) {
 			/* If after graceful period interrupt is still not
 			 * received, we schedule a reset
 			 */
...
@@ -135,12 +135,12 @@ struct ena_irq {
 };
 
 struct ena_napi {
-	struct napi_struct napi ____cacheline_aligned;
+	u8 first_interrupt ____cacheline_aligned;
+	u8 interrupts_masked;
+	struct napi_struct napi;
 	struct ena_ring *tx_ring;
 	struct ena_ring *rx_ring;
 	struct ena_ring *xdp_ring;
-	bool first_interrupt;
-	bool interrupts_masked;
 	u32 qid;
 	struct dim dim;
 };
@@ -259,6 +259,10 @@ struct ena_ring {
 	struct bpf_prog *xdp_bpf_prog;
 	struct xdp_rxq_info xdp_rxq;
 	spinlock_t xdp_tx_lock;	/* synchronize XDP TX/Redirect traffic */
+	/* Used for rx queues only to point to the xdp tx ring, to
+	 * which traffic should be redirected from this rx ring.
+	 */
+	struct ena_ring *xdp_ring;
 
 	u16 next_to_use;
 	u16 next_to_clean;
@@ -271,7 +275,6 @@ struct ena_ring {
 	/* The maximum header length the device can handle */
 	u8 tx_max_header_size;
 
-	bool first_interrupt;
 	bool disable_meta_caching;
 	u16 no_interrupt_event_cnt;