Commit 5b40d10b authored by David S. Miller

Merge branch 'ena-fixes'

Arthur Kiyanovski says:

====================
ENA driver bug fixes

Patchset V2 changes:
-------------------
Updated SHA1 of Fixes tag in patch 3/3 to be 12 digits long

Original cover letter:
----------------------
ENA driver bug fixes
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ae81de73 5055dc03
@@ -1288,26 +1288,22 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
 
 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
 {
-	struct ena_tx_buffer *tx_info = NULL;
+	struct ena_tx_buffer *tx_info;
 
-	if (likely(req_id < tx_ring->ring_size)) {
-		tx_info = &tx_ring->tx_buffer_info[req_id];
-		if (likely(tx_info->skb))
-			return 0;
-	}
+	tx_info = &tx_ring->tx_buffer_info[req_id];
+	if (likely(tx_info->skb))
+		return 0;
 
 	return handle_invalid_req_id(tx_ring, req_id, tx_info, false);
 }
 
 static int validate_xdp_req_id(struct ena_ring *xdp_ring, u16 req_id)
 {
-	struct ena_tx_buffer *tx_info = NULL;
+	struct ena_tx_buffer *tx_info;
 
-	if (likely(req_id < xdp_ring->ring_size)) {
-		tx_info = &xdp_ring->tx_buffer_info[req_id];
-		if (likely(tx_info->xdpf))
-			return 0;
-	}
+	tx_info = &xdp_ring->tx_buffer_info[req_id];
+	if (likely(tx_info->xdpf))
+		return 0;
 
 	return handle_invalid_req_id(xdp_ring, req_id, tx_info, true);
 }
@@ -1332,9 +1328,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
 
 		rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
 						&req_id);
-		if (rc)
+		if (rc) {
+			if (unlikely(rc == -EINVAL))
+				handle_invalid_req_id(tx_ring, req_id, NULL,
+						      false);
 			break;
+		}
 
+		/* validate that the request id points to a valid skb */
 		rc = validate_tx_req_id(tx_ring, req_id);
 		if (rc)
 			break;
@@ -1427,6 +1428,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 				  u16 *next_to_clean)
 {
 	struct ena_rx_buffer *rx_info;
+	struct ena_adapter *adapter;
 	u16 len, req_id, buf = 0;
 	struct sk_buff *skb;
 	void *page_addr;
@@ -1439,8 +1441,14 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 
 	rx_info = &rx_ring->rx_buffer_info[req_id];
 	if (unlikely(!rx_info->page)) {
-		netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
-			  "Page is NULL\n");
+		adapter = rx_ring->adapter;
+		netif_err(adapter, rx_err, rx_ring->netdev,
+			  "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
+		ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
+		adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+		/* Make sure reset reason is set before triggering the reset */
+		smp_mb__before_atomic();
+		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 		return NULL;
 	}
 
@@ -1896,9 +1904,14 @@ static int ena_clean_xdp_irq(struct ena_ring *xdp_ring, u32 budget)
 
 		rc = ena_com_tx_comp_req_id_get(xdp_ring->ena_com_io_cq,
 						&req_id);
-		if (rc)
+		if (rc) {
+			if (unlikely(rc == -EINVAL))
+				handle_invalid_req_id(xdp_ring, req_id, NULL,
+						      true);
 			break;
+		}
 
+		/* validate that the request id points to a valid xdp_frame */
 		rc = validate_xdp_req_id(xdp_ring, req_id);
 		if (rc)
 			break;
@@ -4013,10 +4026,6 @@ static u32 ena_calc_max_io_queue_num(struct pci_dev *pdev,
 	max_num_io_queues = min_t(u32, max_num_io_queues, io_tx_cq_num);
 	/* 1 IRQ for mgmnt and 1 IRQs for each IO direction */
 	max_num_io_queues = min_t(u32, max_num_io_queues, pci_msix_vec_count(pdev) - 1);
-	if (unlikely(!max_num_io_queues)) {
-		dev_err(&pdev->dev, "The device doesn't have io queues\n");
-		return -EFAULT;
-	}
 
 	return max_num_io_queues;
 }