Commit 9878f602 authored by David S. Miller

Merge branch 'ena-bug-fixes'

Netanel Belgazal says:

====================
Bug Fixes in ENA driver

Changes from V3:
* Rebase patchset to master and solve merge conflicts.
* Remove redundant bug fix (fix error handling when probe fails)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b66a8043 3e5d6897
@@ -631,22 +631,22 @@ enum ena_admin_flow_hash_proto {
 /* RSS flow hash fields */
 enum ena_admin_flow_hash_fields {
 	/* Ethernet Dest Addr */
-	ENA_ADMIN_RSS_L2_DA = 0,
+	ENA_ADMIN_RSS_L2_DA = BIT(0),
 	/* Ethernet Src Addr */
-	ENA_ADMIN_RSS_L2_SA = 1,
+	ENA_ADMIN_RSS_L2_SA = BIT(1),
 	/* ipv4/6 Dest Addr */
-	ENA_ADMIN_RSS_L3_DA = 2,
+	ENA_ADMIN_RSS_L3_DA = BIT(2),
 	/* ipv4/6 Src Addr */
-	ENA_ADMIN_RSS_L3_SA = 5,
+	ENA_ADMIN_RSS_L3_SA = BIT(3),
 	/* tcp/udp Dest Port */
-	ENA_ADMIN_RSS_L4_DP = 6,
+	ENA_ADMIN_RSS_L4_DP = BIT(4),
	/* tcp/udp Src Port */
-	ENA_ADMIN_RSS_L4_SP = 7,
+	ENA_ADMIN_RSS_L4_SP = BIT(5),
 };

 struct ena_admin_proto_input {
@@ -873,6 +873,14 @@ struct ena_admin_aenq_link_change_desc {
 	u32 flags;
 };

+struct ena_admin_aenq_keep_alive_desc {
+	struct ena_admin_aenq_common_desc aenq_common_desc;
+
+	u32 rx_drops_low;
+
+	u32 rx_drops_high;
+};
+
 struct ena_admin_ena_mmio_req_read_less_resp {
 	u16 req_id;
...
@@ -36,9 +36,9 @@
 /*****************************************************************************/
 /* Timeout in micro-sec */
-#define ADMIN_CMD_TIMEOUT_US (1000000)
-#define ENA_ASYNC_QUEUE_DEPTH 4
+#define ADMIN_CMD_TIMEOUT_US (3000000)
+#define ENA_ASYNC_QUEUE_DEPTH 16
 #define ENA_ADMIN_QUEUE_DEPTH 32
 #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
@@ -784,7 +784,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
 	int ret;

 	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
-		pr_info("Feature %d isn't supported\n", feature_id);
+		pr_debug("Feature %d isn't supported\n", feature_id);
 		return -EPERM;
 	}
@@ -1126,7 +1126,13 @@ int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
 	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
 					    comp, comp_size);
 	if (unlikely(IS_ERR(comp_ctx))) {
-		pr_err("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx));
+		if (comp_ctx == ERR_PTR(-ENODEV))
+			pr_debug("Failed to submit command [%ld]\n",
+				 PTR_ERR(comp_ctx));
+		else
+			pr_err("Failed to submit command [%ld]\n",
+			       PTR_ERR(comp_ctx));
 		return PTR_ERR(comp_ctx);
 	}
@@ -1895,7 +1901,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
 	int ret;

 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
-		pr_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
+		pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
 		return -EPERM;
 	}
@@ -1948,8 +1954,8 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
 	if (!ena_com_check_supported_feature_id(ena_dev,
 						ENA_ADMIN_RSS_HASH_FUNCTION)) {
-		pr_info("Feature %d isn't supported\n",
-			ENA_ADMIN_RSS_HASH_FUNCTION);
+		pr_debug("Feature %d isn't supported\n",
+			 ENA_ADMIN_RSS_HASH_FUNCTION);
 		return -EPERM;
 	}
@@ -2112,7 +2118,8 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
 	if (!ena_com_check_supported_feature_id(ena_dev,
 						ENA_ADMIN_RSS_HASH_INPUT)) {
-		pr_info("Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT);
+		pr_debug("Feature %d isn't supported\n",
+			 ENA_ADMIN_RSS_HASH_INPUT);
 		return -EPERM;
 	}
@@ -2184,7 +2191,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
 	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
 		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
-	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
+	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
 		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

 	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
@@ -2270,8 +2277,8 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
 	if (!ena_com_check_supported_feature_id(
 		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
-		pr_info("Feature %d isn't supported\n",
-			ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+		pr_debug("Feature %d isn't supported\n",
+			 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
 		return -EPERM;
 	}
@@ -2444,11 +2451,9 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
 	int ret;

-	if (!ena_com_check_supported_feature_id(ena_dev,
-						ENA_ADMIN_HOST_ATTR_CONFIG)) {
-		pr_warn("Set host attribute isn't supported\n");
-		return -EPERM;
-	}
+	/* Host attribute config is called before ena_com_get_dev_attr_feat
+	 * so ena_com can't check if the feature is supported.
+	 */

 	memset(&cmd, 0x0, sizeof(cmd));
 	admin_queue = &ena_dev->admin_queue;
@@ -2542,8 +2547,8 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
 	if (rc) {
 		if (rc == -EPERM) {
-			pr_info("Feature %d isn't supported\n",
-				ENA_ADMIN_INTERRUPT_MODERATION);
+			pr_debug("Feature %d isn't supported\n",
+				 ENA_ADMIN_INTERRUPT_MODERATION);
 			rc = 0;
 		} else {
 			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
...
@@ -33,6 +33,7 @@
 #ifndef ENA_COM
 #define ENA_COM

+#include <linux/compiler.h>
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/gfp.h>
...
@@ -45,7 +45,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
 	cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
 			+ (head_masked * io_cq->cdesc_entry_size_in_bytes));

-	desc_phase = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+	desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
 		ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;

 	if (desc_phase != expected_phase)
@@ -141,7 +141,7 @@ static inline u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
 		ena_com_cq_inc_head(io_cq);
 		count++;
-		last = (cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+		last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
 			ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
 	} while (!last);
@@ -489,13 +489,13 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
 	 * expected, it mean that the device still didn't update
 	 * this completion.
 	 */
-	cdesc_phase = cdesc->flags & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
+	cdesc_phase = READ_ONCE(cdesc->flags) & ENA_ETH_IO_TX_CDESC_PHASE_MASK;
 	if (cdesc_phase != expected_phase)
 		return -EAGAIN;

 	ena_com_cq_inc_head(io_cq);

-	*req_id = cdesc->req_id;
+	*req_id = READ_ONCE(cdesc->req_id);

 	return 0;
 }
@@ -80,14 +80,18 @@ static void ena_tx_timeout(struct net_device *dev)
 {
 	struct ena_adapter *adapter = netdev_priv(dev);

+	/* Change the state of the device to trigger reset
+	 * Check that we are not in the middle or a trigger already
+	 */
+	if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+		return;
+
 	u64_stats_update_begin(&adapter->syncp);
 	adapter->dev_stats.tx_timeout++;
 	u64_stats_update_end(&adapter->syncp);

 	netif_err(adapter, tx_err, dev, "Transmit time out\n");
-	/* Change the state of the device to trigger reset */
-	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 }

 static void update_rx_ring_mtu(struct ena_adapter *adapter, int mtu)
@@ -559,6 +563,7 @@ static void ena_free_all_rx_bufs(struct ena_adapter *adapter)
  */
 static void ena_free_tx_bufs(struct ena_ring *tx_ring)
 {
+	bool print_once = true;
 	u32 i;

 	for (i = 0; i < tx_ring->ring_size; i++) {
@@ -570,9 +575,16 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
 		if (!tx_info->skb)
 			continue;

-		netdev_notice(tx_ring->netdev,
-			      "free uncompleted tx skb qid %d idx 0x%x\n",
-			      tx_ring->qid, i);
+		if (print_once) {
+			netdev_notice(tx_ring->netdev,
+				      "free uncompleted tx skb qid %d idx 0x%x\n",
+				      tx_ring->qid, i);
+			print_once = false;
+		} else {
+			netdev_dbg(tx_ring->netdev,
+				   "free uncompleted tx skb qid %d idx 0x%x\n",
+				   tx_ring->qid, i);
+		}

 		ena_buf = tx_info->bufs;
 		dma_unmap_single(tx_ring->dev,
@@ -1109,7 +1121,8 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 	tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;

-	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) {
+	if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
+	    test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) {
 		napi_complete_done(napi, 0);
 		return 0;
 	}
@@ -1117,26 +1130,40 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 	tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget);
 	rx_work_done = ena_clean_rx_irq(rx_ring, napi, budget);

-	if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
-		napi_complete_done(napi, rx_work_done);
+	/* If the device is about to reset or down, avoid unmask
+	 * the interrupt and return 0 so NAPI won't reschedule
+	 */
+	if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
+		     test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) {
+		napi_complete_done(napi, 0);
+		ret = 0;
+	} else if ((budget > rx_work_done) && (tx_budget > tx_work_done)) {
 		napi_comp_call = 1;
-		/* Tx and Rx share the same interrupt vector */
-		if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
-			ena_adjust_intr_moderation(rx_ring, tx_ring);
-
-		/* Update intr register: rx intr delay, tx intr delay and
-		 * interrupt unmask
-		 */
-		ena_com_update_intr_reg(&intr_reg,
-					rx_ring->smoothed_interval,
-					tx_ring->smoothed_interval,
-					true);
-
-		/* It is a shared MSI-X. Tx and Rx CQ have pointer to it.
-		 * So we use one of them to reach the intr reg
-		 */
-		ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+
+		/* Update numa and unmask the interrupt only when schedule
+		 * from the interrupt context (vs from sk_busy_loop)
+		 */
+		if (napi_complete_done(napi, rx_work_done)) {
+			/* Tx and Rx share the same interrupt vector */
+			if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
+				ena_adjust_intr_moderation(rx_ring, tx_ring);
+
+			/* Update intr register: rx intr delay,
+			 * tx intr delay and interrupt unmask
+			 */
+			ena_com_update_intr_reg(&intr_reg,
+						rx_ring->smoothed_interval,
+						tx_ring->smoothed_interval,
+						true);
+
+			/* It is a shared MSI-X.
+			 * Tx and Rx CQ have pointer to it.
+			 * So we use one of them to reach the intr reg
+			 */
+			ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+		}

 		ena_update_ring_numa_node(tx_ring, rx_ring);
@@ -1698,12 +1725,22 @@ static void ena_down(struct ena_adapter *adapter)
 	adapter->dev_stats.interface_down++;
 	u64_stats_update_end(&adapter->syncp);

+	/* After this point the napi handler won't enable the tx queue */
+	ena_napi_disable_all(adapter);
+
 	netif_carrier_off(adapter->netdev);
 	netif_tx_disable(adapter->netdev);

-	/* After this point the napi handler won't enable the tx queue */
-	ena_napi_disable_all(adapter);

 	/* After destroy the queue there won't be any new interrupts */
+	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
+		int rc;
+
+		rc = ena_com_dev_reset(adapter->ena_dev);
+		if (rc)
+			dev_err(&adapter->pdev->dev, "Device reset failed\n");
+	}
+
 	ena_destroy_all_io_queues(adapter);
 	ena_disable_io_intr_sync(adapter);
@@ -2065,6 +2102,14 @@ static void ena_netpoll(struct net_device *netdev)
 	struct ena_adapter *adapter = netdev_priv(netdev);
 	int i;

+	/* Dont schedule NAPI if the driver is in the middle of reset
+	 * or netdev is down.
+	 */
+	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
+	    test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+		return;
+
 	for (i = 0; i < adapter->num_queues; i++)
 		napi_schedule(&adapter->ena_napi[i].napi);
 }
@@ -2169,28 +2214,46 @@ static void ena_get_stats64(struct net_device *netdev,
 			    struct rtnl_link_stats64 *stats)
 {
 	struct ena_adapter *adapter = netdev_priv(netdev);
-	struct ena_admin_basic_stats ena_stats;
-	int rc;
+	struct ena_ring *rx_ring, *tx_ring;
+	unsigned int start;
+	u64 rx_drops;
+	int i;

 	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
 		return;

-	rc = ena_com_get_dev_basic_stats(adapter->ena_dev, &ena_stats);
-	if (rc)
-		return;
-	stats->tx_bytes = ((u64)ena_stats.tx_bytes_high << 32) |
-		ena_stats.tx_bytes_low;
-	stats->rx_bytes = ((u64)ena_stats.rx_bytes_high << 32) |
-		ena_stats.rx_bytes_low;
-	stats->rx_packets = ((u64)ena_stats.rx_pkts_high << 32) |
-		ena_stats.rx_pkts_low;
-	stats->tx_packets = ((u64)ena_stats.tx_pkts_high << 32) |
-		ena_stats.tx_pkts_low;
-	stats->rx_dropped = ((u64)ena_stats.rx_drops_high << 32) |
-		ena_stats.rx_drops_low;
+	for (i = 0; i < adapter->num_queues; i++) {
+		u64 bytes, packets;
+
+		tx_ring = &adapter->tx_ring[i];
+
+		do {
+			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
+			packets = tx_ring->tx_stats.cnt;
+			bytes = tx_ring->tx_stats.bytes;
+		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+
+		stats->tx_packets += packets;
+		stats->tx_bytes += bytes;
+
+		rx_ring = &adapter->rx_ring[i];
+
+		do {
+			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
+			packets = rx_ring->rx_stats.cnt;
+			bytes = rx_ring->rx_stats.bytes;
+		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+
+		stats->rx_packets += packets;
+		stats->rx_bytes += bytes;
+	}
+
+	do {
+		start = u64_stats_fetch_begin_irq(&adapter->syncp);
+		rx_drops = adapter->dev_stats.rx_drops;
+	} while (u64_stats_fetch_retry_irq(&adapter->syncp, start));
+
+	stats->rx_dropped = rx_drops;

 	stats->multicast = 0;
 	stats->collisions = 0;
@@ -2351,6 +2414,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 	 */
 	ena_com_set_admin_polling_mode(ena_dev, true);

+	ena_config_host_info(ena_dev);
+
 	/* Get Device Attributes*/
 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
 	if (rc) {
@@ -2375,11 +2440,10 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 	*wd_state = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));

-	ena_config_host_info(ena_dev);
-
 	return 0;

 err_admin_init:
+	ena_com_delete_host_info(ena_dev);
 	ena_com_admin_destroy(ena_dev);
 err_mmio_read_less:
 	ena_com_mmio_reg_read_request_destroy(ena_dev);
@@ -2431,6 +2495,14 @@ static void ena_fw_reset_device(struct work_struct *work)
 	bool dev_up, wd_state;
 	int rc;

+	if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+		dev_err(&pdev->dev,
+			"device reset schedule while reset bit is off\n");
+		return;
+	}
+
+	netif_carrier_off(netdev);
+
 	del_timer_sync(&adapter->timer_service);

 	rtnl_lock();
@@ -2444,12 +2516,6 @@ static void ena_fw_reset_device(struct work_struct *work)
 	 */
 	ena_close(netdev);

-	rc = ena_com_dev_reset(ena_dev);
-	if (rc) {
-		dev_err(&pdev->dev, "Device reset failed\n");
-		goto err;
-	}
-
 	ena_free_mgmnt_irq(adapter);

 	ena_disable_msix(adapter);
@@ -2462,6 +2528,8 @@ static void ena_fw_reset_device(struct work_struct *work)
 	ena_com_mmio_reg_read_request_destroy(ena_dev);

+	clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+
 	/* Finish with the destroy part. Start the init part */
 	rc = ena_device_init(ena_dev, adapter->pdev, &get_feat_ctx, &wd_state);
@@ -2507,6 +2575,8 @@ static void ena_fw_reset_device(struct work_struct *work)
 err:
 	rtnl_unlock();

+	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
+
 	dev_err(&pdev->dev,
 		"Reset attempt failed. Can not reset the device\n");
 }
@@ -2525,6 +2595,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
 	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
 		return;

+	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+		return;
+
 	budget = ENA_MONITORED_TX_QUEUES;

 	for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
@@ -2624,7 +2697,7 @@ static void ena_timer_service(unsigned long data)
 	if (host_info)
 		ena_update_host_info(host_info, adapter->netdev);

-	if (unlikely(test_and_clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
+	if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
 		netif_err(adapter, drv, adapter->netdev,
 			  "Trigger reset is on\n");
 		ena_dump_stats_to_dmesg(adapter);
@@ -2658,7 +2731,7 @@ static int ena_calc_io_queue_num(struct pci_dev *pdev,
 		io_sq_num = get_feat_ctx->max_queues.max_sq_num;
 	}

-	io_queue_num = min_t(int, num_possible_cpus(), ENA_MAX_NUM_IO_QUEUES);
+	io_queue_num = min_t(int, num_online_cpus(), ENA_MAX_NUM_IO_QUEUES);
 	io_queue_num = min_t(int, io_queue_num, io_sq_num);
 	io_queue_num = min_t(int, io_queue_num,
 			     get_feat_ctx->max_queues.max_cq_num);
@@ -2720,7 +2793,6 @@ static void ena_set_dev_offloads(struct ena_com_dev_get_features_ctx *feat,
 	netdev->features =
 		dev_features |
 		NETIF_F_SG |
-		NETIF_F_NTUPLE |
 		NETIF_F_RXHASH |
 		NETIF_F_HIGHDMA;
@@ -3116,7 +3188,9 @@ static void ena_remove(struct pci_dev *pdev)
 	cancel_work_sync(&adapter->resume_io_task);

-	ena_com_dev_reset(ena_dev);
+	/* Reset the device only if the device is running. */
+	if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+		ena_com_dev_reset(ena_dev);

 	ena_free_mgmnt_irq(adapter);
...
@@ -44,7 +44,7 @@
 #include "ena_eth_com.h"

 #define DRV_MODULE_VER_MAJOR	1
-#define DRV_MODULE_VER_MINOR	0
+#define DRV_MODULE_VER_MINOR	1
 #define DRV_MODULE_VER_SUBMINOR 2

 #define DRV_MODULE_NAME		"ena"
@@ -100,7 +100,7 @@
 /* Number of queues to check for missing queues per timer service */
 #define ENA_MONITORED_TX_QUEUES	4
 /* Max timeout packets before device reset */
-#define MAX_NUM_OF_TIMEOUTED_PACKETS 32
+#define MAX_NUM_OF_TIMEOUTED_PACKETS 128

 #define ENA_TX_RING_IDX_NEXT(idx, ring_size) (((idx) + 1) & ((ring_size) - 1))
@@ -116,9 +116,9 @@
 #define ENA_IO_IRQ_IDX(q)		(ENA_IO_IRQ_FIRST_IDX + (q))

 /* ENA device should send keep alive msg every 1 sec.
- * We wait for 3 sec just to be on the safe side.
+ * We wait for 6 sec just to be on the safe side.
  */
-#define ENA_DEVICE_KALIVE_TIMEOUT	(3 * HZ)
+#define ENA_DEVICE_KALIVE_TIMEOUT	(6 * HZ)

 #define ENA_MMIO_DISABLE_REG_READ	BIT(0)
@@ -241,6 +241,7 @@ struct ena_stats_dev {
 	u64 interface_up;
 	u64 interface_down;
 	u64 admin_q_pause;
+	u64 rx_drops;
 };

 enum ena_flags_t {
...