Commit 408cf83f authored by David S. Miller

Merge branch 'ena-new-features-and-improvements'

Netanel Belgazal says:

====================
net: update ena ethernet driver to version 1.2.0

This patchset contains some new features/improvements that were added
to the ENA driver to increase its robustness and are based on
experience of wide ENA deployment.

Change log:

V2:
* Remove the patch that adds inline to a C-file static function (contradicts coding style).
* Remove the patch that moves MTU parameter validation into ena_change_mtu() instead of
relying on the network stack's validation.
* Use upper_32_bits()/lower_32_bits() instead of casting.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 926f38e9 85238999
...@@ -70,6 +70,8 @@ enum ena_admin_aq_feature_id { ...@@ -70,6 +70,8 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_MAX_QUEUES_NUM = 2, ENA_ADMIN_MAX_QUEUES_NUM = 2,
ENA_ADMIN_HW_HINTS = 3,
ENA_ADMIN_RSS_HASH_FUNCTION = 10, ENA_ADMIN_RSS_HASH_FUNCTION = 10,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11, ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
...@@ -749,6 +751,31 @@ struct ena_admin_feature_rss_ind_table { ...@@ -749,6 +751,31 @@ struct ena_admin_feature_rss_ind_table {
struct ena_admin_rss_ind_table_entry inline_entry; struct ena_admin_rss_ind_table_entry inline_entry;
}; };
/* Device-provided tuning hints for the driver (ENA_ADMIN_HW_HINTS feature).
 * When a hint value is 0, the driver should use its own predefined value
 * for that field.
 */
struct ena_admin_ena_hw_hints {
/* MMIO register read timeout. value in ms */
u16 mmio_read_timeout;
/* driver watchdog timeout. value in ms */
u16 driver_watchdog_timeout;
/* Per packet tx completion timeout. value in ms */
u16 missing_tx_completion_timeout;
/* number of missed TX completions that should trigger a device reset */
u16 missed_tx_completion_count_threshold_to_reset;
/* admin command completion timeout. value in ms */
u16 admin_completion_tx_timeout;
/* netdev watchdog timeout. value in ms */
u16 netdev_wd_timeout;
/* TX scatter-gather list size hint */
u16 max_tx_sgl_size;
/* RX scatter-gather list size hint */
u16 max_rx_sgl_size;
/* padding reserved for future hints */
u16 reserved[8];
};
struct ena_admin_get_feat_cmd { struct ena_admin_get_feat_cmd {
struct ena_admin_aq_common_desc aq_common_descriptor; struct ena_admin_aq_common_desc aq_common_descriptor;
...@@ -782,6 +809,8 @@ struct ena_admin_get_feat_resp { ...@@ -782,6 +809,8 @@ struct ena_admin_get_feat_resp {
struct ena_admin_feature_rss_ind_table ind_table; struct ena_admin_feature_rss_ind_table ind_table;
struct ena_admin_feature_intr_moder_desc intr_moderation; struct ena_admin_feature_intr_moder_desc intr_moderation;
struct ena_admin_ena_hw_hints hw_hints;
} u; } u;
}; };
...@@ -857,6 +886,8 @@ enum ena_admin_aenq_notification_syndrom { ...@@ -857,6 +886,8 @@ enum ena_admin_aenq_notification_syndrom {
ENA_ADMIN_SUSPEND = 0, ENA_ADMIN_SUSPEND = 0,
ENA_ADMIN_RESUME = 1, ENA_ADMIN_RESUME = 1,
ENA_ADMIN_UPDATE_HINTS = 2,
}; };
struct ena_admin_aenq_entry { struct ena_admin_aenq_entry {
......
...@@ -99,8 +99,8 @@ static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev, ...@@ -99,8 +99,8 @@ static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
return -EINVAL; return -EINVAL;
} }
ena_addr->mem_addr_low = (u32)addr; ena_addr->mem_addr_low = lower_32_bits(addr);
ena_addr->mem_addr_high = (u64)addr >> 32; ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
return 0; return 0;
} }
...@@ -329,7 +329,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, ...@@ -329,7 +329,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size_t size; size_t size;
int dev_node = 0; int dev_node = 0;
memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr)); memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
io_sq->desc_entry_size = io_sq->desc_entry_size =
(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
...@@ -383,7 +383,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev, ...@@ -383,7 +383,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size_t size; size_t size;
int prev_node = 0; int prev_node = 0;
memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr)); memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
/* Use the basic completion descriptor for Rx */ /* Use the basic completion descriptor for Rx */
io_cq->cdesc_entry_size_in_bytes = io_cq->cdesc_entry_size_in_bytes =
...@@ -494,7 +494,7 @@ static int ena_com_comp_status_to_errno(u8 comp_status) ...@@ -494,7 +494,7 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE: case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
return -ENOMEM; return -ENOMEM;
case ENA_ADMIN_UNSUPPORTED_OPCODE: case ENA_ADMIN_UNSUPPORTED_OPCODE:
return -EPERM; return -EOPNOTSUPP;
case ENA_ADMIN_BAD_OPCODE: case ENA_ADMIN_BAD_OPCODE:
case ENA_ADMIN_MALFORMED_REQUEST: case ENA_ADMIN_MALFORMED_REQUEST:
case ENA_ADMIN_ILLEGAL_PARAMETER: case ENA_ADMIN_ILLEGAL_PARAMETER:
...@@ -511,7 +511,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c ...@@ -511,7 +511,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
unsigned long flags, timeout; unsigned long flags, timeout;
int ret; int ret;
timeout = jiffies + ADMIN_CMD_TIMEOUT_US; timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
while (1) { while (1) {
spin_lock_irqsave(&admin_queue->q_lock, flags); spin_lock_irqsave(&admin_queue->q_lock, flags);
...@@ -561,7 +561,8 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com ...@@ -561,7 +561,8 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
int ret; int ret;
wait_for_completion_timeout(&comp_ctx->wait_event, wait_for_completion_timeout(&comp_ctx->wait_event,
usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US)); usecs_to_jiffies(
admin_queue->completion_timeout));
/* In case the command wasn't completed find out the root cause. /* In case the command wasn't completed find out the root cause.
* There might be 2 kinds of errors * There might be 2 kinds of errors
...@@ -601,12 +602,15 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) ...@@ -601,12 +602,15 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read; struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp = volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
mmio_read->read_resp; mmio_read->read_resp;
u32 mmio_read_reg, ret; u32 mmio_read_reg, ret, i;
unsigned long flags; unsigned long flags;
int i; u32 timeout = mmio_read->reg_read_to;
might_sleep(); might_sleep();
if (timeout == 0)
timeout = ENA_REG_READ_TIMEOUT;
/* If readless is disabled, perform regular read */ /* If readless is disabled, perform regular read */
if (!mmio_read->readless_supported) if (!mmio_read->readless_supported)
return readl(ena_dev->reg_bar + offset); return readl(ena_dev->reg_bar + offset);
...@@ -627,14 +631,14 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset) ...@@ -627,14 +631,14 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF); writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) { for (i = 0; i < timeout; i++) {
if (read_resp->req_id == mmio_read->seq_num) if (read_resp->req_id == mmio_read->seq_num)
break; break;
udelay(1); udelay(1);
} }
if (unlikely(i == ENA_REG_READ_TIMEOUT)) { if (unlikely(i == timeout)) {
pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n", pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
mmio_read->seq_num, offset, read_resp->req_id, mmio_read->seq_num, offset, read_resp->req_id,
read_resp->reg_off); read_resp->reg_off);
...@@ -681,7 +685,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev, ...@@ -681,7 +685,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
u8 direction; u8 direction;
int ret; int ret;
memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd)); memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
direction = ENA_ADMIN_SQ_DIRECTION_TX; direction = ENA_ADMIN_SQ_DIRECTION_TX;
...@@ -786,7 +790,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev, ...@@ -786,7 +790,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) { if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
pr_debug("Feature %d isn't supported\n", feature_id); pr_debug("Feature %d isn't supported\n", feature_id);
return -EPERM; return -EOPNOTSUPP;
} }
memset(&get_cmd, 0x0, sizeof(get_cmd)); memset(&get_cmd, 0x0, sizeof(get_cmd));
...@@ -963,7 +967,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev, ...@@ -963,7 +967,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
u8 direction; u8 direction;
int ret; int ret;
memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd)); memset(&create_cmd, 0x0, sizeof(create_cmd));
create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ; create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
...@@ -1155,7 +1159,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev, ...@@ -1155,7 +1159,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
struct ena_admin_acq_create_cq_resp_desc cmd_completion; struct ena_admin_acq_create_cq_resp_desc cmd_completion;
int ret; int ret;
memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd)); memset(&create_cmd, 0x0, sizeof(create_cmd));
create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ; create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
...@@ -1263,7 +1267,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev, ...@@ -1263,7 +1267,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
struct ena_admin_acq_destroy_cq_resp_desc destroy_resp; struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
int ret; int ret;
memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd)); memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
destroy_cmd.cq_idx = io_cq->idx; destroy_cmd.cq_idx = io_cq->idx;
destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ; destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
...@@ -1324,7 +1328,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag) ...@@ -1324,7 +1328,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) { if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n", pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
get_resp.u.aenq.supported_groups, groups_flag); get_resp.u.aenq.supported_groups, groups_flag);
return -EPERM; return -EOPNOTSUPP;
} }
memset(&cmd, 0x0, sizeof(cmd)); memset(&cmd, 0x0, sizeof(cmd));
...@@ -1619,8 +1623,8 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev, ...@@ -1619,8 +1623,8 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
io_sq = &ena_dev->io_sq_queues[ctx->qid]; io_sq = &ena_dev->io_sq_queues[ctx->qid];
io_cq = &ena_dev->io_cq_queues[ctx->qid]; io_cq = &ena_dev->io_cq_queues[ctx->qid];
memset(io_sq, 0x0, sizeof(struct ena_com_io_sq)); memset(io_sq, 0x0, sizeof(*io_sq));
memset(io_cq, 0x0, sizeof(struct ena_com_io_cq)); memset(io_cq, 0x0, sizeof(*io_cq));
/* Init CQ */ /* Init CQ */
io_cq->q_depth = ctx->queue_size; io_cq->q_depth = ctx->queue_size;
...@@ -1730,6 +1734,20 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev, ...@@ -1730,6 +1734,20 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
memcpy(&get_feat_ctx->offload, &get_resp.u.offload, memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
sizeof(get_resp.u.offload)); sizeof(get_resp.u.offload));
/* Driver hints isn't mandatory admin command. So in case the
* command isn't supported set driver hints to 0
*/
rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
if (!rc)
memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
sizeof(get_resp.u.hw_hints));
else if (rc == -EOPNOTSUPP)
memset(&get_feat_ctx->hw_hints, 0x0,
sizeof(get_feat_ctx->hw_hints));
else
return rc;
return 0; return 0;
} }
...@@ -1807,7 +1825,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data) ...@@ -1807,7 +1825,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF); writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
} }
int ena_com_dev_reset(struct ena_com_dev *ena_dev) int ena_com_dev_reset(struct ena_com_dev *ena_dev,
enum ena_regs_reset_reason_types reset_reason)
{ {
u32 stat, timeout, cap, reset_val; u32 stat, timeout, cap, reset_val;
int rc; int rc;
...@@ -1835,6 +1854,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev) ...@@ -1835,6 +1854,8 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
/* start reset */ /* start reset */
reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK; reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
ENA_REGS_DEV_CTL_RESET_REASON_MASK;
writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF); writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
/* Write again the MMIO read request address */ /* Write again the MMIO read request address */
...@@ -1855,6 +1876,14 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev) ...@@ -1855,6 +1876,14 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev)
return rc; return rc;
} }
timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
if (timeout)
/* the resolution of timeout reg is 100ms */
ena_dev->admin_queue.completion_timeout = timeout * 100000;
else
ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
return 0; return 0;
} }
...@@ -1909,7 +1938,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu) ...@@ -1909,7 +1938,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) { if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU); pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
return -EPERM; return -EOPNOTSUPP;
} }
memset(&cmd, 0x0, sizeof(cmd)); memset(&cmd, 0x0, sizeof(cmd));
...@@ -1963,7 +1992,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) ...@@ -1963,7 +1992,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
ENA_ADMIN_RSS_HASH_FUNCTION)) { ENA_ADMIN_RSS_HASH_FUNCTION)) {
pr_debug("Feature %d isn't supported\n", pr_debug("Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_FUNCTION); ENA_ADMIN_RSS_HASH_FUNCTION);
return -EPERM; return -EOPNOTSUPP;
} }
/* Validate hash function is supported */ /* Validate hash function is supported */
...@@ -1975,7 +2004,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev) ...@@ -1975,7 +2004,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) { if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
pr_err("Func hash %d isn't supported by device, abort\n", pr_err("Func hash %d isn't supported by device, abort\n",
rss->hash_func); rss->hash_func);
return -EPERM; return -EOPNOTSUPP;
} }
memset(&cmd, 0x0, sizeof(cmd)); memset(&cmd, 0x0, sizeof(cmd));
...@@ -2034,7 +2063,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev, ...@@ -2034,7 +2063,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) { if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
pr_err("Flow hash function %d isn't supported\n", func); pr_err("Flow hash function %d isn't supported\n", func);
return -EPERM; return -EOPNOTSUPP;
} }
switch (func) { switch (func) {
...@@ -2127,7 +2156,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev) ...@@ -2127,7 +2156,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
ENA_ADMIN_RSS_HASH_INPUT)) { ENA_ADMIN_RSS_HASH_INPUT)) {
pr_debug("Feature %d isn't supported\n", pr_debug("Feature %d isn't supported\n",
ENA_ADMIN_RSS_HASH_INPUT); ENA_ADMIN_RSS_HASH_INPUT);
return -EPERM; return -EOPNOTSUPP;
} }
memset(&cmd, 0x0, sizeof(cmd)); memset(&cmd, 0x0, sizeof(cmd));
...@@ -2208,7 +2237,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev) ...@@ -2208,7 +2237,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n", pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
i, hash_ctrl->supported_fields[i].fields, i, hash_ctrl->supported_fields[i].fields,
hash_ctrl->selected_fields[i].fields); hash_ctrl->selected_fields[i].fields);
return -EPERM; return -EOPNOTSUPP;
} }
} }
...@@ -2286,7 +2315,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev) ...@@ -2286,7 +2315,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) { ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
pr_debug("Feature %d isn't supported\n", pr_debug("Feature %d isn't supported\n",
ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG); ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
return -EPERM; return -EOPNOTSUPP;
} }
ret = ena_com_ind_tbl_convert_to_device(ena_dev); ret = ena_com_ind_tbl_convert_to_device(ena_dev);
...@@ -2553,7 +2582,7 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev) ...@@ -2553,7 +2582,7 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
ENA_ADMIN_INTERRUPT_MODERATION); ENA_ADMIN_INTERRUPT_MODERATION);
if (rc) { if (rc) {
if (rc == -EPERM) { if (rc == -EOPNOTSUPP) {
pr_debug("Feature %d isn't supported\n", pr_debug("Feature %d isn't supported\n",
ENA_ADMIN_INTERRUPT_MODERATION); ENA_ADMIN_INTERRUPT_MODERATION);
rc = 0; rc = 0;
......
...@@ -97,6 +97,8 @@ ...@@ -97,6 +97,8 @@
#define ENA_INTR_MODER_LEVEL_STRIDE 2 #define ENA_INTR_MODER_LEVEL_STRIDE 2
#define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF #define ENA_INTR_BYTE_COUNT_NOT_SUPPORTED 0xFFFFFF
#define ENA_HW_HINTS_NO_TIMEOUT 0xFFFF
enum ena_intr_moder_level { enum ena_intr_moder_level {
ENA_INTR_MODER_LOWEST = 0, ENA_INTR_MODER_LOWEST = 0,
ENA_INTR_MODER_LOW, ENA_INTR_MODER_LOW,
...@@ -232,7 +234,9 @@ struct ena_com_stats_admin { ...@@ -232,7 +234,9 @@ struct ena_com_stats_admin {
struct ena_com_admin_queue { struct ena_com_admin_queue {
void *q_dmadev; void *q_dmadev;
spinlock_t q_lock; /* spinlock for the admin queue */ spinlock_t q_lock; /* spinlock for the admin queue */
struct ena_comp_ctx *comp_ctx; struct ena_comp_ctx *comp_ctx;
u32 completion_timeout;
u16 q_depth; u16 q_depth;
struct ena_com_admin_cq cq; struct ena_com_admin_cq cq;
struct ena_com_admin_sq sq; struct ena_com_admin_sq sq;
...@@ -267,6 +271,7 @@ struct ena_com_aenq { ...@@ -267,6 +271,7 @@ struct ena_com_aenq {
struct ena_com_mmio_read { struct ena_com_mmio_read {
struct ena_admin_ena_mmio_req_read_less_resp *read_resp; struct ena_admin_ena_mmio_req_read_less_resp *read_resp;
dma_addr_t read_resp_dma_addr; dma_addr_t read_resp_dma_addr;
u32 reg_read_to; /* in us */
u16 seq_num; u16 seq_num;
bool readless_supported; bool readless_supported;
/* spin lock to ensure a single outstanding read */ /* spin lock to ensure a single outstanding read */
...@@ -336,6 +341,7 @@ struct ena_com_dev_get_features_ctx { ...@@ -336,6 +341,7 @@ struct ena_com_dev_get_features_ctx {
struct ena_admin_device_attr_feature_desc dev_attr; struct ena_admin_device_attr_feature_desc dev_attr;
struct ena_admin_feature_aenq_desc aenq; struct ena_admin_feature_aenq_desc aenq;
struct ena_admin_feature_offload_desc offload; struct ena_admin_feature_offload_desc offload;
struct ena_admin_ena_hw_hints hw_hints;
}; };
struct ena_com_create_io_ctx { struct ena_com_create_io_ctx {
...@@ -414,10 +420,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev); ...@@ -414,10 +420,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev);
/* ena_com_dev_reset - Perform device FLR to the device. /* ena_com_dev_reset - Perform device FLR to the device.
* @ena_dev: ENA communication layer struct * @ena_dev: ENA communication layer struct
* @reset_reason: Specify what is the trigger for the reset in case of an error.
* *
* @return - 0 on success, negative value on failure. * @return - 0 on success, negative value on failure.
*/ */
int ena_com_dev_reset(struct ena_com_dev *ena_dev); int ena_com_dev_reset(struct ena_com_dev *ena_dev,
enum ena_regs_reset_reason_types reset_reason);
/* ena_com_create_io_queue - Create io queue. /* ena_com_create_io_queue - Create io queue.
* @ena_dev: ENA communication layer struct * @ena_dev: ENA communication layer struct
......
...@@ -493,6 +493,11 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id) ...@@ -493,6 +493,11 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
if (cdesc_phase != expected_phase) if (cdesc_phase != expected_phase)
return -EAGAIN; return -EAGAIN;
if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
pr_err("Invalid req id %d\n", cdesc->req_id);
return -EINVAL;
}
ena_com_cq_inc_head(io_cq); ena_com_cq_inc_head(io_cq);
*req_id = READ_ONCE(cdesc->req_id); *req_id = READ_ONCE(cdesc->req_id);
......
...@@ -93,6 +93,7 @@ static const struct ena_stats ena_stats_rx_strings[] = { ...@@ -93,6 +93,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(dma_mapping_err), ENA_STAT_RX_ENTRY(dma_mapping_err),
ENA_STAT_RX_ENTRY(bad_desc_num), ENA_STAT_RX_ENTRY(bad_desc_num),
ENA_STAT_RX_ENTRY(rx_copybreak_pkt), ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
ENA_STAT_RX_ENTRY(bad_req_id),
ENA_STAT_RX_ENTRY(empty_rx_ring), ENA_STAT_RX_ENTRY(empty_rx_ring),
}; };
...@@ -539,12 +540,8 @@ static int ena_get_rss_hash(struct ena_com_dev *ena_dev, ...@@ -539,12 +540,8 @@ static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
} }
rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields); rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
if (rc) { if (rc)
/* If device don't have permission, return unsupported */
if (rc == -EPERM)
rc = -EOPNOTSUPP;
return rc; return rc;
}
cmd->data = ena_flow_hash_to_flow_type(hash_fields); cmd->data = ena_flow_hash_to_flow_type(hash_fields);
...@@ -612,7 +609,7 @@ static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info) ...@@ -612,7 +609,7 @@ static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
rc = -EOPNOTSUPP; rc = -EOPNOTSUPP;
} }
return (rc == -EPERM) ? -EOPNOTSUPP : rc; return rc;
} }
static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
...@@ -638,7 +635,7 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info, ...@@ -638,7 +635,7 @@ static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
rc = -EOPNOTSUPP; rc = -EOPNOTSUPP;
} }
return (rc == -EPERM) ? -EOPNOTSUPP : rc; return rc;
} }
static u32 ena_get_rxfh_indir_size(struct net_device *netdev) static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
......
...@@ -87,6 +87,7 @@ static void ena_tx_timeout(struct net_device *dev) ...@@ -87,6 +87,7 @@ static void ena_tx_timeout(struct net_device *dev)
if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return; return;
adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
u64_stats_update_begin(&adapter->syncp); u64_stats_update_begin(&adapter->syncp);
adapter->dev_stats.tx_timeout++; adapter->dev_stats.tx_timeout++;
u64_stats_update_end(&adapter->syncp); u64_stats_update_end(&adapter->syncp);
...@@ -303,6 +304,24 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter) ...@@ -303,6 +304,24 @@ static void ena_free_all_io_tx_resources(struct ena_adapter *adapter)
ena_free_tx_resources(adapter, i); ena_free_tx_resources(adapter, i);
} }
/* Validate an RX req_id against the ring size.
 *
 * A req_id at or beyond ring_size would index out of bounds into
 * rx_buffer_info, so on an invalid id the function bumps the bad_req_id
 * statistic and schedules a device reset (the device state can no longer
 * be trusted).
 *
 * Return: 0 if req_id is within bounds, -EFAULT otherwise.
 */
static inline int validate_rx_req_id(struct ena_ring *rx_ring, u16 req_id)
{
if (likely(req_id < rx_ring->ring_size))
return 0;

netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
"Invalid rx req_id: %hu\n", req_id);

/* u64_stats sync section guards the 64-bit statistic update */
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.bad_req_id++;
u64_stats_update_end(&rx_ring->syncp);

/* Trigger device reset */
rx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
set_bit(ENA_FLAG_TRIGGER_RESET, &rx_ring->adapter->flags);

return -EFAULT;
}
/* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors) /* ena_setup_rx_resources - allocate I/O Rx resources (Descriptors)
* @adapter: network interface device structure * @adapter: network interface device structure
* @qid: queue index * @qid: queue index
...@@ -314,7 +333,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter, ...@@ -314,7 +333,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
{ {
struct ena_ring *rx_ring = &adapter->rx_ring[qid]; struct ena_ring *rx_ring = &adapter->rx_ring[qid];
struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)]; struct ena_irq *ena_irq = &adapter->irq_tbl[ENA_IO_IRQ_IDX(qid)];
int size, node; int size, node, i;
if (rx_ring->rx_buffer_info) { if (rx_ring->rx_buffer_info) {
netif_err(adapter, ifup, adapter->netdev, netif_err(adapter, ifup, adapter->netdev,
...@@ -335,6 +354,20 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter, ...@@ -335,6 +354,20 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
return -ENOMEM; return -ENOMEM;
} }
size = sizeof(u16) * rx_ring->ring_size;
rx_ring->free_rx_ids = vzalloc_node(size, node);
if (!rx_ring->free_rx_ids) {
rx_ring->free_rx_ids = vzalloc(size);
if (!rx_ring->free_rx_ids) {
vfree(rx_ring->rx_buffer_info);
return -ENOMEM;
}
}
/* Req id ring for receiving RX pkts out of order */
for (i = 0; i < rx_ring->ring_size; i++)
rx_ring->free_rx_ids[i] = i;
/* Reset rx statistics */ /* Reset rx statistics */
memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats)); memset(&rx_ring->rx_stats, 0x0, sizeof(rx_ring->rx_stats));
...@@ -358,6 +391,9 @@ static void ena_free_rx_resources(struct ena_adapter *adapter, ...@@ -358,6 +391,9 @@ static void ena_free_rx_resources(struct ena_adapter *adapter,
vfree(rx_ring->rx_buffer_info); vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL; rx_ring->rx_buffer_info = NULL;
vfree(rx_ring->free_rx_ids);
rx_ring->free_rx_ids = NULL;
} }
/* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues /* ena_setup_all_rx_resources - allocate I/O Rx queues resources for all queues
...@@ -463,15 +499,22 @@ static void ena_free_rx_page(struct ena_ring *rx_ring, ...@@ -463,15 +499,22 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
{ {
u16 next_to_use; u16 next_to_use, req_id;
u32 i; u32 i;
int rc; int rc;
next_to_use = rx_ring->next_to_use; next_to_use = rx_ring->next_to_use;
for (i = 0; i < num; i++) { for (i = 0; i < num; i++) {
struct ena_rx_buffer *rx_info = struct ena_rx_buffer *rx_info;
&rx_ring->rx_buffer_info[next_to_use];
req_id = rx_ring->free_rx_ids[next_to_use];
rc = validate_rx_req_id(rx_ring, req_id);
if (unlikely(rc < 0))
break;
rx_info = &rx_ring->rx_buffer_info[req_id];
rc = ena_alloc_rx_page(rx_ring, rx_info, rc = ena_alloc_rx_page(rx_ring, rx_info,
__GFP_COLD | GFP_ATOMIC | __GFP_COMP); __GFP_COLD | GFP_ATOMIC | __GFP_COMP);
...@@ -483,7 +526,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num) ...@@ -483,7 +526,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
} }
rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq, rc = ena_com_add_single_rx_desc(rx_ring->ena_com_io_sq,
&rx_info->ena_buf, &rx_info->ena_buf,
next_to_use); req_id);
if (unlikely(rc)) { if (unlikely(rc)) {
netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev, netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
"failed to add buffer for rx queue %d\n", "failed to add buffer for rx queue %d\n",
...@@ -670,6 +713,7 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) ...@@ -670,6 +713,7 @@ static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id)
u64_stats_update_end(&tx_ring->syncp); u64_stats_update_end(&tx_ring->syncp);
/* Trigger device reset */ /* Trigger device reset */
tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags); set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
return -EFAULT; return -EFAULT;
} }
...@@ -781,19 +825,42 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) ...@@ -781,19 +825,42 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
return tx_pkts; return tx_pkts;
} }
/* Allocate an skb for a received packet.
 *
 * @rx_ring: RX ring the packet arrived on
 * @frags:   when true, take an skb from the NAPI frags cache (for packets
 *           delivered as page fragments); when false, allocate a small
 *           linear skb of rx_copybreak bytes (for packets copied inline)
 *
 * On allocation failure the skb_alloc_fail statistic is incremented and
 * NULL is returned.
 */
static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, bool frags)
{
struct sk_buff *skb;

if (frags)
skb = napi_get_frags(rx_ring->napi);
else
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_copybreak);

if (unlikely(!skb)) {
/* u64_stats sync section guards the 64-bit statistic update */
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.skb_alloc_fail++;
u64_stats_update_end(&rx_ring->syncp);
netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
"Failed to allocate skb. frags: %d\n", frags);
return NULL;
}

return skb;
}
static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
struct ena_com_rx_buf_info *ena_bufs, struct ena_com_rx_buf_info *ena_bufs,
u32 descs, u32 descs,
u16 *next_to_clean) u16 *next_to_clean)
{ {
struct sk_buff *skb; struct sk_buff *skb;
struct ena_rx_buffer *rx_info = struct ena_rx_buffer *rx_info;
&rx_ring->rx_buffer_info[*next_to_clean]; u16 len, req_id, buf = 0;
u32 len;
u32 buf = 0;
void *va; void *va;
len = ena_bufs[0].len; len = ena_bufs[buf].len;
req_id = ena_bufs[buf].req_id;
rx_info = &rx_ring->rx_buffer_info[req_id];
if (unlikely(!rx_info->page)) { if (unlikely(!rx_info->page)) {
netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
"Page is NULL\n"); "Page is NULL\n");
...@@ -809,16 +876,9 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, ...@@ -809,16 +876,9 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
prefetch(va + NET_IP_ALIGN); prefetch(va + NET_IP_ALIGN);
if (len <= rx_ring->rx_copybreak) { if (len <= rx_ring->rx_copybreak) {
skb = netdev_alloc_skb_ip_align(rx_ring->netdev, skb = ena_alloc_skb(rx_ring, false);
rx_ring->rx_copybreak); if (unlikely(!skb))
if (unlikely(!skb)) {
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.skb_alloc_fail++;
u64_stats_update_end(&rx_ring->syncp);
netif_err(rx_ring->adapter, rx_err, rx_ring->netdev,
"Failed to allocate skb\n");
return NULL; return NULL;
}
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx allocated small packet. len %d. data_len %d\n", "rx allocated small packet. len %d. data_len %d\n",
...@@ -837,20 +897,15 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, ...@@ -837,20 +897,15 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
skb_put(skb, len); skb_put(skb, len);
skb->protocol = eth_type_trans(skb, rx_ring->netdev); skb->protocol = eth_type_trans(skb, rx_ring->netdev);
rx_ring->free_rx_ids[*next_to_clean] = req_id;
*next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs, *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
rx_ring->ring_size); rx_ring->ring_size);
return skb; return skb;
} }
skb = napi_get_frags(rx_ring->napi); skb = ena_alloc_skb(rx_ring, true);
if (unlikely(!skb)) { if (unlikely(!skb))
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"Failed allocating skb\n");
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.skb_alloc_fail++;
u64_stats_update_end(&rx_ring->syncp);
return NULL; return NULL;
}
do { do {
dma_unmap_page(rx_ring->dev, dma_unmap_page(rx_ring->dev,
...@@ -865,13 +920,18 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring, ...@@ -865,13 +920,18 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
skb->len, skb->data_len); skb->len, skb->data_len);
rx_info->page = NULL; rx_info->page = NULL;
rx_ring->free_rx_ids[*next_to_clean] = req_id;
*next_to_clean = *next_to_clean =
ENA_RX_RING_IDX_NEXT(*next_to_clean, ENA_RX_RING_IDX_NEXT(*next_to_clean,
rx_ring->ring_size); rx_ring->ring_size);
if (likely(--descs == 0)) if (likely(--descs == 0))
break; break;
rx_info = &rx_ring->rx_buffer_info[*next_to_clean];
len = ena_bufs[++buf].len; buf++;
len = ena_bufs[buf].len;
req_id = ena_bufs[buf].req_id;
rx_info = &rx_ring->rx_buffer_info[req_id];
} while (1); } while (1);
return skb; return skb;
...@@ -972,6 +1032,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, ...@@ -972,6 +1032,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
int rc = 0; int rc = 0;
int total_len = 0; int total_len = 0;
int rx_copybreak_pkt = 0; int rx_copybreak_pkt = 0;
int i;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev, netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"%s qid %d\n", __func__, rx_ring->qid); "%s qid %d\n", __func__, rx_ring->qid);
...@@ -1001,9 +1062,13 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, ...@@ -1001,9 +1062,13 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
/* exit if we failed to retrieve a buffer */ /* exit if we failed to retrieve a buffer */
if (unlikely(!skb)) { if (unlikely(!skb)) {
next_to_clean = ENA_RX_RING_IDX_ADD(next_to_clean, for (i = 0; i < ena_rx_ctx.descs; i++) {
ena_rx_ctx.descs, rx_ring->free_tx_ids[next_to_clean] =
rx_ring->ring_size); rx_ring->ena_bufs[i].req_id;
next_to_clean =
ENA_RX_RING_IDX_NEXT(next_to_clean,
rx_ring->ring_size);
}
break; break;
} }
...@@ -1055,6 +1120,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi, ...@@ -1055,6 +1120,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
u64_stats_update_end(&rx_ring->syncp); u64_stats_update_end(&rx_ring->syncp);
/* Too many desc from the device. Trigger reset */ /* Too many desc from the device. Trigger reset */
adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
return 0; return 0;
...@@ -1208,14 +1274,25 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data) ...@@ -1208,14 +1274,25 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
{ {
struct ena_napi *ena_napi = data; struct ena_napi *ena_napi = data;
napi_schedule(&ena_napi->napi); napi_schedule_irqoff(&ena_napi->napi);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
/* Reserve a single MSI-X vector for management (admin + aenq).
* plus reserve one vector for each potential io queue.
* the number of potential io queues is the minimum of what the device
* supports and the number of vCPUs.
*/
static int ena_enable_msix(struct ena_adapter *adapter, int num_queues) static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
{ {
int msix_vecs, rc; int msix_vecs, irq_cnt;
if (test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
netif_err(adapter, probe, adapter->netdev,
"Error, MSI-X is already enabled\n");
return -EPERM;
}
/* Reserved the max msix vectors we might need */ /* Reserved the max msix vectors we might need */
msix_vecs = ENA_MAX_MSIX_VEC(num_queues); msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
...@@ -1223,25 +1300,28 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues) ...@@ -1223,25 +1300,28 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
netif_dbg(adapter, probe, adapter->netdev, netif_dbg(adapter, probe, adapter->netdev,
"trying to enable MSI-X, vectors %d\n", msix_vecs); "trying to enable MSI-X, vectors %d\n", msix_vecs);
rc = pci_alloc_irq_vectors(adapter->pdev, msix_vecs, msix_vecs, irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
PCI_IRQ_MSIX); msix_vecs, PCI_IRQ_MSIX);
if (rc < 0) {
if (irq_cnt < 0) {
netif_err(adapter, probe, adapter->netdev, netif_err(adapter, probe, adapter->netdev,
"Failed to enable MSI-X, vectors %d rc %d\n", "Failed to enable MSI-X. irq_cnt %d\n", irq_cnt);
msix_vecs, rc);
return -ENOSPC; return -ENOSPC;
} }
netif_dbg(adapter, probe, adapter->netdev, "enable MSI-X, vectors %d\n", if (irq_cnt != msix_vecs) {
msix_vecs); netif_notice(adapter, probe, adapter->netdev,
"enable only %d MSI-X (out of %d), reduce the number of queues\n",
if (msix_vecs >= 1) { irq_cnt, msix_vecs);
if (ena_init_rx_cpu_rmap(adapter)) adapter->num_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
netif_warn(adapter, probe, adapter->netdev,
"Failed to map IRQs to CPUs\n");
} }
adapter->msix_vecs = msix_vecs; if (ena_init_rx_cpu_rmap(adapter))
netif_warn(adapter, probe, adapter->netdev,
"Failed to map IRQs to CPUs\n");
adapter->msix_vecs = irq_cnt;
set_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags);
return 0; return 0;
} }
...@@ -1318,6 +1398,12 @@ static int ena_request_io_irq(struct ena_adapter *adapter) ...@@ -1318,6 +1398,12 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
struct ena_irq *irq; struct ena_irq *irq;
int rc = 0, i, k; int rc = 0, i, k;
if (!test_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags)) {
netif_err(adapter, ifup, adapter->netdev,
"Failed to request I/O IRQ: MSI-X is not enabled\n");
return -EINVAL;
}
for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) { for (i = ENA_IO_IRQ_FIRST_IDX; i < adapter->msix_vecs; i++) {
irq = &adapter->irq_tbl[i]; irq = &adapter->irq_tbl[i];
rc = request_irq(irq->vector, irq->handler, flags, irq->name, rc = request_irq(irq->vector, irq->handler, flags, irq->name,
...@@ -1376,6 +1462,12 @@ static void ena_free_io_irq(struct ena_adapter *adapter) ...@@ -1376,6 +1462,12 @@ static void ena_free_io_irq(struct ena_adapter *adapter)
} }
} }
static void ena_disable_msix(struct ena_adapter *adapter)
{
if (test_and_clear_bit(ENA_FLAG_MSIX_ENABLED, &adapter->flags))
pci_free_irq_vectors(adapter->pdev);
}
static void ena_disable_io_intr_sync(struct ena_adapter *adapter) static void ena_disable_io_intr_sync(struct ena_adapter *adapter)
{ {
int i; int i;
...@@ -1446,7 +1538,7 @@ static int ena_rss_configure(struct ena_adapter *adapter) ...@@ -1446,7 +1538,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
/* In case the RSS table wasn't initialized by probe */ /* In case the RSS table wasn't initialized by probe */
if (!ena_dev->rss.tbl_log_size) { if (!ena_dev->rss.tbl_log_size) {
rc = ena_rss_init_default(adapter); rc = ena_rss_init_default(adapter);
if (rc && (rc != -EPERM)) { if (rc && (rc != -EOPNOTSUPP)) {
netif_err(adapter, ifup, adapter->netdev, netif_err(adapter, ifup, adapter->netdev,
"Failed to init RSS rc: %d\n", rc); "Failed to init RSS rc: %d\n", rc);
return rc; return rc;
...@@ -1455,17 +1547,17 @@ static int ena_rss_configure(struct ena_adapter *adapter) ...@@ -1455,17 +1547,17 @@ static int ena_rss_configure(struct ena_adapter *adapter)
/* Set indirect table */ /* Set indirect table */
rc = ena_com_indirect_table_set(ena_dev); rc = ena_com_indirect_table_set(ena_dev);
if (unlikely(rc && rc != -EPERM)) if (unlikely(rc && rc != -EOPNOTSUPP))
return rc; return rc;
/* Configure hash function (if supported) */ /* Configure hash function (if supported) */
rc = ena_com_set_hash_function(ena_dev); rc = ena_com_set_hash_function(ena_dev);
if (unlikely(rc && (rc != -EPERM))) if (unlikely(rc && (rc != -EOPNOTSUPP)))
return rc; return rc;
/* Configure hash inputs (if supported) */ /* Configure hash inputs (if supported) */
rc = ena_com_set_hash_ctrl(ena_dev); rc = ena_com_set_hash_ctrl(ena_dev);
if (unlikely(rc && (rc != -EPERM))) if (unlikely(rc && (rc != -EOPNOTSUPP)))
return rc; return rc;
return 0; return 0;
...@@ -1720,7 +1812,7 @@ static void ena_down(struct ena_adapter *adapter) ...@@ -1720,7 +1812,7 @@ static void ena_down(struct ena_adapter *adapter)
if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) { if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) {
int rc; int rc;
rc = ena_com_dev_reset(adapter->ena_dev); rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
if (rc) if (rc)
dev_err(&adapter->pdev->dev, "Device reset failed\n"); dev_err(&adapter->pdev->dev, "Device reset failed\n");
} }
...@@ -2144,7 +2236,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev) ...@@ -2144,7 +2236,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
rc = ena_com_set_host_attributes(ena_dev); rc = ena_com_set_host_attributes(ena_dev);
if (rc) { if (rc) {
if (rc == -EPERM) if (rc == -EOPNOTSUPP)
pr_warn("Cannot set host attributes\n"); pr_warn("Cannot set host attributes\n");
else else
pr_err("Cannot set host attributes\n"); pr_err("Cannot set host attributes\n");
...@@ -2181,7 +2273,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter) ...@@ -2181,7 +2273,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_set_host_attributes(adapter->ena_dev); rc = ena_com_set_host_attributes(adapter->ena_dev);
if (rc) { if (rc) {
if (rc == -EPERM) if (rc == -EOPNOTSUPP)
netif_warn(adapter, drv, adapter->netdev, netif_warn(adapter, drv, adapter->netdev,
"Cannot set host attributes\n"); "Cannot set host attributes\n");
else else
...@@ -2353,7 +2445,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev, ...@@ -2353,7 +2445,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ); readless_supported = !(pdev->revision & ENA_MMIO_DISABLE_REG_READ);
ena_com_set_mmio_read_mode(ena_dev, readless_supported); ena_com_set_mmio_read_mode(ena_dev, readless_supported);
rc = ena_com_dev_reset(ena_dev); rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
if (rc) { if (rc) {
dev_err(dev, "Can not reset device\n"); dev_err(dev, "Can not reset device\n");
goto err_mmio_read_less; goto err_mmio_read_less;
...@@ -2464,7 +2556,8 @@ static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter, ...@@ -2464,7 +2556,8 @@ static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
return 0; return 0;
err_disable_msix: err_disable_msix:
pci_free_irq_vectors(adapter->pdev); ena_disable_msix(adapter);
return rc; return rc;
} }
...@@ -2502,7 +2595,7 @@ static void ena_fw_reset_device(struct work_struct *work) ...@@ -2502,7 +2595,7 @@ static void ena_fw_reset_device(struct work_struct *work)
ena_free_mgmnt_irq(adapter); ena_free_mgmnt_irq(adapter);
pci_free_irq_vectors(adapter->pdev); ena_disable_msix(adapter);
ena_com_abort_admin_commands(ena_dev); ena_com_abort_admin_commands(ena_dev);
...@@ -2512,6 +2605,7 @@ static void ena_fw_reset_device(struct work_struct *work) ...@@ -2512,6 +2605,7 @@ static void ena_fw_reset_device(struct work_struct *work)
ena_com_mmio_reg_read_request_destroy(ena_dev); ena_com_mmio_reg_read_request_destroy(ena_dev);
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
/* Finish with the destroy part. Start the init part */ /* Finish with the destroy part. Start the init part */
...@@ -2553,7 +2647,7 @@ static void ena_fw_reset_device(struct work_struct *work) ...@@ -2553,7 +2647,7 @@ static void ena_fw_reset_device(struct work_struct *work)
return; return;
err_disable_msix: err_disable_msix:
ena_free_mgmnt_irq(adapter); ena_free_mgmnt_irq(adapter);
pci_free_irq_vectors(adapter->pdev); ena_disable_msix(adapter);
err_device_destroy: err_device_destroy:
ena_com_admin_destroy(ena_dev); ena_com_admin_destroy(ena_dev);
err: err:
...@@ -2577,7 +2671,7 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter, ...@@ -2577,7 +2671,7 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
tx_buf = &tx_ring->tx_buffer_info[i]; tx_buf = &tx_ring->tx_buffer_info[i];
last_jiffies = tx_buf->last_jiffies; last_jiffies = tx_buf->last_jiffies;
if (unlikely(last_jiffies && if (unlikely(last_jiffies &&
time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) { time_is_before_jiffies(last_jiffies + adapter->missing_tx_completion_to))) {
if (!tx_buf->print_once) if (!tx_buf->print_once)
netif_notice(adapter, tx_err, adapter->netdev, netif_notice(adapter, tx_err, adapter->netdev,
"Found a Tx that wasn't completed on time, qid %d, index %d.\n", "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
...@@ -2586,10 +2680,13 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter, ...@@ -2586,10 +2680,13 @@ static int check_missing_comp_in_queue(struct ena_adapter *adapter,
tx_buf->print_once = 1; tx_buf->print_once = 1;
missed_tx++; missed_tx++;
if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) { if (unlikely(missed_tx > adapter->missing_tx_completion_threshold)) {
netif_err(adapter, tx_err, adapter->netdev, netif_err(adapter, tx_err, adapter->netdev,
"The number of lost tx completions is above the threshold (%d > %d). Reset the device\n", "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS); missed_tx,
adapter->missing_tx_completion_threshold);
adapter->reset_reason =
ENA_REGS_RESET_MISS_TX_CMPL;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
return -EIO; return -EIO;
} }
...@@ -2613,6 +2710,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter) ...@@ -2613,6 +2710,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags)) if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
return; return;
if (adapter->missing_tx_completion_to == ENA_HW_HINTS_NO_TIMEOUT)
return;
budget = ENA_MONITORED_TX_QUEUES; budget = ENA_MONITORED_TX_QUEUES;
for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) { for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
...@@ -2690,14 +2790,18 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter) ...@@ -2690,14 +2790,18 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
if (!adapter->wd_state) if (!adapter->wd_state)
return; return;
keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
+ ENA_DEVICE_KALIVE_TIMEOUT); return;
keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
adapter->keep_alive_timeout);
if (unlikely(time_is_before_jiffies(keep_alive_expired))) { if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
netif_err(adapter, drv, adapter->netdev, netif_err(adapter, drv, adapter->netdev,
"Keep alive watchdog timeout.\n"); "Keep alive watchdog timeout.\n");
u64_stats_update_begin(&adapter->syncp); u64_stats_update_begin(&adapter->syncp);
adapter->dev_stats.wd_expired++; adapter->dev_stats.wd_expired++;
u64_stats_update_end(&adapter->syncp); u64_stats_update_end(&adapter->syncp);
adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
} }
} }
...@@ -2710,10 +2814,49 @@ static void check_for_admin_com_state(struct ena_adapter *adapter) ...@@ -2710,10 +2814,49 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
u64_stats_update_begin(&adapter->syncp); u64_stats_update_begin(&adapter->syncp);
adapter->dev_stats.admin_q_pause++; adapter->dev_stats.admin_q_pause++;
u64_stats_update_end(&adapter->syncp); u64_stats_update_end(&adapter->syncp);
adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
} }
} }
static void ena_update_hints(struct ena_adapter *adapter,
struct ena_admin_ena_hw_hints *hints)
{
struct net_device *netdev = adapter->netdev;
if (hints->admin_completion_tx_timeout)
adapter->ena_dev->admin_queue.completion_timeout =
hints->admin_completion_tx_timeout * 1000;
if (hints->mmio_read_timeout)
/* convert to usec */
adapter->ena_dev->mmio_read.reg_read_to =
hints->mmio_read_timeout * 1000;
if (hints->missed_tx_completion_count_threshold_to_reset)
adapter->missing_tx_completion_threshold =
hints->missed_tx_completion_count_threshold_to_reset;
if (hints->missing_tx_completion_timeout) {
if (hints->missing_tx_completion_timeout == ENA_HW_HINTS_NO_TIMEOUT)
adapter->missing_tx_completion_to = ENA_HW_HINTS_NO_TIMEOUT;
else
adapter->missing_tx_completion_to =
msecs_to_jiffies(hints->missing_tx_completion_timeout);
}
if (hints->netdev_wd_timeout)
netdev->watchdog_timeo = msecs_to_jiffies(hints->netdev_wd_timeout);
if (hints->driver_watchdog_timeout) {
if (hints->driver_watchdog_timeout == ENA_HW_HINTS_NO_TIMEOUT)
adapter->keep_alive_timeout = ENA_HW_HINTS_NO_TIMEOUT;
else
adapter->keep_alive_timeout =
msecs_to_jiffies(hints->driver_watchdog_timeout);
}
}
static void ena_update_host_info(struct ena_admin_host_info *host_info, static void ena_update_host_info(struct ena_admin_host_info *host_info,
struct net_device *netdev) struct net_device *netdev)
{ {
...@@ -2886,7 +3029,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter) ...@@ -2886,7 +3029,7 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
val = ethtool_rxfh_indir_default(i, adapter->num_queues); val = ethtool_rxfh_indir_default(i, adapter->num_queues);
rc = ena_com_indirect_table_fill_entry(ena_dev, i, rc = ena_com_indirect_table_fill_entry(ena_dev, i,
ENA_IO_RXQ_IDX(val)); ENA_IO_RXQ_IDX(val));
if (unlikely(rc && (rc != -EPERM))) { if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill indirect table\n"); dev_err(dev, "Cannot fill indirect table\n");
goto err_fill_indir; goto err_fill_indir;
} }
...@@ -2894,13 +3037,13 @@ static int ena_rss_init_default(struct ena_adapter *adapter) ...@@ -2894,13 +3037,13 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
ENA_HASH_KEY_SIZE, 0xFFFFFFFF); ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
if (unlikely(rc && (rc != -EPERM))) { if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill hash function\n"); dev_err(dev, "Cannot fill hash function\n");
goto err_fill_indir; goto err_fill_indir;
} }
rc = ena_com_set_default_hash_ctrl(ena_dev); rc = ena_com_set_default_hash_ctrl(ena_dev);
if (unlikely(rc && (rc != -EPERM))) { if (unlikely(rc && (rc != -EOPNOTSUPP))) {
dev_err(dev, "Cannot fill hash control\n"); dev_err(dev, "Cannot fill hash control\n");
goto err_fill_indir; goto err_fill_indir;
} }
...@@ -3076,6 +3219,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -3076,6 +3219,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_set_conf_feat_params(adapter, &get_feat_ctx); ena_set_conf_feat_params(adapter, &get_feat_ctx);
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
adapter->tx_ring_size = queue_size; adapter->tx_ring_size = queue_size;
adapter->rx_ring_size = queue_size; adapter->rx_ring_size = queue_size;
...@@ -3114,7 +3258,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -3114,7 +3258,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_worker_destroy; goto err_worker_destroy;
} }
rc = ena_rss_init_default(adapter); rc = ena_rss_init_default(adapter);
if (rc && (rc != -EPERM)) { if (rc && (rc != -EOPNOTSUPP)) {
dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc); dev_err(&pdev->dev, "Cannot init RSS rc: %d\n", rc);
goto err_free_msix; goto err_free_msix;
} }
...@@ -3136,6 +3280,11 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -3136,6 +3280,11 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->reset_task, ena_fw_reset_device); INIT_WORK(&adapter->reset_task, ena_fw_reset_device);
adapter->last_keep_alive_jiffies = jiffies; adapter->last_keep_alive_jiffies = jiffies;
adapter->keep_alive_timeout = ENA_DEVICE_KALIVE_TIMEOUT;
adapter->missing_tx_completion_to = TX_TIMEOUT;
adapter->missing_tx_completion_threshold = MAX_NUM_OF_TIMEOUTED_PACKETS;
ena_update_hints(adapter, &get_feat_ctx.hw_hints);
setup_timer(&adapter->timer_service, ena_timer_service, setup_timer(&adapter->timer_service, ena_timer_service,
(unsigned long)adapter); (unsigned long)adapter);
...@@ -3155,9 +3304,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -3155,9 +3304,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_com_delete_debug_area(ena_dev); ena_com_delete_debug_area(ena_dev);
ena_com_rss_destroy(ena_dev); ena_com_rss_destroy(ena_dev);
err_free_msix: err_free_msix:
ena_com_dev_reset(ena_dev); ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR);
ena_free_mgmnt_irq(adapter); ena_free_mgmnt_irq(adapter);
pci_free_irq_vectors(adapter->pdev); ena_disable_msix(adapter);
err_worker_destroy: err_worker_destroy:
ena_com_destroy_interrupt_moderation(ena_dev); ena_com_destroy_interrupt_moderation(ena_dev);
del_timer(&adapter->timer_service); del_timer(&adapter->timer_service);
...@@ -3238,11 +3387,11 @@ static void ena_remove(struct pci_dev *pdev) ...@@ -3238,11 +3387,11 @@ static void ena_remove(struct pci_dev *pdev)
/* Reset the device only if the device is running. */ /* Reset the device only if the device is running. */
if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
ena_com_dev_reset(ena_dev); ena_com_dev_reset(ena_dev, adapter->reset_reason);
ena_free_mgmnt_irq(adapter); ena_free_mgmnt_irq(adapter);
pci_free_irq_vectors(adapter->pdev); ena_disable_msix(adapter);
free_netdev(netdev); free_netdev(netdev);
...@@ -3329,14 +3478,24 @@ static void ena_keep_alive_wd(void *adapter_data, ...@@ -3329,14 +3478,24 @@ static void ena_keep_alive_wd(void *adapter_data,
struct ena_admin_aenq_entry *aenq_e) struct ena_admin_aenq_entry *aenq_e)
{ {
struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
struct ena_admin_aenq_keep_alive_desc *desc;
u64 rx_drops;
desc = (struct ena_admin_aenq_keep_alive_desc *)aenq_e;
adapter->last_keep_alive_jiffies = jiffies; adapter->last_keep_alive_jiffies = jiffies;
rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
u64_stats_update_begin(&adapter->syncp);
adapter->dev_stats.rx_drops = rx_drops;
u64_stats_update_end(&adapter->syncp);
} }
static void ena_notification(void *adapter_data, static void ena_notification(void *adapter_data,
struct ena_admin_aenq_entry *aenq_e) struct ena_admin_aenq_entry *aenq_e)
{ {
struct ena_adapter *adapter = (struct ena_adapter *)adapter_data; struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
struct ena_admin_ena_hw_hints *hints;
WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION, WARN(aenq_e->aenq_common_desc.group != ENA_ADMIN_NOTIFICATION,
"Invalid group(%x) expected %x\n", "Invalid group(%x) expected %x\n",
...@@ -3354,6 +3513,11 @@ static void ena_notification(void *adapter_data, ...@@ -3354,6 +3513,11 @@ static void ena_notification(void *adapter_data,
case ENA_ADMIN_RESUME: case ENA_ADMIN_RESUME:
queue_work(ena_wq, &adapter->resume_io_task); queue_work(ena_wq, &adapter->resume_io_task);
break; break;
case ENA_ADMIN_UPDATE_HINTS:
hints = (struct ena_admin_ena_hw_hints *)
(&aenq_e->inline_data_w4);
ena_update_hints(adapter, hints);
break;
default: default:
netif_err(adapter, drv, adapter->netdev, netif_err(adapter, drv, adapter->netdev,
"Invalid aenq notification link state %d\n", "Invalid aenq notification link state %d\n",
......
...@@ -44,21 +44,24 @@ ...@@ -44,21 +44,24 @@
#include "ena_eth_com.h" #include "ena_eth_com.h"
#define DRV_MODULE_VER_MAJOR 1 #define DRV_MODULE_VER_MAJOR 1
#define DRV_MODULE_VER_MINOR 1 #define DRV_MODULE_VER_MINOR 2
#define DRV_MODULE_VER_SUBMINOR 7 #define DRV_MODULE_VER_SUBMINOR 0
#define DRV_MODULE_NAME "ena" #define DRV_MODULE_NAME "ena"
#ifndef DRV_MODULE_VERSION #ifndef DRV_MODULE_VERSION
#define DRV_MODULE_VERSION \ #define DRV_MODULE_VERSION \
__stringify(DRV_MODULE_VER_MAJOR) "." \ __stringify(DRV_MODULE_VER_MAJOR) "." \
__stringify(DRV_MODULE_VER_MINOR) "." \ __stringify(DRV_MODULE_VER_MINOR) "." \
__stringify(DRV_MODULE_VER_SUBMINOR) __stringify(DRV_MODULE_VER_SUBMINOR) "k"
#endif #endif
#define DEVICE_NAME "Elastic Network Adapter (ENA)" #define DEVICE_NAME "Elastic Network Adapter (ENA)"
/* 1 for AENQ + ADMIN */ /* 1 for AENQ + ADMIN */
#define ENA_MAX_MSIX_VEC(io_queues) (1 + (io_queues)) #define ENA_ADMIN_MSIX_VEC 1
#define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues))
#define ENA_MIN_MSIX_VEC 2
#define ENA_REG_BAR 0 #define ENA_REG_BAR 0
#define ENA_MEM_BAR 2 #define ENA_MEM_BAR 2
...@@ -194,12 +197,19 @@ struct ena_stats_rx { ...@@ -194,12 +197,19 @@ struct ena_stats_rx {
u64 dma_mapping_err; u64 dma_mapping_err;
u64 bad_desc_num; u64 bad_desc_num;
u64 rx_copybreak_pkt; u64 rx_copybreak_pkt;
u64 bad_req_id;
u64 empty_rx_ring; u64 empty_rx_ring;
}; };
struct ena_ring { struct ena_ring {
/* Holds the empty requests for TX out of order completions */ union {
u16 *free_tx_ids; /* Holds the empty requests for TX/RX
* out of order completions
*/
u16 *free_tx_ids;
u16 *free_rx_ids;
};
union { union {
struct ena_tx_buffer *tx_buffer_info; struct ena_tx_buffer *tx_buffer_info;
struct ena_rx_buffer *rx_buffer_info; struct ena_rx_buffer *rx_buffer_info;
...@@ -260,6 +270,7 @@ enum ena_flags_t { ...@@ -260,6 +270,7 @@ enum ena_flags_t {
ENA_FLAG_DEVICE_RUNNING, ENA_FLAG_DEVICE_RUNNING,
ENA_FLAG_DEV_UP, ENA_FLAG_DEV_UP,
ENA_FLAG_LINK_UP, ENA_FLAG_LINK_UP,
ENA_FLAG_MSIX_ENABLED,
ENA_FLAG_TRIGGER_RESET ENA_FLAG_TRIGGER_RESET
}; };
...@@ -280,6 +291,8 @@ struct ena_adapter { ...@@ -280,6 +291,8 @@ struct ena_adapter {
int msix_vecs; int msix_vecs;
u32 missing_tx_completion_threshold;
u32 tx_usecs, rx_usecs; /* interrupt moderation */ u32 tx_usecs, rx_usecs; /* interrupt moderation */
u32 tx_frames, rx_frames; /* interrupt moderation */ u32 tx_frames, rx_frames; /* interrupt moderation */
...@@ -293,6 +306,9 @@ struct ena_adapter { ...@@ -293,6 +306,9 @@ struct ena_adapter {
u8 mac_addr[ETH_ALEN]; u8 mac_addr[ETH_ALEN];
unsigned long keep_alive_timeout;
unsigned long missing_tx_completion_to;
char name[ENA_NAME_MAX_LEN]; char name[ENA_NAME_MAX_LEN];
unsigned long flags; unsigned long flags;
...@@ -322,6 +338,8 @@ struct ena_adapter { ...@@ -322,6 +338,8 @@ struct ena_adapter {
/* last queue index that was checked for uncompleted tx packets */ /* last queue index that was checked for uncompleted tx packets */
u32 last_monitored_tx_qid; u32 last_monitored_tx_qid;
enum ena_regs_reset_reason_types reset_reason;
}; };
void ena_set_ethtool_ops(struct net_device *netdev); void ena_set_ethtool_ops(struct net_device *netdev);
......
...@@ -32,6 +32,36 @@ ...@@ -32,6 +32,36 @@
#ifndef _ENA_REGS_H_ #ifndef _ENA_REGS_H_
#define _ENA_REGS_H_ #define _ENA_REGS_H_
enum ena_regs_reset_reason_types {
ENA_REGS_RESET_NORMAL = 0,
ENA_REGS_RESET_KEEP_ALIVE_TO = 1,
ENA_REGS_RESET_ADMIN_TO = 2,
ENA_REGS_RESET_MISS_TX_CMPL = 3,
ENA_REGS_RESET_INV_RX_REQ_ID = 4,
ENA_REGS_RESET_INV_TX_REQ_ID = 5,
ENA_REGS_RESET_TOO_MANY_RX_DESCS = 6,
ENA_REGS_RESET_INIT_ERR = 7,
ENA_REGS_RESET_DRIVER_INVALID_STATE = 8,
ENA_REGS_RESET_OS_TRIGGER = 9,
ENA_REGS_RESET_OS_NETDEV_WD = 10,
ENA_REGS_RESET_SHUTDOWN = 11,
ENA_REGS_RESET_USER_TRIGGER = 12,
ENA_REGS_RESET_GENERIC = 13,
};
/* ena_registers offsets */ /* ena_registers offsets */
#define ENA_REGS_VERSION_OFF 0x0 #define ENA_REGS_VERSION_OFF 0x0
#define ENA_REGS_CONTROLLER_VERSION_OFF 0x4 #define ENA_REGS_CONTROLLER_VERSION_OFF 0x4
...@@ -78,6 +108,8 @@ ...@@ -78,6 +108,8 @@
#define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e #define ENA_REGS_CAPS_RESET_TIMEOUT_MASK 0x3e
#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8 #define ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT 8
#define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00 #define ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK 0xff00
#define ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT 16
#define ENA_REGS_CAPS_ADMIN_CMD_TO_MASK 0xf0000
/* aq_caps register */ /* aq_caps register */
#define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff #define ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK 0xffff
...@@ -102,6 +134,8 @@ ...@@ -102,6 +134,8 @@
#define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4 #define ENA_REGS_DEV_CTL_QUIESCENT_MASK 0x4
#define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3 #define ENA_REGS_DEV_CTL_IO_RESUME_SHIFT 3
#define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8 #define ENA_REGS_DEV_CTL_IO_RESUME_MASK 0x8
#define ENA_REGS_DEV_CTL_RESET_REASON_SHIFT 28
#define ENA_REGS_DEV_CTL_RESET_REASON_MASK 0xf0000000
/* dev_sts register */ /* dev_sts register */
#define ENA_REGS_DEV_STS_READY_MASK 0x1 #define ENA_REGS_DEV_STS_READY_MASK 0x1
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment