Commit b523f23f authored by Jakub Kicinski

Merge branch 'mlx5-updates-2024-09-11'

Saeed Mahameed says:

====================
Misc updates to mlx5 driver:

1) Fix HW steering return values and align them with the kdoc
2) Flow steering cleanups and support for "no append" at the software level
3) Support sync reset using the hot reset method
4) New RX SW counter covering no-split events in header/data split mode
5) Make the IRQ affinity of SFs configurable
====================

Link: https://patch.msgid.link/20240911201757.1505453-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents f0c7de5a cc181291
......@@ -218,6 +218,22 @@ the software port.
[#accel]_.
- Informative
* - `rx[i]_hds_nosplit_packets`
- Number of packets that were not split in header/data split mode. A
packet will not get split when the hardware does not support splitting
its protocol; ICMPv4/v6 is an example of such a protocol. Currently only
TCP and UDP over IPv4/IPv6 are supported for header/data split
[#accel]_.
- Informative
* - `rx[i]_hds_nosplit_bytes`
- Number of bytes in packets that were not split in header/data split
mode. A packet will not get split when the hardware does not support
splitting its protocol; ICMPv4/v6 is an example of such a protocol.
Currently only TCP and UDP over IPv4/IPv6 are supported for header/data
split [#accel]_.
- Informative
* - `rx[i]_lro_packets`
- The number of LRO packets received on ring i [#accel]_.
- Acceleration
......
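As a rough illustration of how the two counters relate (a minimal sketch; rq_count_nosplit is a hypothetical helper — the real accounting lands in the SHAMPO RX completion handler further down in this series):

/* Sketch only: one no-split event bumps the packet counter by one and
 * the byte counter by the full packet size, since no header/data
 * separation happened. */
static void rq_count_nosplit(struct mlx5e_rq_stats *stats, u32 data_bcnt)
{
	stats->hds_nosplit_packets++;		/* one event per packet */
	stats->hds_nosplit_bytes += data_bcnt;	/* whole packet, unsplit */
}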
......@@ -754,6 +754,8 @@ static const char *cmd_status_str(u8 status)
return "bad resource";
case MLX5_CMD_STAT_RES_BUSY:
return "resource busy";
case MLX5_CMD_STAT_NOT_READY:
return "FW not ready";
case MLX5_CMD_STAT_LIM_ERR:
return "limits exceeded";
case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
......@@ -787,6 +789,7 @@ static int cmd_status_to_err(u8 status)
case MLX5_CMD_STAT_BAD_SYS_STATE_ERR: return -EIO;
case MLX5_CMD_STAT_BAD_RES_ERR: return -EINVAL;
case MLX5_CMD_STAT_RES_BUSY: return -EBUSY;
case MLX5_CMD_STAT_NOT_READY: return -EAGAIN;
case MLX5_CMD_STAT_LIM_ERR: return -ENOMEM;
case MLX5_CMD_STAT_BAD_RES_STATE_ERR: return -EINVAL;
case MLX5_CMD_STAT_IX_ERR: return -EINVAL;
......@@ -815,14 +818,16 @@ EXPORT_SYMBOL(mlx5_cmd_out_err);
static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
{
u16 opcode, op_mod;
u8 status;
u16 uid;
opcode = in_to_opcode(in);
op_mod = MLX5_GET(mbox_in, in, op_mod);
uid = MLX5_GET(mbox_in, in, uid);
status = MLX5_GET(mbox_out, out, status);
if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
opcode != MLX5_CMD_OP_CREATE_UCTX)
opcode != MLX5_CMD_OP_CREATE_UCTX && status != MLX5_CMD_STAT_NOT_READY)
mlx5_cmd_out_err(dev, opcode, op_mod, out);
}
......
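The new status maps to -EAGAIN so callers can treat firmware that is not yet ready as a transient condition rather than a hard failure (and cmd_status_print stays quiet about it). A hedged sketch of what that enables — the retry helper, count, and delay are illustrative, not part of this series:

/* Hypothetical caller-side retry, assuming -EAGAIN now means
 * "FW not ready, try again". Needs <linux/delay.h> for msleep(). */
static int mlx5_cmd_exec_retry(struct mlx5_core_dev *dev, void *in,
			       int inlen, void *out, int outlen)
{
	int retries = 3;
	int err;

	do {
		err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
		if (err != -EAGAIN)
			break;
		msleep(20);	/* illustrative backoff */
	} while (--retries);

	return err;
}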
......@@ -203,10 +203,10 @@ TRACE_EVENT(mlx5_fs_set_fte,
fs_get_obj(__entry->fg, fte->node.parent);
__entry->group_index = __entry->fg->id;
__entry->index = fte->index;
__entry->action = fte->action.action;
__entry->action = fte->act_dests.action.action;
__entry->mask_enable = __entry->fg->mask.match_criteria_enable;
__entry->flow_tag = fte->flow_context.flow_tag;
__entry->flow_source = fte->flow_context.flow_source;
__entry->flow_tag = fte->act_dests.flow_context.flow_tag;
__entry->flow_source = fte->act_dests.flow_context.flow_source;
memcpy(__entry->mask_outer,
MLX5_ADDR_OF(fte_match_param,
&__entry->fg->mask.match_criteria,
......@@ -284,7 +284,7 @@ TRACE_EVENT(mlx5_fs_add_rule,
TP_fast_assign(
__entry->rule = rule;
fs_get_obj(__entry->fte, rule->node.parent);
__entry->index = __entry->fte->dests_size - 1;
__entry->index = __entry->fte->act_dests.dests_size - 1;
__entry->sw_action = rule->sw_action;
memcpy(__entry->destination,
&rule->dest_attr,
......
......@@ -1016,30 +1016,31 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
struct bpf_prog *old_prog;
if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
old_prog = rcu_dereference_protected(rq->xdp_prog,
lockdep_is_held(&rq->priv->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
}
kvfree(rq->dim);
page_pool_destroy(rq->page_pool);
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
mlx5e_rq_free_shampo(rq);
kvfree(rq->mpwqe.info);
mlx5_core_destroy_mkey(rq->mdev, be32_to_cpu(rq->mpwqe.umr_mkey_be));
mlx5e_free_mpwqe_rq_drop_page(rq);
mlx5e_rq_free_shampo(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
mlx5e_free_wqe_alloc_info(rq);
}
kvfree(rq->dim);
xdp_rxq_info_unreg(&rq->xdp_rxq);
page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
if (xdp_rxq_info_is_reg(&rq->xdp_rxq)) {
struct bpf_prog *old_prog;
old_prog = rcu_dereference_protected(rq->xdp_prog,
lockdep_is_held(&rq->priv->state_lock));
if (old_prog)
bpf_prog_put(old_prog);
}
xdp_rxq_info_unreg(&rq->xdp_rxq);
}
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter)
......
......@@ -2346,6 +2346,9 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
stats->hds_nodata_packets++;
stats->hds_nodata_bytes += head_size;
}
} else {
stats->hds_nosplit_packets++;
stats->hds_nosplit_bytes += data_bcnt;
}
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
......
......@@ -144,6 +144,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nosplit_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
......@@ -347,6 +349,8 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_gro_large_hds += rq_stats->gro_large_hds;
s->rx_hds_nodata_packets += rq_stats->hds_nodata_packets;
s->rx_hds_nodata_bytes += rq_stats->hds_nodata_bytes;
s->rx_hds_nosplit_packets += rq_stats->hds_nosplit_packets;
s->rx_hds_nosplit_bytes += rq_stats->hds_nosplit_bytes;
s->rx_ecn_mark += rq_stats->ecn_mark;
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
......@@ -2062,6 +2066,8 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nosplit_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
......
......@@ -156,6 +156,8 @@ struct mlx5e_sw_stats {
u64 rx_gro_large_hds;
u64 rx_hds_nodata_packets;
u64 rx_hds_nodata_bytes;
u64 rx_hds_nosplit_packets;
u64 rx_hds_nosplit_bytes;
u64 rx_mcast_packets;
u64 rx_ecn_mark;
u64 rx_removed_vlan_packets;
......@@ -356,6 +358,8 @@ struct mlx5e_rq_stats {
u64 gro_large_hds;
u64 hds_nodata_packets;
u64 hds_nodata_bytes;
u64 hds_nosplit_packets;
u64 hds_nosplit_bytes;
u64 mcast_packets;
u64 ecn_mark;
u64 removed_vlan_packets;
......
......@@ -896,7 +896,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
if (!mlx5_irq_pool_is_sf_pool(pool))
return comp_irq_request_pci(dev, vecidx);
af_desc.is_managed = 1;
af_desc.is_managed = false;
cpumask_copy(&af_desc.mask, cpu_online_mask);
cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
......
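The flip from a managed to an unmanaged affinity descriptor is what makes SF completion IRQ affinity user-configurable: a managed mask is pinned by the kernel, while an unmanaged one only seeds the IRQ and stays writable via /proc/irq/<n>/smp_affinity. A minimal sketch of the pattern (helper name hypothetical; irq_affinity_desc is the upstream type):

/* Sketch: seed the hint from online CPUs minus already-used ones, but
 * leave the descriptor unmanaged so userspace can still override it. */
static void sf_seed_irq_affinity(struct irq_affinity_desc *af_desc,
				 const struct cpumask *used_cpus)
{
	af_desc->is_managed = false;
	cpumask_copy(&af_desc->mask, cpu_online_mask);
	cpumask_andnot(&af_desc->mask, &af_desc->mask, used_cpus);
}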
......@@ -463,7 +463,7 @@ static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
int num_encap = 0;
*extended_dest = false;
if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
if (!(fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
......@@ -502,17 +502,17 @@ mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
execute_aso[0]);
MLX5_SET(execute_aso, execute_aso, valid, 1);
MLX5_SET(execute_aso, execute_aso, aso_object_id,
fte->action.exe_aso.object_id);
fte->act_dests.action.exe_aso.object_id);
exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
fte->action.exe_aso.return_reg_id);
fte->act_dests.action.exe_aso.return_reg_id);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
fte->action.exe_aso.type);
fte->act_dests.action.exe_aso.type);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
fte->action.exe_aso.flow_meter.init_color);
fte->act_dests.action.exe_aso.flow_meter.init_color);
MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
fte->action.exe_aso.flow_meter.meter_idx);
fte->act_dests.action.exe_aso.flow_meter.meter_idx);
}
static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
......@@ -541,7 +541,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
else
dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->act_dests.dests_size * dst_cnt_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
......@@ -553,7 +553,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
MLX5_SET(set_fte_in, in, ignore_flow_level,
!!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
!!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
MLX5_SET(set_fte_in, in, vport_number, ft->vport);
MLX5_SET(set_fte_in, in, other_vport,
......@@ -563,23 +563,23 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag,
fte->flow_context.flow_tag);
fte->act_dests.flow_context.flow_tag);
MLX5_SET(flow_context, in_flow_context, flow_source,
fte->flow_context.flow_source);
fte->act_dests.flow_context.flow_source);
MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
!!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
!!(fte->act_dests.flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
MLX5_SET(flow_context, in_flow_context, extended_destination,
extended_dest);
action = fte->action.action;
action = fte->act_dests.action.action;
if (extended_dest)
action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
MLX5_SET(flow_context, in_flow_context, action, action);
if (!extended_dest && fte->action.pkt_reformat) {
struct mlx5_pkt_reformat *pkt_reformat = fte->action.pkt_reformat;
if (!extended_dest && fte->act_dests.action.pkt_reformat) {
struct mlx5_pkt_reformat *pkt_reformat = fte->act_dests.action.pkt_reformat;
if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
reformat_id = mlx5_fs_dr_action_get_pkt_reformat_id(pkt_reformat);
......@@ -591,46 +591,46 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
goto err_out;
}
} else {
reformat_id = fte->action.pkt_reformat->id;
reformat_id = fte->act_dests.action.pkt_reformat->id;
}
}
MLX5_SET(flow_context, in_flow_context, packet_reformat_id, (u32)reformat_id);
if (fte->action.modify_hdr) {
if (fte->action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
if (fte->act_dests.action.modify_hdr) {
if (fte->act_dests.action.modify_hdr->owner == MLX5_FLOW_RESOURCE_OWNER_SW) {
mlx5_core_err(dev, "Can't use SW-owned modify_hdr in FW-owned table\n");
err = -EOPNOTSUPP;
goto err_out;
}
MLX5_SET(flow_context, in_flow_context, modify_header_id,
fte->action.modify_hdr->id);
fte->act_dests.action.modify_hdr->id);
}
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
fte->action.crypto.type);
fte->act_dests.action.crypto.type);
MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
fte->action.crypto.obj_id);
fte->act_dests.action.crypto.obj_id);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[0].ethtype);
MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[0].vid);
MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[0].prio);
vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
MLX5_SET(vlan, vlan, ethtype, fte->act_dests.action.vlan[1].ethtype);
MLX5_SET(vlan, vlan, vid, fte->act_dests.action.vlan[1].vid);
MLX5_SET(vlan, vlan, prio, fte->act_dests.action.vlan[1].prio);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, sizeof(fte->val));
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
int list_size = 0;
list_for_each_entry(dst, &fte->node.children, node.list) {
......@@ -706,7 +706,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
log_max_flow_counter,
ft->type));
......@@ -731,8 +731,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
list_size);
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
if (fte->act_dests.action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
} else {
err = -EOPNOTSUPP;
......@@ -1071,7 +1071,7 @@ static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
enum fs_flow_table_type ft_type)
{
return 0;
return MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
}
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
......
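Advertising MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH from the SW-steering command layer lets the flow steering core accept a second FTE with an identical match value ("no append") instead of rejecting it. A hedged sketch of how such a check might look at the core level (the helper is hypothetical; get_capabilities is the real callback shown above):

/* Hypothetical core-side check: only allow a duplicate-match FTE when
 * the backing steering implementation reports the capability. */
static bool fs_cap_duplicate_match(struct mlx5_flow_root_namespace *ns,
				   enum fs_flow_table_type ft_type)
{
	return ns->cmds->get_capabilities(ns, ft_type) &
	       MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
}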
......@@ -124,4 +124,12 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
int mlx5_fs_cmd_set_l2table_entry_silent(struct mlx5_core_dev *dev, u8 silent_mode);
int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, bool disconnect);
static inline bool mlx5_fs_cmd_is_fw_term_table(struct mlx5_flow_table *ft)
{
if (ft->flags & MLX5_FLOW_TABLE_TERMINATION)
return true;
return false;
}
#endif
......@@ -133,6 +133,7 @@ enum mlx5_flow_steering_capabilty {
MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
MLX5_FLOW_STEERING_CAP_MATCH_RANGES = 1UL << 2,
MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH = 1UL << 3,
};
struct mlx5_flow_steering {
......@@ -230,20 +231,29 @@ struct mlx5_ft_underlay_qp {
MLX5_BYTE_OFF(fte_match_param, \
MLX5_FTE_MATCH_PARAM_RESERVED)))
struct fs_fte_action {
int modify_mask;
u32 dests_size;
u32 fwd_dests;
struct mlx5_flow_context flow_context;
struct mlx5_flow_act action;
};
struct fs_fte_dup {
struct list_head children;
struct fs_fte_action act_dests;
};
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
struct mlx5_fs_dr_rule fs_dr_rule;
u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
u32 dests_size;
u32 fwd_dests;
struct fs_fte_action act_dests;
struct fs_fte_dup *dup;
u32 index;
struct mlx5_flow_context flow_context;
struct mlx5_flow_act action;
enum fs_fte_status status;
struct mlx5_fc *counter;
struct rhash_head hash;
int modify_mask;
};
/* Type of children is mlx5_flow_table/namespace */
......
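Grouping the per-FTE action and destination bookkeeping into struct fs_fte_action lets a duplicate entry (struct fs_fte_dup) carry its own copy alongside the primary one, which is what the act_dests renames throughout this series prepare for. A minimal sketch of how code might select between the two (accessor hypothetical, not in this series):

/* Hypothetical accessor: prefer the duplicate's action set when one is
 * attached, otherwise fall back to the FTE's own. */
static struct fs_fte_action *fte_act_dests(struct fs_fte *fte)
{
	return fte->dup ? &fte->dup->act_dests : &fte->act_dests;
}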
......@@ -26,6 +26,7 @@ struct mlx5_fw_reset {
struct work_struct reset_now_work;
struct work_struct reset_abort_work;
unsigned long reset_flags;
u8 reset_method;
struct timer_list timer;
struct completion done;
int ret;
......@@ -95,7 +96,7 @@ static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
}
static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
u8 *reset_type, u8 *reset_state)
u8 *reset_type, u8 *reset_state, u8 *reset_method)
{
u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
......@@ -111,13 +112,26 @@ static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level,
*reset_type = MLX5_GET(mfrl_reg, out, reset_type);
if (reset_state)
*reset_state = MLX5_GET(mfrl_reg, out, reset_state);
if (reset_method)
*reset_method = MLX5_GET(mfrl_reg, out, pci_reset_req_method);
return 0;
}
int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
{
return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL);
return mlx5_reg_mfrl_query(dev, reset_level, reset_type, NULL, NULL);
}
static int mlx5_fw_reset_get_reset_method(struct mlx5_core_dev *dev,
u8 *reset_method)
{
if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method)) {
*reset_method = MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE;
return 0;
}
return mlx5_reg_mfrl_query(dev, NULL, NULL, NULL, reset_method);
}
static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
......@@ -125,7 +139,7 @@ static int mlx5_fw_reset_get_reset_state_err(struct mlx5_core_dev *dev,
{
u8 reset_state;
if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state))
if (mlx5_reg_mfrl_query(dev, NULL, NULL, &reset_state, NULL))
goto out;
if (!reset_state)
......@@ -398,7 +412,8 @@ static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
return 0;
}
static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev,
u8 reset_method)
{
u16 dev_id;
int err;
......@@ -409,9 +424,11 @@ static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
}
#if IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)
err = mlx5_check_hotplug_interrupt(dev);
if (err)
return false;
if (reset_method != MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET) {
err = mlx5_check_hotplug_interrupt(dev);
if (err)
return false;
}
#endif
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
......@@ -427,8 +444,12 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
struct mlx5_core_dev *dev = fw_reset->dev;
int err;
if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
!mlx5_is_reset_now_capable(dev)) {
err = mlx5_fw_reset_get_reset_method(dev, &fw_reset->reset_method);
if (err)
mlx5_core_warn(dev, "Failed reading MFRL, err %d\n", err);
if (err || test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
!mlx5_is_reset_now_capable(dev, fw_reset->reset_method)) {
err = mlx5_fw_reset_set_reset_sync_nack(dev);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s",
err ? "Failed" : "Sent");
......@@ -444,21 +465,15 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n");
}
static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev, u16 dev_id)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
struct pci_dev *bridge = bridge_bus->self;
unsigned long timeout;
struct pci_dev *sdev;
u16 reg16, dev_id;
int cap, err;
u16 reg16;
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
if (err)
return pcibios_err_to_errno(err);
err = mlx5_check_dev_ids(dev, dev_id);
if (err)
return err;
cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
if (!cap)
return -EOPNOTSUPP;
......@@ -528,6 +543,44 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
return err;
}
static int mlx5_pci_reset_bus(struct mlx5_core_dev *dev)
{
if (!MLX5_CAP_GEN(dev, pcie_reset_using_hotreset_method))
return -EOPNOTSUPP;
return pci_reset_bus(dev->pdev);
}
static int mlx5_sync_pci_reset(struct mlx5_core_dev *dev, u8 reset_method)
{
u16 dev_id;
int err;
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
if (err)
return pcibios_err_to_errno(err);
err = mlx5_check_dev_ids(dev, dev_id);
if (err)
return err;
switch (reset_method) {
case MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE:
err = mlx5_pci_link_toggle(dev, dev_id);
if (err)
mlx5_core_warn(dev, "mlx5_pci_link_toggle failed\n");
break;
case MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET:
err = mlx5_pci_reset_bus(dev);
if (err)
mlx5_core_warn(dev, "mlx5_pci_reset_bus failed\n");
break;
default:
return -EOPNOTSUPP;
}
return err;
}
static void mlx5_sync_reset_now_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
......@@ -546,9 +599,9 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
goto done;
}
err = mlx5_pci_link_toggle(dev);
err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
if (err) {
mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err);
mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, no reset done, err %d\n", err);
set_bit(MLX5_FW_RESET_FLAGS_RELOAD_REQUIRED, &fw_reset->reset_flags);
}
......@@ -610,9 +663,9 @@ static void mlx5_sync_reset_unload_event(struct work_struct *work)
mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state);
if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
err = mlx5_pci_link_toggle(dev);
err = mlx5_sync_pci_reset(dev, fw_reset->reset_method);
if (err) {
mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, err %d\n", err);
mlx5_core_warn(dev, "mlx5_sync_pci_reset failed, err %d\n", err);
fw_reset->ret = err;
}
}
......
......@@ -619,6 +619,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_with_driver_unload))
MLX5_SET(cmd_hca_cap, set_hca_cap,
pci_sync_for_fw_update_with_driver_unload, 1);
if (MLX5_CAP_GEN_MAX(dev, pcie_reset_using_hotreset_method))
MLX5_SET(cmd_hca_cap, set_hca_cap,
pcie_reset_using_hotreset_method, 1);
if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
MLX5_SET(cmd_hca_cap,
......
......@@ -967,7 +967,7 @@ int mlx5hws_matcher_attach_at(struct mlx5hws_matcher *matcher,
ret = hws_matcher_check_and_process_at(matcher, at);
if (ret)
return -ret;
return ret;
required_stes = at->num_of_action_stes - (!is_jumbo || at->only_term);
if (matcher->action_ste[MLX5HWS_ACTION_STE_IDX_ANY].max_stes < required_stes) {
......
......@@ -751,11 +751,11 @@ int mlx5hws_rule_destroy(struct mlx5hws_rule *rule,
ret = hws_rule_enqueue_precheck(rule, attr);
if (unlikely(ret))
return -ret;
return ret;
ret = hws_rule_destroy_hws(rule, attr);
return -ret;
return ret;
}
int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
......@@ -767,7 +767,7 @@ int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
ret = hws_rule_enqueue_precheck_update(rule, attr);
if (unlikely(ret))
return -ret;
return ret;
ret = hws_rule_create_hws(rule,
attr,
......@@ -776,5 +776,5 @@ int mlx5hws_rule_action_update(struct mlx5hws_rule *rule,
at_idx,
rule_actions);
return -ret;
return ret;
}
......@@ -489,5 +489,5 @@ int mlx5hws_table_set_default_miss(struct mlx5hws_table *tbl,
return 0;
out:
mutex_unlock(&ctx->ctrl_lock);
return -ret;
return ret;
}
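The recurring change from "return -ret" to "return ret" in the HWS hunks above fixes a sign error: these helpers already return negative errnos, so negating them again handed callers positive values. A small standalone illustration of the bug (not driver code):

#include <linux/errno.h>

/* helper() follows the kernel convention of returning a negative errno */
static int helper(void)
{
	return -EINVAL;
}

static int caller(void)
{
	int ret = helper();

	return ret;	/* was "return -ret", i.e. +EINVAL: still truthy
			 * for "if (err)" but breaks "err < 0" checks and
			 * errno decoding */
}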
......@@ -1449,6 +1449,7 @@ enum {
MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
MLX5_CMD_STAT_RES_BUSY = 0x6,
MLX5_CMD_STAT_NOT_READY = 0x7,
MLX5_CMD_STAT_LIM_ERR = 0x8,
MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
MLX5_CMD_STAT_IX_ERR = 0xa,
......
......@@ -342,4 +342,7 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
struct mlx5_pkt_reformat *reformat);
u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
struct mlx5_flow_root_namespace *
mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type);
#endif
......@@ -1856,7 +1856,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_328[0x2];
u8 relaxed_ordering_read[0x1];
u8 log_max_pd[0x5];
u8 reserved_at_330[0x6];
u8 reserved_at_330[0x5];
u8 pcie_reset_using_hotreset_method[0x1];
u8 pci_sync_for_fw_update_with_driver_unload[0x1];
u8 vnic_env_cnt_steering_fail[0x1];
u8 vport_counter_local_loopback[0x1];
......@@ -11188,6 +11189,11 @@ struct mlx5_ifc_mcda_reg_bits {
u8 data[][0x20];
};
enum {
MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE = 0,
MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET = 1,
};
enum {
MLX5_MFRL_REG_RESET_STATE_IDLE = 0,
MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1,
......@@ -11215,7 +11221,8 @@ struct mlx5_ifc_mfrl_reg_bits {
u8 pci_sync_for_fw_update_start[0x1];
u8 pci_sync_for_fw_update_resp[0x2];
u8 rst_type_sel[0x3];
u8 reserved_at_28[0x4];
u8 pci_reset_req_method[0x3];
u8 reserved_at_2b[0x1];
u8 reset_state[0x4];
u8 reset_type[0x8];
u8 reset_level[0x8];
......