Commit 4326d04f authored by David S. Miller

Merge tag 'mlx5-fixes-2021-11-30' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-11-30

This series provides bug fixes to mlx5 driver.
Please pull and let me know if there is any problem.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 74b95b07 8c8cf038
@@ -341,6 +341,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_DEALLOC_SF:
 	case MLX5_CMD_OP_DESTROY_UCTX:
 	case MLX5_CMD_OP_DESTROY_UMEM:
+	case MLX5_CMD_OP_MODIFY_RQT:
 		return MLX5_CMD_STAT_OK;
 	case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -446,7 +447,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
 	case MLX5_CMD_OP_MODIFY_TIS:
 	case MLX5_CMD_OP_QUERY_TIS:
 	case MLX5_CMD_OP_CREATE_RQT:
-	case MLX5_CMD_OP_MODIFY_RQT:
 	case MLX5_CMD_OP_QUERY_RQT:
 	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
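These two hunks move MLX5_CMD_OP_MODIFY_RQT from the failing group into the MLX5_CMD_STAT_OK group of mlx5_internal_err_ret_value(), so an RQT modification issued while the device is in an internal error state is tolerated instead of reported as a failure. A minimal sketch of the dispatch shape being edited (simplified: the real helper also sets a syndrome and a status out-parameter):

	/* Sketch only: per-opcode policy while the device is in internal
	 * error state. Teardown-style commands report success so recovery
	 * can proceed; everything else fails with -EIO. */
	static int internal_err_ret_value_sketch(u16 op)
	{
		switch (op) {
		case MLX5_CMD_OP_DESTROY_UCTX:
		case MLX5_CMD_OP_DESTROY_UMEM:
		case MLX5_CMD_OP_MODIFY_RQT:	/* newly tolerated */
			return MLX5_CMD_STAT_OK;
		default:
			return -EIO;
		}
	}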
@@ -13,6 +13,9 @@ struct mlx5e_rx_res {
 	unsigned int max_nch;
 	u32 drop_rqn;
 	struct mlx5e_packet_merge_param pkt_merge_param;
+	struct rw_semaphore pkt_merge_param_sem;
+
 	struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS];
 	bool rss_active;
 	u32 rss_rqns[MLX5E_INDIR_RQT_SIZE];
@@ -392,6 +395,7 @@ static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res)
 	if (err)
 		goto out;

+	/* Separated from the channels RQs, does not share pkt_merge state with them */
 	mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
 				    mlx5e_rqt_get_rqtn(&res->ptp.rqt),
 				    inner_ft_support);
@@ -447,6 +451,9 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
 	res->max_nch = max_nch;
 	res->drop_rqn = drop_rqn;
 	res->pkt_merge_param = *init_pkt_merge_param;
+	init_rwsem(&res->pkt_merge_param_sem);
+
 	err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
 	if (err)
 		goto err_out;
@@ -513,7 +520,7 @@ u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res)
 	return mlx5e_tir_get_tirn(&res->ptp.tir);
 }

-u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
+static u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix)
 {
 	return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt);
 }
@@ -656,6 +663,9 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
 	if (!builder)
 		return -ENOMEM;

+	down_write(&res->pkt_merge_param_sem);
 	res->pkt_merge_param = *pkt_merge_param;
 	mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);

 	final_err = 0;
@@ -681,6 +691,7 @@ int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
 		}
 	}

+	up_write(&res->pkt_merge_param_sem);
 	mlx5e_tir_builder_free(builder);
 	return final_err;
 }
@@ -689,3 +700,31 @@ struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *
 {
 	return mlx5e_rss_get_hash(res->rss[0]);
 }
+
+int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
+				struct mlx5e_tir *tir)
+{
+	bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
+	struct mlx5e_tir_builder *builder;
+	u32 rqtn;
+	int err;
+
+	builder = mlx5e_tir_builder_alloc(false);
+	if (!builder)
+		return -ENOMEM;
+
+	rqtn = mlx5e_rx_res_get_rqtn_direct(res, rxq);
+	mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, rqtn,
+				    inner_ft_support);
+	mlx5e_tir_builder_build_direct(builder);
+	mlx5e_tir_builder_build_tls(builder);
+	down_read(&res->pkt_merge_param_sem);
+	mlx5e_tir_builder_build_packet_merge(builder, &res->pkt_merge_param);
+	err = mlx5e_tir_init(tir, builder, res->mdev, false);
+	up_read(&res->pkt_merge_param_sem);
+	mlx5e_tir_builder_free(builder);
+
+	return err;
+}
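Taken together, these rx_res.c hunks establish a reader-writer discipline around pkt_merge_param: the configuration path (mlx5e_rx_res_packet_merge_set_param()) holds the semaphore for writing while it updates the stored parameters and modifies the existing TIRs, and the new TLS TIR constructor holds it for reading while it snapshots those parameters into a freshly built TIR. A minimal standalone sketch of the same pattern — the struct and function names below are hypothetical stand-ins, not the mlx5 API:

	#include <linux/rwsem.h>

	struct merge_params { u32 timeout; };	/* stand-in payload */

	struct rx_resources {
		struct rw_semaphore sem;	/* serializes updates vs. snapshots */
		struct merge_params params;	/* init_rwsem() at setup omitted */
	};

	/* Config path: exclusive while the shared parameters (and the HW
	 * objects derived from them) change. */
	static void update_params(struct rx_resources *r, const struct merge_params *p)
	{
		down_write(&r->sem);
		r->params = *p;
		/* ... re-program existing HW objects here ... */
		up_write(&r->sem);
	}

	/* Creation path: shared access suffices to take a consistent
	 * snapshot that a new HW object is built from. */
	static void snapshot_params(struct rx_resources *r, struct merge_params *out)
	{
		down_read(&r->sem);
		*out = r->params;
		up_read(&r->sem);
	}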
@@ -37,9 +37,6 @@ u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types
 u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
 u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res);

-/* RQTN getters for modules that create their own TIRs */
-u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix);
-
 /* Activate/deactivate API */
 void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs);
 void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res);
@@ -69,4 +66,7 @@ struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx);
 /* Workaround for hairpin */
 struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res);

+/* Accel TIRs */
+int mlx5e_rx_res_tls_tir_create(struct mlx5e_rx_res *res, unsigned int rxq,
+				struct mlx5e_tir *tir);
+
 #endif /* __MLX5_EN_RX_RES_H__ */
@@ -191,7 +191,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
 		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
 		eseg->swp_inner_l4_offset =
 			(skb->csum_start + skb->head - skb->data) / 2;
-		if (skb->protocol == htons(ETH_P_IPV6))
+		if (inner_ip_hdr(skb)->version == 6)
 			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
 		break;
 	default:
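The point of this one-liner: for encapsulated traffic, skb->protocol carries the outer ethertype, so a tunnel with an IPv6 payload inside an IPv4 outer header would never match ETH_P_IPV6 and the inner-L3-is-IPv6 software-parser flag would be left unset. The inner IP header's own version nibble is the authoritative signal. A hedged illustration of the distinction (the helper name below is hypothetical; inner_ip_hdr() itself is a real helper from <linux/ip.h>):

	#include <linux/ip.h>
	#include <linux/skbuff.h>

	/* Illustrative: classify the encapsulated L3 header by its own
	 * version field (4 or 6) instead of trusting skb->protocol,
	 * which describes the outermost header only. */
	static bool sketch_inner_is_ipv6(const struct sk_buff *skb)
	{
		return inner_ip_hdr(skb)->version == 6;
	}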
@@ -100,25 +100,6 @@ mlx5e_ktls_rx_resync_create_resp_list(void)
 	return resp_list;
 }

-static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn)
-{
-	struct mlx5e_tir_builder *builder;
-	int err;
-
-	builder = mlx5e_tir_builder_alloc(false);
-	if (!builder)
-		return -ENOMEM;
-
-	mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false);
-	mlx5e_tir_builder_build_direct(builder);
-	mlx5e_tir_builder_build_tls(builder);
-	err = mlx5e_tir_init(tir, builder, mdev, false);
-	mlx5e_tir_builder_free(builder);
-
-	return err;
-}
-
 static void accel_rule_handle_work(struct work_struct *work)
 {
 	struct mlx5e_ktls_offload_context_rx *priv_rx;
@@ -609,7 +590,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
 	struct mlx5_core_dev *mdev;
 	struct mlx5e_priv *priv;
 	int rxq, err;
-	u32 rqtn;

 	tls_ctx = tls_get_ctx(sk);
 	priv = netdev_priv(netdev);
@@ -635,9 +615,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk,
 	priv_rx->sw_stats = &priv->tls->sw_stats;
 	mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx);

-	rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq);
-
-	err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn);
+	err = mlx5e_rx_res_tls_tir_create(priv->rx_res, rxq, &priv_rx->tir);
 	if (err)
 		goto err_create_tir;
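These kTLS hunks are the consumer side of the rx_res change above: the local mlx5e_ktls_create_tir() copy is deleted, raw RQTNs no longer leak out of the rx_res module (the getter became static), and TLS TIRs are built through mlx5e_rx_res_tls_tir_create(), which snapshots the packet-merge parameters under the new semaphore as sketched earlier.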
@@ -1080,6 +1080,10 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
 	&MLX5E_STATS_GRP(pme),
 	&MLX5E_STATS_GRP(channels),
 	&MLX5E_STATS_GRP(per_port_buff_congest),
+#ifdef CONFIG_MLX5_EN_IPSEC
+	&MLX5E_STATS_GRP(ipsec_sw),
+	&MLX5E_STATS_GRP(ipsec_hw),
+#endif
 };

 static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
@@ -543,13 +543,13 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
 					  u16 klm_entries, u16 index)
 {
 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
-	u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries;
+	u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
 	u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
 	struct page *page = shampo->last_page;
 	u64 addr = shampo->last_addr;
 	struct mlx5e_dma_info *dma_info;
 	struct mlx5e_umr_wqe *umr_wqe;
-	int headroom;
+	int headroom, i;

 	headroom = rq->buff.headroom;
 	new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
@@ -601,9 +601,7 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
 err_unmap:
 	while (--i >= 0) {
-		if (--index < 0)
-			index = shampo->hd_per_wq - 1;
-		dma_info = &shampo->info[index];
+		dma_info = &shampo->info[--index];
 		if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
 			dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
 			mlx5e_page_release(rq, dma_info, true);
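The declaration change is the substance of this fix: with i declared u16, the unwind condition --i >= 0 is a tautology, because the decrement wraps to 65535 instead of going negative, so the error loop could never terminate (the index unwind is also folded into the array access). A self-contained userspace demonstration of the trap, illustrative only:

	#include <stdio.h>

	int main(void)
	{
		int s = 2;
		unsigned short u = 2;	/* same width as the kernel's u16 */
		int guard = 0;

		while (--s >= 0)	/* signed: runs for 1, 0, then stops at -1 */
			printf("signed   i=%d\n", s);

		while (--u >= 0) {	/* unsigned: 0 wraps to 65535, never negative */
			printf("unsigned i=%hu\n", u);
			if (++guard == 4)	/* cut the otherwise-endless loop */
				break;
		}
		return 0;
	}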
@@ -130,7 +130,7 @@ static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw,
 	/* If vports min rate divider is 0 but their group has bw_share configured, then
 	 * need to set bw_share for vports to minimal value.
 	 */
-	if (!group_level && !max_guarantee && group->bw_share)
+	if (!group_level && !max_guarantee && group && group->bw_share)
 		return 1;
 	return 0;
 }
@@ -423,7 +423,7 @@ static int esw_qos_vport_update_group(struct mlx5_eswitch *esw,
 		return err;

 	/* Recalculate bw share weights of old and new groups */
-	if (vport->qos.bw_share) {
+	if (vport->qos.bw_share || new_group->bw_share) {
 		esw_qos_normalize_vports_min_rate(esw, curr_group, extack);
 		esw_qos_normalize_vports_min_rate(esw, new_group, extack);
 	}
@@ -329,14 +329,25 @@ static bool
 esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
 {
 	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+	bool result = false;
 	int i;

-	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+	/* Indirect table is supported only for flows with in_port uplink
+	 * and the destination is vport on the same eswitch as the uplink,
+	 * return false in case at least one of destinations doesn't meet
+	 * this criteria.
+	 */
+	for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
 		if (esw_attr->dests[i].rep &&
 		    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
-						esw_attr->dests[i].mdev))
-			return true;
-	return false;
+						esw_attr->dests[i].mdev)) {
+			result = true;
+		} else {
+			result = false;
+			break;
+		}
+	}
+	return result;
 }

 static int
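The rewritten loop flips the predicate from "any destination may use the indirect table" to "all destinations must qualify": a single non-qualifying destination now breaks out with result == false, and an empty destination range also yields false. The equivalent all-of shape, with hypothetical names (struct dest and dest_qualifies() are stand-ins, not mlx5 symbols):

	#include <stdbool.h>

	struct dest { bool ok; };			/* hypothetical stand-in */

	static bool dest_qualifies(const struct dest *d)	/* stand-in check */
	{
		return d->ok;
	}

	/* "All destinations qualify"; an empty range reports false,
	 * matching the rewritten loop above. */
	static bool all_dests_qualify(const struct dest *dests, int first, int count)
	{
		bool result = false;
		int i;

		for (i = first; i < count; i++) {
			if (!dest_qualifies(&dests[i]))
				return false;
			result = true;
		}
		return result;
	}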
@@ -2512,6 +2523,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
 	struct mlx5_eswitch *esw = master->priv.eswitch;
 	struct mlx5_flow_table_attr ft_attr = {
 		.max_fte = 1, .prio = 0, .level = 0,
+		.flags = MLX5_FLOW_TABLE_OTHER_VPORT,
 	};
 	struct mlx5_flow_namespace *egress_ns;
 	struct mlx5_flow_table *acl;
@@ -835,6 +835,9 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
 	health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
 	add_timer(&health->timer);
+
+	if (mlx5_core_is_pf(dev) && MLX5_CAP_MCAM_REG(dev, mrtc))
+		queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
 }

 void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
@@ -902,8 +905,6 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
 	INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
 	INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
 	INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
-	if (mlx5_core_is_pf(dev))
-		queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);

 	return 0;
@@ -608,4 +608,5 @@ void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
 	if (port_sel->tunnel)
 		mlx5_destroy_ttc_table(port_sel->inner.ttc);
 	mlx5_lag_destroy_definers(ldev);
+	memset(port_sel, 0, sizeof(*port_sel));
 }
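The memset guards against stale state on re-creation: port_sel is embedded in the long-lived LAG structure, so flags such as port_sel->tunnel left over from a previous instantiation would otherwise steer the next create/destroy cycle down the wrong paths. Zeroing embedded state on destroy is a common defensive pattern; an illustrative shape with hypothetical names:

	#include <string.h>
	#include <stdbool.h>

	struct port_sel_state {		/* hypothetical embedded state */
		bool tunnel;
		void *inner_ttc;
	};

	static void port_sel_destroy_sketch(struct port_sel_state *ps)
	{
		/* ... release resources referenced by *ps ... */
		memset(ps, 0, sizeof(*ps));	/* next create starts clean */
	}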
@@ -31,11 +31,11 @@ static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_type
 	dev->timeouts->to[type] = val;
 }

-static void tout_set_def_val(struct mlx5_core_dev *dev)
+void mlx5_tout_set_def_val(struct mlx5_core_dev *dev)
 {
 	int i;

-	for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++)
+	for (i = 0; i < MAX_TIMEOUT_TYPES; i++)
 		tout_set(dev, tout_def_sw_val[i], i);
 }
@@ -45,7 +45,6 @@ int mlx5_tout_init(struct mlx5_core_dev *dev)
 	if (!dev->timeouts)
 		return -ENOMEM;

-	tout_set_def_val(dev);
 	return 0;
 }
@@ -34,6 +34,7 @@ int mlx5_tout_init(struct mlx5_core_dev *dev);
 void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
 void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
 int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
+void mlx5_tout_set_def_val(struct mlx5_core_dev *dev);
 u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);

 #define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
@@ -992,11 +992,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
 	if (mlx5_core_is_pf(dev))
 		pcie_print_link_status(dev->pdev);

-	err = mlx5_tout_init(dev);
-	if (err) {
-		mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
-		return err;
-	}
+	mlx5_tout_set_def_val(dev);

 	/* wait for firmware to accept initialization segments configurations
 	 */
@@ -1005,13 +1001,13 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
 	if (err) {
 		mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
 			      mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
-		goto err_tout_cleanup;
+		return err;
 	}

 	err = mlx5_cmd_init(dev);
 	if (err) {
 		mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
-		goto err_tout_cleanup;
+		return err;
 	}

 	mlx5_tout_query_iseg(dev);
@@ -1075,18 +1071,16 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
 	mlx5_set_driver_version(dev);

-	mlx5_start_health_poll(dev);
-
 	err = mlx5_query_hca_caps(dev);
 	if (err) {
 		mlx5_core_err(dev, "query hca failed\n");
-		goto stop_health;
+		goto reclaim_boot_pages;
 	}

+	mlx5_start_health_poll(dev);
+
 	return 0;

-stop_health:
-	mlx5_stop_health_poll(dev, boot);
 reclaim_boot_pages:
 	mlx5_reclaim_startup_pages(dev);

 err_disable_hca:
@@ -1094,8 +1088,6 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
 err_cmd_cleanup:
 	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
 	mlx5_cmd_cleanup(dev);
-err_tout_cleanup:
-	mlx5_tout_cleanup(dev);

 	return err;
 }
@@ -1114,7 +1106,6 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
 	mlx5_core_disable_hca(dev, 0);
 	mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
 	mlx5_cmd_cleanup(dev);
-	mlx5_tout_cleanup(dev);

 	return 0;
 }
@@ -1476,6 +1467,12 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
 					    mlx5_debugfs_root);
 	INIT_LIST_HEAD(&priv->traps);

+	err = mlx5_tout_init(dev);
+	if (err) {
+		mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
+		goto err_timeout_init;
+	}
+
 	err = mlx5_health_init(dev);
 	if (err)
 		goto err_health_init;
@@ -1501,6 +1498,8 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
 err_pagealloc_init:
 	mlx5_health_cleanup(dev);
 err_health_init:
+	mlx5_tout_cleanup(dev);
+err_timeout_init:
 	debugfs_remove(dev->priv.dbg_root);
 	mutex_destroy(&priv->pgdir_mutex);
 	mutex_destroy(&priv->alloc_mutex);
@@ -1518,6 +1517,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
 	mlx5_adev_cleanup(dev);
 	mlx5_pagealloc_cleanup(dev);
 	mlx5_health_cleanup(dev);
+	mlx5_tout_cleanup(dev);
 	debugfs_remove_recursive(dev->priv.dbg_root);
 	mutex_destroy(&priv->pgdir_mutex);
 	mutex_destroy(&priv->alloc_mutex);
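The common thread in these main.c hunks: timeout bookkeeping moves from mlx5_function_setup()/teardown(), which run on every reload, into mlx5_mdev_init()/uninit(), which run once per device lifetime, and mlx5_start_health_poll() now runs only after mlx5_query_hca_caps() so its MLX5_CAP_MCAM_REG(dev, mrtc) check reads valid capabilities. The error labels are re-paired accordingly; a compressed sketch of the restored acquire/release symmetry (wrapper name hypothetical, helpers abridged):

	/* Sketch: each *_init() taken in mlx5_mdev_init() is undone in
	 * reverse order, both on the error path here and again in
	 * mlx5_mdev_uninit(). */
	static int mdev_init_sketch(struct mlx5_core_dev *dev)
	{
		int err;

		err = mlx5_tout_init(dev);	/* moved here from function_setup() */
		if (err)
			return err;

		err = mlx5_health_init(dev);
		if (err)
			goto err_health_init;

		return 0;

	err_health_init:
		mlx5_tout_cleanup(dev);		/* reverse-order unwind */
		return err;
	}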
@@ -9698,7 +9698,10 @@ struct mlx5_ifc_mcam_access_reg_bits {
 	u8         regs_84_to_68[0x11];
 	u8         tracer_registers[0x4];

-	u8         regs_63_to_32[0x20];
+	u8         regs_63_to_46[0x12];
+	u8         mrtc[0x1];
+	u8         regs_44_to_32[0xd];
+
 	u8         regs_31_to_0[0x20];
 };
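This carves the mrtc capability bit (bit 45, consumed by the MLX5_CAP_MCAM_REG(dev, mrtc) check added in the health-poll hunk) out of the previously reserved regs_63_to_32 span. The arithmetic must preserve the span's width so the fields that follow keep their offsets: 0x12 + 0x1 + 0xd = 18 + 1 + 13 = 32 = 0x20 bits. A compile-time check of that invariant could look like this (illustrative; mlx5_ifc.h carries no such assert):

	/* Bits 63..46 (0x12) + bit 45 (0x1) + bits 44..32 (0xd) must
	 * still cover the original 0x20-bit reserved span. */
	_Static_assert(0x12 + 0x1 + 0xd == 0x20,
		       "mrtc split changes mlx5_ifc_mcam_access_reg_bits layout");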