Commit 05f1e35a authored by Jakub Kicinski

Merge tag 'mlx5-updates-2021-09-30' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-09-30

1) From Yevgeny Kliteynik:

This patch series deals with vport handling in SW steering.

For every vport, SW steering queries FW for that vport's properties,
such as its RX/TX ICM addresses, so that the vport can be used as a
destination action.
The following patches rework vport capabilities management and add
support for Scalable Functions (SFs).
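
As an illustrative call site (not part of the series itself):
forwarding to a vport, including an SF once this series is in, goes
through the dest-vport action below; its vport argument becomes a u16
in patch 1.

	struct mlx5dr_action *dest;

	/* vhca_id is only consulted when vhca_id_valid is set, i.e. when
	 * the destination vport belongs to another eswitch.
	 */
	dest = mlx5dr_action_create_dest_vport(dmn, vport, 0, 0);
	if (!dest)
		/* vport is disabled or invalid */
		return NULL;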

 - Patch 1 fixes the vport number data type all over the DR code to 16 bits
   in accordance with HW spec.
 - Patch 2 replaces local SW steering WIRE_PORT macro with the existing
   mlx5 define.
 - Patch 3 adds the missing query for vport 0 and handles eswitch manager
   capabilities for ECPF (BlueField in embedded CPU mode).
 - Patch 4 fixes error messages for failure to obtain vport caps from
   different locations in the code to have the same verbosity level and
   similar wording.
 - Patch 5 adds support for csum recalculation flow tables on SFs: it
   moves management of these FTs from a fixed-size array to an XArray,
   thus supporting a csum recalculation table for any valid vport (see
   the sketch after this list).
 - Patch 6 is the main patch of this whole series: it refactors vport
   capabilities handling and adds SF support.
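
Patches 5 and 6 converge on the same lookup-or-create pattern over an
XArray. Roughly, with error handling trimmed (a simplified sketch of
the dr_domain.c flow in the diff below, not the verbatim code):

	struct mlx5dr_cmd_vport_cap *
	get_vport_cap_sketch(struct mlx5dr_domain *dmn, u16 vport)
	{
		struct xarray *xa = &dmn->info.caps.vports.vports_caps_xa;
		struct mlx5dr_cmd_vport_cap *caps;
		int err;

	retry:
		caps = xa_load(xa, vport);	/* fast path: already cached */
		if (caps)
			return caps;

		caps = kvzalloc(sizeof(*caps), GFP_KERNEL);
		if (!caps)
			return NULL;

		/* ... query FW for the vport's RX/TX ICM addresses and gvmi ... */

		err = xa_insert(xa, vport, caps, GFP_KERNEL);
		if (err == -EBUSY) {
			/* lost a race: another thread already stored the caps */
			kvfree(caps);
			goto retry;
		}
		if (err) {
			kvfree(caps);
			return NULL;
		}
		return caps;
	}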

2) Minor and trivial updates and cleanups
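
Several of these swap open-coded size arithmetic for the
overflow-checked helpers from <linux/overflow.h>. The general
before/after shape (a composite sketch, not any one hunk verbatim):

	/* before: the multiplications can overflow silently */
	flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
	bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
			GFP_KERNEL);

	/* after: array_size()/struct_size() saturate at SIZE_MAX on
	 * overflow, so the allocation fails instead of being undersized
	 */
	flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL);
	bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);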

* tag 'mlx5-updates-2021-09-30' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Use array_size() helper
  net/mlx5: Use struct_size() helper in kvzalloc()
  net/mlx5: Use kvcalloc() instead of kvzalloc()
  net/mlx5: Tolerate failures in debug features while driver load
  net/mlx5: Warn for devlink reload when there are VFs alive
  net/mlx5: DR, Add missing string for action type SAMPLER
  net/mlx5: DR, init_next_match only if needed
  net/mlx5: DR, Fix typo 'offeset' to 'offset'
  net/mlx5: DR, Increase supported num of actions to 32
  net/mlx5: DR, Add support for SF vports
  net/mlx5: DR, Support csum recalculation flow table on SFs
  net/mlx5: DR, Align error messages for failure to obtain vport caps
  net/mlx5: DR, Add missing query for vport 0
  net/mlx5: DR, Replace local WIRE_PORT macro with the existing MLX5_VPORT_UPLINK
  net/mlx5: DR, Fix vport number data type to u16
====================

Link: https://lore.kernel.org/r/20210930232050.41779-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 10d48705 51984c9e
drivers/net/ethernet/mellanox/mlx5/core/cmd.c

@@ -2058,7 +2058,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
 		return -EINVAL;
 	}

-	cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL);
+	cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
 	if (!cmd->stats)
 		return -ENOMEM;
drivers/net/ethernet/mellanox/mlx5/core/devlink.c

@@ -136,6 +136,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
 				    struct netlink_ext_ack *extack)
 {
 	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct pci_dev *pdev = dev->pdev;
 	bool sf_dev_allocated;

 	sf_dev_allocated = mlx5_sf_dev_allocated(dev);
@@ -153,6 +154,10 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
 		return -EOPNOTSUPP;
 	}

+	if (pci_num_vf(pdev)) {
+		NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
+	}
+
 	switch (action) {
 	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
 		mlx5_unload_one(dev);
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c

@@ -930,9 +930,10 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
 	struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
 	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
 	int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+	size_t size;

-	xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
-				      GFP_KERNEL, numa);
+	size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq);
+	xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
 	if (!xdpi_fifo->xi)
 		return -ENOMEM;
@@ -946,10 +947,11 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
 {
 	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+	size_t size;
 	int err;

-	sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
-					GFP_KERNEL, numa);
+	size = array_size(sizeof(*sq->db.wqe_info), wq_sz);
+	sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
 	if (!sq->db.wqe_info)
 		return -ENOMEM;
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

@@ -1009,7 +1009,7 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
 	u16 vport_num;

 	num_vfs = esw->esw_funcs.num_vfs;
-	flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
+	flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL);
 	if (!flows)
 		return -ENOMEM;
@@ -1188,7 +1188,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
 	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

-	flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
+	flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
 	if (!flows) {
 		err = -ENOMEM;
 		goto alloc_flows_err;
drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c

@@ -497,8 +497,7 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
 	alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
 	bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;

-	bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
-			GFP_KERNEL);
+	bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
 	if (!bulk)
 		goto err_alloc_bulk;
drivers/net/ethernet/mellanox/mlx5/core/main.c

@@ -1112,8 +1112,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
 	err = mlx5_fw_tracer_init(dev->tracer);
 	if (err) {
-		mlx5_core_err(dev, "Failed to init FW tracer\n");
-		goto err_fw_tracer;
+		mlx5_core_err(dev, "Failed to init FW tracer %d\n", err);
+		mlx5_fw_tracer_destroy(dev->tracer);
+		dev->tracer = NULL;
 	}

 	mlx5_fw_reset_events_start(dev);
@@ -1121,8 +1122,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
 	err = mlx5_rsc_dump_init(dev);
 	if (err) {
-		mlx5_core_err(dev, "Failed to init Resource dump\n");
-		goto err_rsc_dump;
+		mlx5_core_err(dev, "Failed to init Resource dump %d\n", err);
+		mlx5_rsc_dump_destroy(dev);
+		dev->rsc_dump = NULL;
 	}

 	err = mlx5_fpga_device_start(dev);
@@ -1192,11 +1194,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
 	mlx5_fpga_device_stop(dev);
 err_fpga_start:
 	mlx5_rsc_dump_cleanup(dev);
-err_rsc_dump:
 	mlx5_hv_vhca_cleanup(dev->hv_vhca);
 	mlx5_fw_reset_events_stop(dev);
 	mlx5_fw_tracer_cleanup(dev->tracer);
-err_fw_tracer:
 	mlx5_eq_table_destroy(dev);
 err_eq_table:
 	mlx5_irq_table_destroy(dev);
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c

@@ -39,6 +39,7 @@ static const char * const action_type_to_str[] = {
 	[DR_ACTION_TYP_VPORT] = "DR_ACTION_TYP_VPORT",
 	[DR_ACTION_TYP_POP_VLAN] = "DR_ACTION_TYP_POP_VLAN",
 	[DR_ACTION_TYP_PUSH_VLAN] = "DR_ACTION_TYP_PUSH_VLAN",
+	[DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER",
 	[DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
 	[DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
 	[DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
@@ -513,9 +514,9 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
 		/* If destination is vport we will get the FW flow table
 		 * that recalculates the CS and forwards to the vport.
 		 */
-		ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport->dmn,
-								dest_action->vport->caps->num,
-								final_icm_addr);
+		ret = mlx5dr_domain_get_recalc_cs_ft_addr(dest_action->vport->dmn,
+							  dest_action->vport->caps->num,
+							  final_icm_addr);
 		if (ret) {
 			mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
 			return ret;
@@ -632,7 +633,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 			return -EOPNOTSUPP;
 		case DR_ACTION_TYP_CTR:
 			attr.ctr_id = action->ctr->ctr_id +
-				action->ctr->offeset;
+				action->ctr->offset;
 			break;
 		case DR_ACTION_TYP_TAG:
 			attr.flow_tag = action->flow_tag->flow_tag;
@@ -669,7 +670,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
 			attr.hit_gvmi = action->vport->caps->vhca_gvmi;
 			dest_action = action;
 			if (rx_rule) {
-				if (action->vport->caps->num == WIRE_PORT) {
+				if (action->vport->caps->num == MLX5_VPORT_UPLINK) {
 					mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n");
 					return -EOPNOTSUPP;
 				}
@@ -1747,7 +1748,7 @@ mlx5dr_action_create_modify_header(struct mlx5dr_domain *dmn,

 struct mlx5dr_action *
 mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
-				u32 vport, u8 vhca_id_valid,
+				u16 vport, u8 vhca_id_valid,
 				u16 vhca_id)
 {
 	struct mlx5dr_cmd_vport_cap *vport_cap;
@@ -1767,9 +1768,11 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
 		return NULL;
 	}

-	vport_cap = mlx5dr_get_vport_cap(&vport_dmn->info.caps, vport);
+	vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, vport);
 	if (!vport_cap) {
-		mlx5dr_dbg(dmn, "Failed to get vport %d caps\n", vport);
+		mlx5dr_err(dmn,
+			   "Failed to get vport 0x%x caps - vport is disabled or invalid\n",
+			   vport);
 		return NULL;
 	}
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c

@@ -195,6 +195,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
 	caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);

+	caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
+
 	return 0;
 }
@@ -272,7 +274,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
 					u32 table_id,
 					u32 group_id,
 					u32 modify_header_id,
-					u32 vport_id)
+					u16 vport)
 {
 	u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
 	void *in_flow_context;
@@ -303,7 +305,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
 	in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
 	MLX5_SET(dest_format_struct, in_dests, destination_type,
 		 MLX5_FLOW_DESTINATION_TYPE_VPORT);
-	MLX5_SET(dest_format_struct, in_dests, destination_id, vport_id);
+	MLX5_SET(dest_format_struct, in_dests, destination_id, vport);

 	err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
 	kvfree(in);
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c

@@ -9,48 +9,45 @@
 	 ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
 	  (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))

-static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
+static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
 {
 	/* Per vport cached FW FT for checksum recalculation, this
-	 * recalculation is needed due to a HW bug.
+	 * recalculation is needed due to a HW bug in STEv0.
 	 */
-	dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
-					  sizeof(dmn->cache.recalc_cs_ft[0]),
-					  GFP_KERNEL);
-	if (!dmn->cache.recalc_cs_ft)
-		return -ENOMEM;
-
-	return 0;
+	xa_init(&dmn->csum_fts_xa);
 }

-static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
+static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn)
 {
-	int i;
-
-	for (i = 0; i < dmn->info.caps.num_vports; i++) {
-		if (!dmn->cache.recalc_cs_ft[i])
-			continue;
+	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+	unsigned long i;

-		mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
+	xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) {
+		if (recalc_cs_ft)
+			mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
 	}

-	kfree(dmn->cache.recalc_cs_ft);
+	xa_destroy(&dmn->csum_fts_xa);
 }

-int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
-					      u32 vport_num,
-					      u64 *rx_icm_addr)
+int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+					u16 vport_num,
+					u64 *rx_icm_addr)
 {
 	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+	int ret;

-	recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
+	recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);
 	if (!recalc_cs_ft) {
-		/* Table not in cache, need to allocate a new one */
+		/* Table hasn't been created yet */
 		recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
 		if (!recalc_cs_ft)
 			return -EINVAL;

-		dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
+		ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
+				      recalc_cs_ft, GFP_KERNEL));
+		if (ret)
+			return ret;
 	}

 	*rx_icm_addr = recalc_cs_ft->rx_icm_addr;
@@ -124,18 +121,39 @@ static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
 	mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
 }

+static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
+				       struct mlx5dr_cmd_vport_cap *uplink_vport)
+{
+	struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
+
+	uplink_vport->num = MLX5_VPORT_UPLINK;
+	uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
+	uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
+	uplink_vport->vport_gvmi = 0;
+	uplink_vport->vhca_gvmi = dmn->info.caps.gvmi;
+}
+
 static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
-				 bool other_vport,
-				 u16 vport_number)
+				 u16 vport_number,
+				 struct mlx5dr_cmd_vport_cap *vport_caps)
 {
-	struct mlx5dr_cmd_vport_cap *vport_caps;
+	u16 cmd_vport = vport_number;
+	bool other_vport = true;
 	int ret;

-	vport_caps = &dmn->info.caps.vports_caps[vport_number];
+	if (vport_number == MLX5_VPORT_UPLINK) {
+		dr_domain_fill_uplink_caps(dmn, vport_caps);
+		return 0;
+	}
+
+	if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) {
+		other_vport = false;
+		cmd_vport = 0;
+	}

 	ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
 						 other_vport,
-						 vport_number,
+						 cmd_vport,
 						 &vport_caps->icm_address_rx,
 						 &vport_caps->icm_address_tx);
 	if (ret)
@@ -143,7 +161,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,

 	ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
 				    other_vport,
-				    vport_number,
+				    cmd_vport,
 				    &vport_caps->vport_gvmi);
 	if (ret)
 		return ret;
@@ -154,27 +172,82 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
 	return 0;
 }

-static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
+static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
 {
-	struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
-	struct mlx5dr_cmd_vport_cap *wire_vport;
-	int vport;
+	return dr_domain_query_vport(dmn,
+				     dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
+				     &dmn->info.caps.vports.esw_manager_caps);
+}
+
+static struct mlx5dr_cmd_vport_cap *
+dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
+{
+	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+	struct mlx5dr_cmd_vport_cap *vport_caps;
 	int ret;

-	/* Query vports (except wire vport) */
-	for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
-		ret = dr_domain_query_vport(dmn, !!vport, vport);
-		if (ret)
-			return ret;
+	vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL);
+	if (!vport_caps)
+		return NULL;
+
+	ret = dr_domain_query_vport(dmn, vport, vport_caps);
+	if (ret) {
+		kvfree(vport_caps);
+		return NULL;
 	}

-	/* Last vport is the wire port */
-	wire_vport = &dmn->info.caps.vports_caps[vport];
-	wire_vport->num = WIRE_PORT;
-	wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
-	wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
-	wire_vport->vport_gvmi = 0;
-	wire_vport->vhca_gvmi = dmn->info.caps.gvmi;
+	ret = xa_insert(&caps->vports.vports_caps_xa, vport,
+			vport_caps, GFP_KERNEL);
+	if (ret) {
+		mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
+		kvfree(vport_caps);
+		return ERR_PTR(ret);
+	}
+
+	return vport_caps;
+}
+
+struct mlx5dr_cmd_vport_cap *
+mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
+{
+	struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+	struct mlx5dr_cmd_vport_cap *vport_caps;
+
+	if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
+	    (!caps->is_ecpf && vport == 0))
+		return &caps->vports.esw_manager_caps;
+
+vport_load:
+	vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
+	if (vport_caps)
+		return vport_caps;
+
+	vport_caps = dr_domain_add_vport_cap(dmn, vport);
+	if (PTR_ERR(vport_caps) == -EBUSY)
+		/* caps were already stored by another thread */
+		goto vport_load;
+
+	return vport_caps;
+}
+
+static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
+{
+	struct mlx5dr_cmd_vport_cap *vport_caps;
+	unsigned long i;
+
+	xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) {
+		vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i);
+		kvfree(vport_caps);
+	}
+}
+
+static int dr_domain_query_uplink(struct mlx5dr_domain *dmn)
+{
+	struct mlx5dr_cmd_vport_cap *vport_caps;
+
+	vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK);
+	if (!vport_caps)
+		return -EINVAL;

 	return 0;
 }
@@ -196,25 +269,29 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
 	dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
 	dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;

-	dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
-					     sizeof(dmn->info.caps.vports_caps[0]),
-					     GFP_KERNEL);
-	if (!dmn->info.caps.vports_caps)
-		return -ENOMEM;
+	xa_init(&dmn->info.caps.vports.vports_caps_xa);

-	ret = dr_domain_query_vports(dmn);
+	/* Query eswitch manager and uplink vports only. Rest of the
+	 * vports (vport 0, VFs and SFs) will be queried dynamically.
+	 */
+	ret = dr_domain_query_esw_mngr(dmn);
 	if (ret) {
-		mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret);
-		goto free_vports_caps;
+		mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret);
+		goto free_vports_caps_xa;
 	}

-	dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;
+	ret = dr_domain_query_uplink(dmn);
+	if (ret) {
+		mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret);
+		goto free_vports_caps_xa;
+	}

 	return 0;

-free_vports_caps:
-	kfree(dmn->info.caps.vports_caps);
-	dmn->info.caps.vports_caps = NULL;
+free_vports_caps_xa:
+	xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
+
 	return ret;
 }
@@ -229,8 +306,6 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
 		return -EOPNOTSUPP;
 	}

-	dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);
-
 	ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
 	if (ret)
 		return ret;
@@ -267,11 +342,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
 		dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
 		dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;

-		vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
-		if (!vport_cap) {
-			mlx5dr_err(dmn, "Failed to get esw manager vport\n");
-			return -ENOENT;
-		}
+		vport_cap = &dmn->info.caps.vports.esw_manager_caps;

 		dmn->info.supp_sw_steering = true;
 		dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
@@ -290,7 +361,8 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,

 static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
 {
-	kfree(dmn->info.caps.vports_caps);
+	dr_domain_clear_vports(dmn);
+	xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
 }

 struct mlx5dr_domain *
@@ -333,16 +405,10 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
 		goto uninit_caps;
 	}

-	ret = dr_domain_init_cache(dmn);
-	if (ret) {
-		mlx5dr_err(dmn, "Failed initialize domain cache\n");
-		goto uninit_resourses;
-	}
+	dr_domain_init_csum_recalc_fts(dmn);

 	return dmn;

-uninit_resourses:
-	dr_domain_uninit_resources(dmn);
 uninit_caps:
 	dr_domain_caps_uninit(dmn);
 free_domain:
@@ -381,7 +447,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
 	/* make sure resources are not used by the hardware */
 	mlx5dr_cmd_sync_steering(dmn->mdev);

-	dr_domain_uninit_cache(dmn);
+	dr_domain_uninit_csum_recalc_fts(dmn);
 	dr_domain_uninit_resources(dmn);
 	dr_domain_caps_uninit(dmn);
 	mutex_destroy(&dmn->info.tx.mutex);
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c

@@ -5,7 +5,7 @@
 #include "dr_types.h"

 struct mlx5dr_fw_recalc_cs_ft *
-mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num)
 {
 	struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
 	struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c

@@ -1042,10 +1042,10 @@ static bool dr_rule_skip(enum mlx5dr_domain_type domain,
 		return false;

 	if (mask->misc.source_port) {
-		if (rx && value->misc.source_port != WIRE_PORT)
+		if (rx && value->misc.source_port != MLX5_VPORT_UPLINK)
 			return true;

-		if (!rx && value->misc.source_port == WIRE_PORT)
+		if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK)
 			return true;
 	}
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c

@@ -1645,7 +1645,7 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
 	struct mlx5dr_match_misc *misc = &value->misc;
 	struct mlx5dr_cmd_vport_cap *vport_cap;
 	struct mlx5dr_domain *dmn = sb->dmn;
-	struct mlx5dr_cmd_caps *caps;
+	struct mlx5dr_domain *vport_dmn;
 	u8 *bit_mask = sb->bit_mask;
 	bool source_gvmi_set;
@@ -1654,23 +1654,24 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
 	if (sb->vhca_id_valid) {
 		/* Find port GVMI based on the eswitch_owner_vhca_id */
 		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
-			caps = &dmn->info.caps;
+			vport_dmn = dmn;
 		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
 					   dmn->peer_dmn->info.caps.gvmi))
-			caps = &dmn->peer_dmn->info.caps;
+			vport_dmn = dmn->peer_dmn;
 		else
 			return -EINVAL;

 		misc->source_eswitch_owner_vhca_id = 0;
 	} else {
-		caps = &dmn->info.caps;
+		vport_dmn = dmn;
 	}

 	source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
 	if (source_gvmi_set) {
-		vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+		vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn,
+							misc->source_port);
 		if (!vport_cap) {
-			mlx5dr_err(dmn, "Vport 0x%x is invalid\n",
+			mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
 				   misc->source_port);
 			return -EINVAL;
 		}
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c

@@ -586,9 +586,11 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
 	} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
 		u8 *d_action;

-		dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-		action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-		action_sz = DR_STE_ACTION_TRIPLE_SZ;
+		if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
+			dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+			action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+			action_sz = DR_STE_ACTION_TRIPLE_SZ;
+		}
 		d_action = action + DR_STE_ACTION_SINGLE_SZ;

 		dr_ste_v1_set_encap_l3(last_ste,
@@ -1776,7 +1778,7 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
 	struct mlx5dr_match_misc *misc = &value->misc;
 	struct mlx5dr_cmd_vport_cap *vport_cap;
 	struct mlx5dr_domain *dmn = sb->dmn;
-	struct mlx5dr_cmd_caps *caps;
+	struct mlx5dr_domain *vport_dmn;
 	u8 *bit_mask = sb->bit_mask;

 	DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);
@@ -1784,22 +1786,22 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
 	if (sb->vhca_id_valid) {
 		/* Find port GVMI based on the eswitch_owner_vhca_id */
 		if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
-			caps = &dmn->info.caps;
+			vport_dmn = dmn;
 		else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
 					   dmn->peer_dmn->info.caps.gvmi))
-			caps = &dmn->peer_dmn->info.caps;
+			vport_dmn = dmn->peer_dmn;
 		else
 			return -EINVAL;

 		misc->source_eswitch_owner_vhca_id = 0;
 	} else {
-		caps = &dmn->info.caps;
+		vport_dmn = dmn;
 	}

 	if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
 		return 0;

-	vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+	vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port);
 	if (!vport_cap) {
 		mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
 			   misc->source_port);
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h

@@ -4,7 +4,7 @@
 #ifndef _DR_TYPES_
 #define _DR_TYPES_

-#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
 #include <linux/refcount.h>
 #include "fs_core.h"
 #include "wq.h"
@@ -14,7 +14,6 @@

 #define DR_RULE_MAX_STES 18
 #define DR_ACTION_MAX_STES 5
-#define WIRE_PORT 0xFFFF
 #define DR_STE_SVLAN 0x1
 #define DR_STE_CVLAN 0x2
 #define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
@@ -752,9 +751,9 @@ struct mlx5dr_esw_caps {
 struct mlx5dr_cmd_vport_cap {
 	u16 vport_gvmi;
 	u16 vhca_gvmi;
+	u16 num;
 	u64 icm_address_rx;
 	u64 icm_address_tx;
-	u32 num;
 };

 struct mlx5dr_roce_cap {
@@ -763,6 +762,11 @@ struct mlx5dr_roce_cap {
 	u8 fl_rc_qp_when_roce_enabled:1;
 };

+struct mlx5dr_vports {
+	struct mlx5dr_cmd_vport_cap esw_manager_caps;
+	struct xarray vports_caps_xa;
+};
+
 struct mlx5dr_cmd_caps {
 	u16 gvmi;
 	u64 nic_rx_drop_address;
@@ -786,7 +790,6 @@ struct mlx5dr_cmd_caps {
 	u8 flex_parser_id_gtpu_first_ext_dw_0;
 	u8 max_ft_level;
 	u16 roce_min_src_udp;
-	u8 num_esw_ports;
 	u8 sw_format_ver;
 	bool eswitch_manager;
 	bool rx_sw_owner;
@@ -795,11 +798,11 @@ struct mlx5dr_cmd_caps {
 	u8 rx_sw_owner_v2:1;
 	u8 tx_sw_owner_v2:1;
 	u8 fdb_sw_owner_v2:1;
-	u32 num_vports;
 	struct mlx5dr_esw_caps esw_caps;
-	struct mlx5dr_cmd_vport_cap *vports_caps;
+	struct mlx5dr_vports vports;
 	bool prio_tag_required;
 	struct mlx5dr_roce_cap roce_caps;
+	u8 is_ecpf:1;
 	u8 isolate_vl_tc:1;
 };
@@ -826,10 +829,6 @@ struct mlx5dr_domain_info {
 	struct mlx5dr_cmd_caps caps;
 };

-struct mlx5dr_domain_cache {
-	struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
-};
-
 struct mlx5dr_domain {
 	struct mlx5dr_domain *peer_dmn;
 	struct mlx5_core_dev *mdev;
@@ -841,7 +840,7 @@ struct mlx5dr_domain {
 	struct mlx5dr_icm_pool *action_icm_pool;
 	struct mlx5dr_send_ring *send_ring;
 	struct mlx5dr_domain_info info;
-	struct mlx5dr_domain_cache cache;
+	struct xarray csum_fts_xa;
 	struct mlx5dr_ste_ctx *ste_ctx;
 };
@@ -942,7 +941,7 @@ struct mlx5dr_action_dest_tbl {

 struct mlx5dr_action_ctr {
 	u32 ctr_id;
-	u32 offeset;
+	u32 offset;
 };

 struct mlx5dr_action_vport {
@@ -1102,18 +1101,8 @@ mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
 	return true;
 }

-static inline struct mlx5dr_cmd_vport_cap *
-mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
-{
-	if (!caps->vports_caps ||
-	    (vport >= caps->num_vports && vport != WIRE_PORT))
-		return NULL;
-
-	if (vport == WIRE_PORT)
-		vport = caps->num_vports;
-
-	return &caps->vports_caps[vport];
-}
+struct mlx5dr_cmd_vport_cap *
+mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport);

 struct mlx5dr_cmd_query_flow_table_details {
 	u8 status;
@@ -1154,7 +1143,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
 					u32 table_id,
 					u32 group_id,
 					u32 modify_header_id,
-					u32 vport_id);
+					u16 vport_id);
 int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
 				    u32 table_type,
 				    u32 table_id);
@@ -1372,12 +1361,12 @@ struct mlx5dr_fw_recalc_cs_ft {
 };

 struct mlx5dr_fw_recalc_cs_ft *
-mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num);
 void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
 				    struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
-int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
-					      u32 vport_num,
-					      u64 *rx_icm_addr);
+int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+					u16 vport_num,
+					u64 *rx_icm_addr);
 int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
 			    struct mlx5dr_cmd_flow_destination_hw_info *dest,
 			    int num_dest,
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c

@@ -222,7 +222,7 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
 	       dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
 }

-#define MLX5_FLOW_CONTEXT_ACTION_MAX 20
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 32
 static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
 				  struct mlx5_flow_table *ft,
 				  struct mlx5_flow_group *group,
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h

@@ -89,7 +89,7 @@ mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,

 struct mlx5dr_action *
 mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
-				u32 vport, u8 vhca_id_valid,
+				u16 vport, u8 vhca_id_valid,
 				u16 vhca_id);

 struct mlx5dr_action *