Commit a034fcdb authored by Przemek Kitszel, committed by Jakub Kicinski

ice: drop two params of ice_aq_move_sched_elems()

Remove two arguments of ice_aq_move_sched_elems(). The last of them,
@cd, was always NULL, and @grps_req was always 1.

Assuming @grps_req to be one allows us to use the DEFINE_FLEX() macro,
which removes the need for some heap allocations.
Signed-off-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Link: https://lore.kernel.org/r/20230912115937.1645707-4-przemyslaw.kitszel@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent ece285af
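For context, a minimal before/after sketch of the conversion this patch applies at each call site. The wrapper functions below are illustrative only, not part of the patch; DEFINE_FLEX() and struct_size() are the flexible-array helpers from <linux/overflow.h>, with DEFINE_FLEX() added earlier in this same series:

	#include <linux/overflow.h>	/* struct_size(), DEFINE_FLEX() */
	#include <linux/slab.h>		/* kzalloc(), kfree() */

	/* Before: the one-element flexible array lives on the heap. */
	static int move_one_node_heap(struct ice_hw *hw, __le32 node_teid)
	{
		struct ice_aqc_move_elem *buf;
		u16 buf_size, num_moved;
		int err;

		buf_size = struct_size(buf, teid, 1);
		buf = kzalloc(buf_size, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		buf->hdr.num_elems = cpu_to_le16(1);
		buf->teid[0] = node_teid;
		/* old signature: grps_req was always 1, cd always NULL */
		err = ice_aq_move_sched_elems(hw, 1, buf, buf_size,
					      &num_moved, NULL);
		kfree(buf);
		return err;
	}

	/* After: DEFINE_FLEX() reserves zeroed, correctly sized storage on
	 * the stack and __struct_size() recovers its compile-time size, so
	 * the allocation, the NULL check, and the kfree() all disappear.
	 */
	static int move_one_node_stack(struct ice_hw *hw, __le32 node_teid)
	{
		DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
		u16 buf_size = __struct_size(buf);
		u16 num_moved;

		buf->hdr.num_elems = cpu_to_le16(1);
		buf->teid[0] = node_teid;
		return ice_aq_move_sched_elems(hw, buf, buf_size, &num_moved);
	}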
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -430,10 +430,11 @@ static void
 ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
 			u16 vsi_num, u8 tc)
 {
-	u16 numq, valq, buf_size, num_moved, qbuf_size;
+	DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
 	struct device *dev = ice_pf_to_dev(lag->pf);
+	u16 numq, valq, num_moved, qbuf_size;
+	u16 buf_size = __struct_size(buf);
 	struct ice_aqc_cfg_txqs_buf *qbuf;
-	struct ice_aqc_move_elem *buf;
 	struct ice_sched_node *n_prt;
 	struct ice_hw *new_hw = NULL;
 	__le32 teid, parent_teid;
@@ -505,26 +506,17 @@ ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
 		goto resume_traffic;
 
 	/* Move Vf's VSI node for this TC to newport's scheduler tree */
-	buf_size = struct_size(buf, teid, 1);
-	buf = kzalloc(buf_size, GFP_KERNEL);
-	if (!buf) {
-		dev_warn(dev, "Failure to alloc memory for VF node failover\n");
-		goto resume_traffic;
-	}
-
 	buf->hdr.src_parent_teid = parent_teid;
 	buf->hdr.dest_parent_teid = n_prt->info.node_teid;
 	buf->hdr.num_elems = cpu_to_le16(1);
 	buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
 	buf->teid[0] = teid;
 
-	if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
-				    NULL))
+	if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
 		dev_warn(dev, "Failure to move VF nodes for failover\n");
 	else
 		ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
 
-	kfree(buf);
-
 	goto resume_traffic;
 
 qbuf_err:
@@ -755,10 +747,11 @@ static void
 ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
 		      u8 tc)
 {
-	u16 numq, valq, buf_size, num_moved, qbuf_size;
+	DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
 	struct device *dev = ice_pf_to_dev(lag->pf);
+	u16 numq, valq, num_moved, qbuf_size;
+	u16 buf_size = __struct_size(buf);
 	struct ice_aqc_cfg_txqs_buf *qbuf;
-	struct ice_aqc_move_elem *buf;
 	struct ice_sched_node *n_prt;
 	__le32 teid, parent_teid;
 	struct ice_vsi_ctx *ctx;
@@ -820,26 +813,17 @@ ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
 		goto resume_reclaim;
 
 	/* Move node to new parent */
-	buf_size = struct_size(buf, teid, 1);
-	buf = kzalloc(buf_size, GFP_KERNEL);
-	if (!buf) {
-		dev_warn(dev, "Failure to alloc memory for VF node failover\n");
-		goto resume_reclaim;
-	}
-
 	buf->hdr.src_parent_teid = parent_teid;
 	buf->hdr.dest_parent_teid = n_prt->info.node_teid;
 	buf->hdr.num_elems = cpu_to_le16(1);
 	buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
 	buf->teid[0] = teid;
 
-	if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
-				    NULL))
+	if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
 		dev_warn(dev, "Failure to move VF nodes for LAG reclaim\n");
 	else
 		ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
 
-	kfree(buf);
-
 	goto resume_reclaim;
 
 reclaim_qerr:
@@ -1792,10 +1776,11 @@ static void
 ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
 			      u16 vsi_num, u8 tc)
 {
-	u16 numq, valq, buf_size, num_moved, qbuf_size;
+	DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
 	struct device *dev = ice_pf_to_dev(lag->pf);
+	u16 numq, valq, num_moved, qbuf_size;
+	u16 buf_size = __struct_size(buf);
 	struct ice_aqc_cfg_txqs_buf *qbuf;
-	struct ice_aqc_move_elem *buf;
 	struct ice_sched_node *n_prt;
 	__le32 teid, parent_teid;
 	struct ice_vsi_ctx *ctx;
@@ -1853,26 +1838,17 @@ ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
 		goto resume_sync;
 
 	/* Move node to new parent */
-	buf_size = struct_size(buf, teid, 1);
-	buf = kzalloc(buf_size, GFP_KERNEL);
-	if (!buf) {
-		dev_warn(dev, "Failure to alloc for VF node move in reset rebuild\n");
-		goto resume_sync;
-	}
-
 	buf->hdr.src_parent_teid = parent_teid;
 	buf->hdr.dest_parent_teid = n_prt->info.node_teid;
 	buf->hdr.num_elems = cpu_to_le16(1);
 	buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
 	buf->teid[0] = teid;
 
-	if (ice_aq_move_sched_elems(&lag->pf->hw, 1, buf, buf_size, &num_moved,
-				    NULL))
+	if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
 		dev_warn(dev, "Failure to move VF nodes for LAG reset rebuild\n");
 	else
 		ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
 
-	kfree(buf);
-
 	goto resume_sync;
 
 sync_qerr:
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -429,24 +429,20 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
 }
 
 /**
- * ice_aq_move_sched_elems - move scheduler elements
+ * ice_aq_move_sched_elems - move scheduler element (just 1 group)
  * @hw: pointer to the HW struct
- * @grps_req: number of groups to move
  * @buf: pointer to buffer
  * @buf_size: buffer size in bytes
  * @grps_movd: returns total number of groups moved
- * @cd: pointer to command details structure or NULL
  *
  * Move scheduling elements (0x0408)
  */
 int
-ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
-			struct ice_aqc_move_elem *buf, u16 buf_size,
-			u16 *grps_movd, struct ice_sq_cd *cd)
+ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf,
+			u16 buf_size, u16 *grps_movd)
 {
 	return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
-					   grps_req, (void *)buf, buf_size,
-					   grps_movd, cd);
+					   1, buf, buf_size, grps_movd, NULL);
 }
 
 /**
@@ -2224,12 +2220,12 @@ int
 ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
 		     u16 num_items, u32 *list)
 {
-	struct ice_aqc_move_elem *buf;
+	DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+	u16 buf_len = __struct_size(buf);
 	struct ice_sched_node *node;
 	u16 i, grps_movd = 0;
 	struct ice_hw *hw;
 	int status = 0;
-	u16 buf_len;
 
 	hw = pi->hw;
@@ -2241,35 +2237,27 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
 	    hw->max_children[parent->tx_sched_layer])
 		return -ENOSPC;
 
-	buf_len = struct_size(buf, teid, 1);
-	buf = kzalloc(buf_len, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
 	for (i = 0; i < num_items; i++) {
 		node = ice_sched_find_node_by_teid(pi->root, list[i]);
 		if (!node) {
 			status = -EINVAL;
-			goto move_err_exit;
+			break;
 		}
 
 		buf->hdr.src_parent_teid = node->info.parent_teid;
 		buf->hdr.dest_parent_teid = parent->info.node_teid;
 		buf->teid[0] = node->info.node_teid;
 		buf->hdr.num_elems = cpu_to_le16(1);
-		status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
-						 &grps_movd, NULL);
+		status = ice_aq_move_sched_elems(hw, buf, buf_len, &grps_movd);
 		if (status && grps_movd != 1) {
 			status = -EIO;
-			goto move_err_exit;
+			break;
 		}
 
 		/* update the SW DB */
 		ice_sched_update_parent(parent, node);
 	}
 
-move_err_exit:
-	kfree(buf);
 	return status;
 }
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -161,10 +161,8 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
 			     u16 *num_nodes_added);
 void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
 void ice_sched_replay_agg(struct ice_hw *hw);
-int
-ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
-			struct ice_aqc_move_elem *buf, u16 buf_size,
-			u16 *grps_movd, struct ice_sq_cd *cd);
+int ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf,
+			    u16 buf_size, u16 *grps_movd);
 int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
 int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
 #endif /* _ICE_SCHED_H_ */