Commit cfd40b82 authored by Jakub Kicinski

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-06-22 (ice)

This series contains updates to the ice driver only.

Jake adds a brief initial delay before polling for control queue
responses, reducing the wait for responses that complete within normal
times.
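
In outline, the send path now front-loads a microsecond-scale delay
before the first completion check, so commands that finish quickly are
detected on the first poll rather than only after a full sleep interval
(a condensed sketch of the pattern from the diff below; ice_sq_done()
is the driver's completion check):

        /* brief delay up front gives hardware time to finish fast commands */
        udelay(5);

        timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;   /* HZ, i.e. 1s cap */
        do {
                if (ice_sq_done(hw, cq))        /* response written back? */
                        break;
                usleep_range(100, 150);         /* back off between polls */
        } while (time_before(jiffies, timeout));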

Maciej allows for hot-swapping XDP programs.
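
The hot swap itself reduces to an atomic exchange of the program
pointer plus publishing it to each Rx ring, releasing the old program
only afterwards (condensed from the ice_vsi_assign_bpf_prog() change
in the diff below):

        old_prog = xchg(&vsi->xdp_prog, prog);  /* atomic swap on the VSI */
        ice_for_each_rxq(vsi, i)                /* publish to every Rx ring */
                WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);

        if (old_prog)
                bpf_prog_put(old_prog);         /* drop the old reference */

Since each ring observes either the old or the new pointer, an
XDP-enabled interface can take a new program without being brought
down.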

Przemek removes unnecessary checks when enabling SR-IOV and drops
redundant NULL checks before freeing allocated memory.
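
Both kfree() and devm_kfree() are safe no-ops when passed a NULL
pointer, which is what makes guards like the following redundant:

        if (buf)                        /* unnecessary: devm_kfree(dev, NULL) */
                devm_kfree(dev, buf);   /* is already a no-op */

        devm_kfree(dev, buf);           /* equivalent */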

Christophe Jaillet converts a managed memory allocation to a regular one.
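
The buffer in ice_get_fw_log_cfg() is allocated and freed within the
same function call, so registering it with devres for device-lifetime
cleanup adds bookkeeping with no benefit; a plain allocation suffices:

        config = kzalloc(size, GFP_KERNEL);     /* short-lived, function-local */
        if (!config)
                return -ENOMEM;
        /* ... query the FW logging configuration ... */
        kfree(config);                          /* freed before returning */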

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue:
  ice: use ice_down_up() where applicable
  ice: Remove managed memory usage in ice_get_fw_log_cfg()
  ice: remove null checks before devm_kfree() calls
  ice: clean up freeing SR-IOV VFs
  ice: allow hot-swapping XDP programs
  ice: reduce initial wait for control queue messages
====================

Link: https://lore.kernel.org/r/20230622183601.2406499-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 2fe11c9d b7a03457
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -814,7 +814,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
                                 devm_kfree(ice_hw_to_dev(hw), lst_itr);
                         }
                 }
-                if (recps[i].root_buf)
-                        devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
+                devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
         }
         ice_rm_all_sw_replay_rule_info(hw);
@@ -834,7 +833,7 @@ static int ice_get_fw_log_cfg(struct ice_hw *hw)
         u16 size;

         size = sizeof(*config) * ICE_AQC_FW_LOG_ID_MAX;
-        config = devm_kzalloc(ice_hw_to_dev(hw), size, GFP_KERNEL);
+        config = kzalloc(size, GFP_KERNEL);
         if (!config)
                 return -ENOMEM;
@@ -857,7 +856,7 @@ static int ice_get_fw_log_cfg(struct ice_hw *hw)
                 }
         }

-        devm_kfree(ice_hw_to_dev(hw), config);
+        kfree(config);

         return status;
 }
@@ -1011,7 +1010,6 @@ static int ice_cfg_fw_log(struct ice_hw *hw, bool enable)
         }

 out:
-        if (data)
-                devm_kfree(ice_hw_to_dev(hw), data);
+        devm_kfree(ice_hw_to_dev(hw), data);

         return status;
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -339,7 +339,6 @@ do {                                                        \
                 }                                               \
         }                                                       \
         /* free the buffer info list */                         \
-        if ((qi)->ring.cmd_buf)                                 \
-                devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
+        devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);      \
         /* free DMA head */                                     \
         devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);     \
@@ -1056,14 +1055,19 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
         if (cq->sq.next_to_use == cq->sq.count)
                 cq->sq.next_to_use = 0;
         wr32(hw, cq->sq.tail, cq->sq.next_to_use);
         ice_flush(hw);

+        /* Wait a short time before initial ice_sq_done() check, to allow
+         * hardware time for completion.
+         */
+        udelay(5);
+
         timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT;
         do {
                 if (ice_sq_done(hw, cq))
                         break;

-                usleep_range(ICE_CTL_Q_SQ_CMD_USEC,
-                             ICE_CTL_Q_SQ_CMD_USEC * 3 / 2);
+                usleep_range(100, 150);
         } while (time_before(jiffies, timeout));

         /* if ready, copy the desc back to temp */
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -35,7 +35,6 @@ enum ice_ctl_q {
 /* Control Queue timeout settings - max delay 1s */
 #define ICE_CTL_Q_SQ_CMD_TIMEOUT        HZ      /* Wait max 1s */
-#define ICE_CTL_Q_SQ_CMD_USEC           100     /* Check every 100usec */
 #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT    10      /* Count 10 times */
 #define ICE_CTL_Q_ADMIN_INIT_MSEC       100     /* Check every 100msec */
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -1303,23 +1303,6 @@ ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
         return NULL;
 }

-/**
- * ice_dealloc_flow_entry - Deallocate flow entry memory
- * @hw: pointer to the HW struct
- * @entry: flow entry to be removed
- */
-static void
-ice_dealloc_flow_entry(struct ice_hw *hw, struct ice_flow_entry *entry)
-{
-        if (!entry)
-                return;
-
-        if (entry->entry)
-                devm_kfree(ice_hw_to_dev(hw), entry->entry);
-
-        devm_kfree(ice_hw_to_dev(hw), entry);
-}
-
 /**
  * ice_flow_rem_entry_sync - Remove a flow entry
  * @hw: pointer to the HW struct
@@ -1335,7 +1318,8 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
         list_del(&entry->l_entry);

-        ice_dealloc_flow_entry(hw, entry);
+        devm_kfree(ice_hw_to_dev(hw), entry->entry);
+        devm_kfree(ice_hw_to_dev(hw), entry);

         return 0;
 }
@@ -1662,7 +1646,6 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
 out:
         if (status && e) {
-                if (e->entry)
-                        devm_kfree(ice_hw_to_dev(hw), e->entry);
-
+                devm_kfree(ice_hw_to_dev(hw), e->entry);
                 devm_kfree(ice_hw_to_dev(hw), e);
         }
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -321,31 +321,19 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
         dev = ice_pf_to_dev(pf);

-        if (vsi->af_xdp_zc_qps) {
-                bitmap_free(vsi->af_xdp_zc_qps);
-                vsi->af_xdp_zc_qps = NULL;
-        }
+        bitmap_free(vsi->af_xdp_zc_qps);
+        vsi->af_xdp_zc_qps = NULL;
         /* free the ring and vector containers */
-        if (vsi->q_vectors) {
-                devm_kfree(dev, vsi->q_vectors);
-                vsi->q_vectors = NULL;
-        }
-        if (vsi->tx_rings) {
-                devm_kfree(dev, vsi->tx_rings);
-                vsi->tx_rings = NULL;
-        }
-        if (vsi->rx_rings) {
-                devm_kfree(dev, vsi->rx_rings);
-                vsi->rx_rings = NULL;
-        }
-        if (vsi->txq_map) {
-                devm_kfree(dev, vsi->txq_map);
-                vsi->txq_map = NULL;
-        }
-        if (vsi->rxq_map) {
-                devm_kfree(dev, vsi->rxq_map);
-                vsi->rxq_map = NULL;
-        }
+        devm_kfree(dev, vsi->q_vectors);
+        vsi->q_vectors = NULL;
+        devm_kfree(dev, vsi->tx_rings);
+        vsi->tx_rings = NULL;
+        devm_kfree(dev, vsi->rx_rings);
+        vsi->rx_rings = NULL;
+        devm_kfree(dev, vsi->txq_map);
+        vsi->txq_map = NULL;
+        devm_kfree(dev, vsi->rxq_map);
+        vsi->rxq_map = NULL;
 }

 /**
@@ -902,9 +890,7 @@ static void ice_rss_clean(struct ice_vsi *vsi)
         dev = ice_pf_to_dev(pf);

-        if (vsi->rss_hkey_user)
-                devm_kfree(dev, vsi->rss_hkey_user);
-        if (vsi->rss_lut_user)
-                devm_kfree(dev, vsi->rss_lut_user);
+        devm_kfree(dev, vsi->rss_hkey_user);
+        devm_kfree(dev, vsi->rss_lut_user);

         ice_vsi_clean_rss_flow_fld(vsi);
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2633,11 +2633,11 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
         int i;

         old_prog = xchg(&vsi->xdp_prog, prog);
-        if (old_prog)
-                bpf_prog_put(old_prog);
-
         ice_for_each_rxq(vsi, i)
                 WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
+
+        if (old_prog)
+                bpf_prog_put(old_prog);
 }

 /**
@@ -2922,6 +2922,12 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
                 }
         }

+        /* hot swap progs and avoid toggling link */
+        if (ice_is_xdp_ena_vsi(vsi) == !!prog) {
+                ice_vsi_assign_bpf_prog(vsi, prog);
+                return 0;
+        }
+
         /* need to stop netdev while setting up the program for Rx rings */
         if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
                 ret = ice_down(vsi);
@@ -2954,13 +2960,6 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
                 xdp_ring_err = ice_realloc_zc_buf(vsi, false);
                 if (xdp_ring_err)
                         NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
-        } else {
-                /* safe to call even when prog == vsi->xdp_prog as
-                 * dev_xdp_install in net/core/dev.c incremented prog's
-                 * refcount so corresponding bpf_prog_put won't cause
-                 * underflow
-                 */
-                ice_vsi_assign_bpf_prog(vsi, prog);
         }

         if (if_running)
@@ -7413,21 +7412,9 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
         }

         netdev->mtu = (unsigned int)new_mtu;

-        /* if VSI is up, bring it down and then back up */
-        if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
-                err = ice_down(vsi);
-                if (err) {
-                        netdev_err(netdev, "change MTU if_down err %d\n", err);
-                        return err;
-                }
-
-                err = ice_up(vsi);
-                if (err) {
-                        netdev_err(netdev, "change MTU if_up err %d\n", err);
-                        return err;
-                }
-        }
+        err = ice_down_up(vsi);
+        if (err)
+                return err;

         netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
         set_bit(ICE_FLAG_MTU_CHANGED, pf->flags);
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -358,10 +358,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
                         node->sibling;
         }

-        /* leaf nodes have no children */
-        if (node->children)
-                devm_kfree(ice_hw_to_dev(hw), node->children);
-
+        devm_kfree(ice_hw_to_dev(hw), node->children);
         kfree(node->name);
         xa_erase(&pi->sched_node_ids, node->id);
         devm_kfree(ice_hw_to_dev(hw), node);
@@ -859,10 +856,8 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
         if (!hw)
                 return;

-        if (hw->layer_info) {
-                devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
-                hw->layer_info = NULL;
-        }
+        devm_kfree(ice_hw_to_dev(hw), hw->layer_info);
+        hw->layer_info = NULL;

         ice_sched_clear_port(hw->port_info);
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -905,14 +905,13 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
  */
 static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
 {
-        int pre_existing_vfs = pci_num_vf(pf->pdev);
         struct device *dev = ice_pf_to_dev(pf);
         int err;

-        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
+        if (!num_vfs) {
                 ice_free_vfs(pf);
-        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
                 return 0;
+        }

         if (num_vfs > pf->vfs.num_supported) {
                 dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1636,22 +1636,17 @@ ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
  */
 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
 {
-        struct ice_vsi_ctx *vsi;
+        struct ice_vsi_ctx *vsi = ice_get_vsi_ctx(hw, vsi_handle);
         u8 i;

-        vsi = ice_get_vsi_ctx(hw, vsi_handle);
         if (!vsi)
                 return;
+
         ice_for_each_traffic_class(i) {
-                if (vsi->lan_q_ctx[i]) {
-                        devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
-                        vsi->lan_q_ctx[i] = NULL;
-                }
-                if (vsi->rdma_q_ctx[i]) {
-                        devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
-                        vsi->rdma_q_ctx[i] = NULL;
-                }
+                devm_kfree(ice_hw_to_dev(hw), vsi->lan_q_ctx[i]);
+                vsi->lan_q_ctx[i] = NULL;
+                devm_kfree(ice_hw_to_dev(hw), vsi->rdma_q_ctx[i]);
+                vsi->rdma_q_ctx[i] = NULL;
         }
 }

 /**
@@ -5468,9 +5463,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
                 devm_kfree(ice_hw_to_dev(hw), fvit);
         }

-        if (rm->root_buf)
-                devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
-
+        devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
         kfree(rm);

 err_free_lkup_exts: