Commit 165d3515 authored by Jakub Kicinski

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-03-28 (ice)

This series contains updates to the ice driver only.

Jesse fixes mismatched header documentation reported when building with
W=1.

Brett restricts VSI context updates to only the fields applicable to the
ICE_AQ_VSI_PROP_Q_OPT_VALID bit.
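
For context, the update-VSI admin queue command applies only the context
sections whose bits are set in valid_sections, so the fix builds a scratch
context carrying just the queue-option fields. A condensed, illustrative
sketch of that shape follows; the helper name is hypothetical and error
handling is trimmed, the real change is the ice_cfg_rdma_fltr() hunk below.

/* Illustrative sketch only -- not driver code.  Build a scratch VSI context
 * that carries just the queue-option fields, flag only that section as
 * valid, and send it, leaving every other section untouched.
 */
static int ice_set_q_opt_sketch(struct ice_hw *hw, u16 vsi_handle, u8 q_opt_flags)
{
	struct ice_vsi_ctx *cached = ice_get_vsi_ctx(hw, vsi_handle);
	struct ice_vsi_ctx *ctx;
	int status;

	if (!cached)
		return -ENOENT;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* copy only the fields covered by the queue-option section */
	ctx->info.q_opt_rss = cached->info.q_opt_rss;
	ctx->info.q_opt_tc = cached->info.q_opt_tc;
	ctx->info.q_opt_flags = q_opt_flags;
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);

	status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
	if (!status)
		cached->info.q_opt_flags = ctx->info.q_opt_flags;

	kfree(ctx);
	return status;
}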

Junfeng adds a check for conflicts with existing filter rules when adding
Flow Director filters.
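
The rule being enforced is simple: within one IP family and the same tunnel
type, a protocol-agnostic "OTHER" flow type overlaps with the UDP/TCP/SCTP
specific ones. The hypothetical helper below restates just that relation;
the actual check added by the series is ice_vc_fdir_has_prof_conflict() in
the diff further down.

/* Hypothetical helper, not driver code: within one IP family, an "OTHER"
 * flow type conflicts with any L4-specific flow type, and vice versa.
 */
static bool fdir_flow_types_conflict(enum ice_fltr_ptype a, enum ice_fltr_ptype b)
{
	bool a_v4_l4 = a == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
		       a == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
		       a == ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
	bool b_v4_l4 = b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
		       b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
		       b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
	bool a_v6_l4 = a == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
		       a == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
		       a == ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
	bool b_v6_l4 = b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
		       b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
		       b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

	if (a == ICE_FLTR_PTYPE_NONF_IPV4_OTHER && b_v4_l4)
		return true;
	if (b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER && a_v4_l4)
		return true;
	if (a == ICE_FLTR_PTYPE_NONF_IPV6_OTHER && b_v6_l4)
		return true;
	if (b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER && a_v6_l4)
		return true;

	return false;
}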

Jakob Koschel adds an interim variable for iterating, to prevent possible
misuse of the list iterator after the loop.
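
The underlying hazard: a list_for_each_entry() cursor is never NULL after
the loop, even when the list is empty or nothing matched, so testing the
cursor afterwards is unreliable. A minimal before/after sketch, using
made-up types rather than the driver's own:

#include <linux/list.h>
#include <linux/types.h>

struct item {
	struct list_head node;
	u16 id;
};

/* Buggy shape: if no entry matches, 'found' is left pointing at a bogus
 * address computed from the list head, so a later NULL/validity test on
 * it passes even for an empty list.
 */
static struct item *find_item_buggy(struct list_head *head, u16 id)
{
	struct item *found;

	list_for_each_entry(found, head, node)
		if (found->id == id)
			return found;

	return found;		/* not NULL, but not a valid item either */
}

/* Fixed shape: walk with a separate 'iter' and only publish it on a match. */
static struct item *find_item_fixed(struct list_head *head, u16 id)
{
	struct item *iter, *found = NULL;

	list_for_each_entry(iter, head, node)
		if (iter->id == id) {
			found = iter;
			break;
		}

	return found;		/* NULL when the list is empty or has no match */
}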

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  ice: fix invalid check for empty list in ice_sched_assoc_vsi_to_agg()
  ice: add profile conflict check for AVF FDIR
  ice: Fix ice_cfg_rdma_fltr() to only update relevant fields
  ice: fix W=1 headers mismatch
====================

Link: https://lore.kernel.org/r/20230328172035.3904953-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents a4d7108c e9a1cc2e
@@ -2788,7 +2788,7 @@ static int
 ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
 			   u16 vsi_handle, unsigned long *tc_bitmap)
 {
-	struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
+	struct ice_sched_agg_vsi_info *agg_vsi_info, *iter, *old_agg_vsi_info = NULL;
 	struct ice_sched_agg_info *agg_info, *old_agg_info;
 	struct ice_hw *hw = pi->hw;
 	int status = 0;
@@ -2806,12 +2806,14 @@ ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
 	if (old_agg_info && old_agg_info != agg_info) {
 		struct ice_sched_agg_vsi_info *vtmp;
 
-		list_for_each_entry_safe(old_agg_vsi_info, vtmp,
+		list_for_each_entry_safe(iter, vtmp,
 					 &old_agg_info->agg_vsi_list,
 					 list_entry)
-			if (old_agg_vsi_info->vsi_handle == vsi_handle)
+			if (iter->vsi_handle == vsi_handle) {
+				old_agg_vsi_info = iter;
 				break;
+			}
 	}
 
 	/* check if entry already exist */
 	agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
......
@@ -1780,18 +1780,36 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
 int
 ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
 {
-	struct ice_vsi_ctx *ctx;
+	struct ice_vsi_ctx *ctx, *cached_ctx;
+	int status;
+
+	cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
+	if (!cached_ctx)
+		return -ENOENT;
 
-	ctx = ice_get_vsi_ctx(hw, vsi_handle);
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 	if (!ctx)
-		return -EIO;
+		return -ENOMEM;
+
+	ctx->info.q_opt_rss = cached_ctx->info.q_opt_rss;
+	ctx->info.q_opt_tc = cached_ctx->info.q_opt_tc;
+	ctx->info.q_opt_flags = cached_ctx->info.q_opt_flags;
+	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
 
 	if (enable)
 		ctx->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
 	else
 		ctx->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
 
-	return ice_update_vsi(hw, vsi_handle, ctx, NULL);
+	status = ice_update_vsi(hw, vsi_handle, ctx, NULL);
+	if (!status) {
+		cached_ctx->info.q_opt_flags = ctx->info.q_opt_flags;
+		cached_ctx->info.valid_sections |= ctx->info.valid_sections;
+	}
+
+	kfree(ctx);
+
+	return status;
 }
 
 /**
......
@@ -938,6 +938,7 @@ ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
  * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
  * @rx_ring: Rx descriptor ring to transact packets on
  * @size: size of buffer to add to skb
+ * @ntc: index of next to clean element
  *
  * This function will pull an Rx buffer from the ring and synchronize it
  * for use by the CPU.
@@ -1026,7 +1027,6 @@ ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
 /**
  * ice_construct_skb - Allocate skb and populate it
  * @rx_ring: Rx descriptor ring to transact packets on
- * @rx_buf: Rx buffer to pull data from
  * @xdp: xdp_buff pointing to the data
  *
  * This function allocates an skb. It then populates it with the page
......
@@ -438,6 +438,7 @@ int __ice_xmit_xdp_ring(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring,
  * ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
  * @xdp_ring: XDP ring
  * @xdp_res: Result of the receive batch
+ * @first_idx: index to write from caller
  *
  * This function bumps XDP Tx tail and/or flush redirect map, and
  * should be called when a batch of packets has been processed in the
......
@@ -541,6 +541,72 @@ static void ice_vc_fdir_rem_prof_all(struct ice_vf *vf)
 	}
 }
 
+/**
+ * ice_vc_fdir_has_prof_conflict
+ * @vf: pointer to the VF structure
+ * @conf: FDIR configuration for each filter
+ *
+ * Check if @conf has conflicting profile with existing profiles
+ *
+ * Return: true on success, and false on error.
+ */
+static bool
+ice_vc_fdir_has_prof_conflict(struct ice_vf *vf,
+			      struct virtchnl_fdir_fltr_conf *conf)
+{
+	struct ice_fdir_fltr *desc;
+
+	list_for_each_entry(desc, &vf->fdir.fdir_rule_list, fltr_node) {
+		struct virtchnl_fdir_fltr_conf *existing_conf;
+		enum ice_fltr_ptype flow_type_a, flow_type_b;
+		struct ice_fdir_fltr *a, *b;
+
+		existing_conf = to_fltr_conf_from_desc(desc);
+		a = &existing_conf->input;
+		b = &conf->input;
+		flow_type_a = a->flow_type;
+		flow_type_b = b->flow_type;
+
+		/* No need to compare two rules with different tunnel types or
+		 * with the same protocol type.
+		 */
+		if (existing_conf->ttype != conf->ttype ||
+		    flow_type_a == flow_type_b)
+			continue;
+
+		switch (flow_type_a) {
+		case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+		case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+		case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
+				return true;
+			break;
+		case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
+			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
+			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
+				return true;
+			break;
+		case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+		case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+		case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_OTHER)
+				return true;
+			break;
+		case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+			if (flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
+			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
+			    flow_type_b == ICE_FLTR_PTYPE_NONF_IPV6_SCTP)
+				return true;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return false;
+}
+
 /**
  * ice_vc_fdir_write_flow_prof
  * @vf: pointer to the VF structure
@@ -677,6 +743,13 @@ ice_vc_fdir_config_input_set(struct ice_vf *vf, struct virtchnl_fdir_add *fltr,
 	enum ice_fltr_ptype flow;
 	int ret;
 
+	ret = ice_vc_fdir_has_prof_conflict(vf, conf);
+	if (ret) {
+		dev_dbg(dev, "Found flow profile conflict for VF %d\n",
+			vf->vf_id);
+		return ret;
+	}
+
 	flow = input->flow_type;
 	ret = ice_vc_fdir_alloc_prof(vf, flow);
 	if (ret) {
......