Commit f3df4044 authored by Larysa Zaremba, committed by Jakub Kicinski

ice: map XDP queues to vectors in ice_vsi_map_rings_to_vectors()

ice_pf_dcb_recfg() re-maps queues to vectors with
ice_vsi_map_rings_to_vectors(), which does not restore the previous
state for XDP queues. This leads to no AF_XDP traffic after rebuild.

Map XDP queues to vectors in ice_vsi_map_rings_to_vectors().
Also, move the code around so that XDP queues are mapped independently
only via the .ndo_bpf() path.

Fixes: 6624e780 ("ice: split ice_vsi_setup into smaller functions")
Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
Reviewed-by: Simon Horman <horms@kernel.org>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com>
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Link: https://lore.kernel.org/r/20240603-net-2024-05-30-intel-net-fixes-v2-5-e3563aa89b0c@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 744d1971
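For readers skimming the diff below: both the old inline loop in
ice_prepare_xdp_rings() and the new ice_map_xdp_rings() helper spread
num_xdp_txq rings across the VSI's interrupt vectors with a
DIV_ROUND_UP round robin. A minimal standalone sketch of just that
arithmetic (plain C; map_rings() and the counts are hypothetical
stand-ins, not part of the driver):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Hypothetical stand-in for the loop in ice_map_xdp_rings(): spread
 * nrings XDP Tx rings over nvec interrupt vectors; vector v takes
 * DIV_ROUND_UP(remaining, nvec - v) rings, keeping the load balanced.
 */
static void map_rings(int nrings, int nvec)
{
	int rings_rem = nrings;
	int v;

	for (v = 0; v < nvec; v++) {
		int per_v = DIV_ROUND_UP(rings_rem, nvec - v);
		int base = nrings - rings_rem;

		if (per_v)
			printf("vector %d: xdp rings %d..%d\n",
			       v, base, base + per_v - 1);
		else
			printf("vector %d: no xdp rings\n", v);
		rings_rem -= per_v;
	}
}

int main(void)
{
	map_rings(8, 3);	/* e.g. 8 XDP Tx rings, 3 vectors */
	return 0;
}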
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -940,6 +940,7 @@ int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
 int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
 			  enum ice_xdp_cfg cfg_type);
 int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
 int
 ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	     u32 flags);
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -842,6 +842,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
 		}
 		rx_rings_rem -= rx_rings_per_v;
 	}
+
+	if (ice_is_xdp_ena_vsi(vsi))
+		ice_map_xdp_rings(vsi);
 }
 
 /**
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -2274,13 +2274,6 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
 		if (ret)
 			goto unroll_vector_base;
 
-		ice_vsi_map_rings_to_vectors(vsi);
-
-		/* Associate q_vector rings to napi */
-		ice_vsi_set_napi_queues(vsi);
-
-		vsi->stat_offsets_loaded = false;
-
 		if (ice_is_xdp_ena_vsi(vsi)) {
 			ret = ice_vsi_determine_xdp_res(vsi);
 			if (ret)
@@ -2291,6 +2284,13 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
 				goto unroll_vector_base;
 		}
 
+		ice_vsi_map_rings_to_vectors(vsi);
+
+		/* Associate q_vector rings to napi */
+		ice_vsi_set_napi_queues(vsi);
+
+		vsi->stat_offsets_loaded = false;
+
 		/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
 		if (vsi->type != ICE_VSI_CTRL)
 			/* Do not exit if configuring RSS had an issue, at
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2707,6 +2707,60 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
 		bpf_prog_put(old_prog);
 }
 
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+	struct ice_q_vector *q_vector;
+	struct ice_tx_ring *ring;
+
+	if (static_key_enabled(&ice_xdp_locking_key))
+		return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+	q_vector = vsi->rx_rings[qid]->q_vector;
+	ice_for_each_tx_ring(ring, q_vector->tx)
+		if (ice_ring_is_xdp(ring))
+			return ring;
+
+	return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+	int xdp_rings_rem = vsi->num_xdp_txq;
+	int v_idx, q_idx;
+
+	/* follow the logic from ice_vsi_map_rings_to_vectors */
+	ice_for_each_q_vector(vsi, v_idx) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+		int xdp_rings_per_v, q_id, q_base;
+
+		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+					       vsi->num_q_vectors - v_idx);
+		q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+			xdp_ring->q_vector = q_vector;
+			xdp_ring->next = q_vector->tx.tx_ring;
+			q_vector->tx.tx_ring = xdp_ring;
+		}
+		xdp_rings_rem -= xdp_rings_per_v;
+	}
+
+	ice_for_each_rxq(vsi, q_idx) {
+		vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+								       q_idx);
+		ice_tx_xsk_pool(vsi, q_idx);
+	}
+}
+
 /**
  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
  * @vsi: VSI to bring up Tx rings used by XDP
@@ -2719,7 +2773,6 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
 		      enum ice_xdp_cfg cfg_type)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-	int xdp_rings_rem = vsi->num_xdp_txq;
 	struct ice_pf *pf = vsi->back;
 	struct ice_qs_cfg xdp_qs_cfg = {
 		.qs_mutex = &pf->avail_q_mutex,
@@ -2732,8 +2785,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
 		.mapping_mode = ICE_VSI_MAP_CONTIG
 	};
 	struct device *dev;
-	int i, v_idx;
-	int status;
+	int status, i;
 
 	dev = ice_pf_to_dev(pf);
 	vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2752,42 +2804,6 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
 	if (ice_xdp_alloc_setup_rings(vsi))
 		goto clear_xdp_rings;
 
-	/* follow the logic from ice_vsi_map_rings_to_vectors */
-	ice_for_each_q_vector(vsi, v_idx) {
-		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
-		int xdp_rings_per_v, q_id, q_base;
-
-		xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
-					       vsi->num_q_vectors - v_idx);
-		q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
-		for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
-			struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
-			xdp_ring->q_vector = q_vector;
-			xdp_ring->next = q_vector->tx.tx_ring;
-			q_vector->tx.tx_ring = xdp_ring;
-		}
-		xdp_rings_rem -= xdp_rings_per_v;
-	}
-
-	ice_for_each_rxq(vsi, i) {
-		if (static_key_enabled(&ice_xdp_locking_key)) {
-			vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
-		} else {
-			struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
-			struct ice_tx_ring *ring;
-
-			ice_for_each_tx_ring(ring, q_vector->tx) {
-				if (ice_ring_is_xdp(ring)) {
-					vsi->rx_rings[i]->xdp_ring = ring;
-					break;
-				}
-			}
-		}
-		ice_tx_xsk_pool(vsi, i);
-	}
-
 	/* omit the scheduler update if in reset path; XDP queues will be
 	 * taken into account at the end of ice_vsi_rebuild, where
 	 * ice_cfg_vsi_lan is being called
@@ -2795,6 +2811,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
 	if (cfg_type == ICE_XDP_CFG_PART)
 		return 0;
 
+	ice_map_xdp_rings(vsi);
+
 	/* tell the Tx scheduler that right now we have
 	 * additional queues
 	 */
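A note on the ice_xdp_ring_from_qid() helper the patch factors out: when
the ice_xdp_locking_key static key is enabled (the driver has fewer XDP
Tx rings than Rx queues, so rings are shared and need locking), the
helper falls back to a simple modulo mapping. A toy illustration of
that fallback, with hypothetical queue counts (plain C, not driver
code):

#include <stdio.h>

/* Toy model of the static_key_enabled(&ice_xdp_locking_key) branch in
 * ice_xdp_ring_from_qid(): with fewer XDP Tx rings than Rx queues,
 * queue qid maps to ring qid % num_xdp_txq, so several Rx queues
 * share one XDP Tx ring.
 */
int main(void)
{
	int num_rxq = 8, num_xdp_txq = 3;	/* hypothetical counts */
	int qid;

	for (qid = 0; qid < num_rxq; qid++)
		printf("rx queue %d -> xdp ring %d\n",
		       qid, qid % num_xdp_txq);
	return 0;
}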