Commit d5922717 authored by Maciej Fijalkowski, committed by Tony Nguyen

ice: modify error handling when setting XSK pool in ndo_bpf

Don't bail out right when spotting an error within ice_qp_{dis,ena}()
but rather track the error and go through the whole flow of disabling
and enabling the queue pair.

Fixes: 2d4238f5 ("ice: Add support for AF_XDP")
Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
Tested-by: Chandan Kumar Rout <chandanx.rout@intel.com> (A Contingent Worker at Intel)
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
parent 405d9999
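The diff below implements a "record the first error and keep going" pattern instead of returning early. As a rough standalone illustration of that pattern only (not the driver code itself; step_a()/step_b()/step_c() and qp_teardown() are made-up stand-ins for the ice_vsi_* helpers), the following sketch shows how the first failure is preserved while every step still runs:

/*
 * Minimal sketch of the error-handling pattern used in this commit:
 * remember the first error in "fail" and run every remaining step
 * anyway, instead of bailing out on the first failure.
 * step_a/step_b/step_c are hypothetical stand-ins, not ice helpers.
 */
#include <stdio.h>
#include <errno.h>

static int step_a(void) { return 0; }    /* succeeds */
static int step_b(void) { return -EIO; } /* simulated hardware error */
static int step_c(void) { return 0; }    /* must still run for cleanup */

static int qp_teardown(void)
{
	int fail = 0;
	int err;

	err = step_a();
	if (!fail)
		fail = err;

	err = step_b();
	if (!fail)
		fail = err;	/* first error is recorded here ... */

	err = step_c();
	if (!fail)
		fail = err;	/* ... and later successes do not clear it */

	return fail;		/* 0 on full success, first error otherwise */
}

int main(void)
{
	printf("qp_teardown() = %d\n", qp_teardown()); /* prints -5 (-EIO) */
	return 0;
}

The point of the pattern is that a queue pair which hits an error halfway through is still walked through the remaining disable/enable steps, so hardware and software state stay consistent, while the caller (ndo_bpf) still learns about the first failure from the return value.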
@@ -162,6 +162,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
 	struct ice_tx_ring *tx_ring;
 	struct ice_rx_ring *rx_ring;
 	int timeout = 50;
+	int fail = 0;
 	int err;
 
 	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
@@ -186,8 +187,8 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
 
 	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
 	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
-	if (err)
-		return err;
+	if (!fail)
+		fail = err;
 	if (ice_is_xdp_ena_vsi(vsi)) {
 		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
 
@@ -195,15 +196,15 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
 		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
 		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
 					   &txq_meta);
-		if (err)
-			return err;
+		if (!fail)
+			fail = err;
 	}
 
 	ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
 	ice_qp_clean_rings(vsi, q_idx);
 	ice_qp_reset_stats(vsi, q_idx);
 
-	return 0;
+	return fail;
 }
 
 /**
@@ -216,32 +217,33 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
 static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 {
 	struct ice_q_vector *q_vector;
+	int fail = 0;
 	int err;
 
 	err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
-	if (err)
-		return err;
+	if (!fail)
+		fail = err;
 
 	if (ice_is_xdp_ena_vsi(vsi)) {
 		struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
 
 		err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
-		if (err)
-			return err;
+		if (!fail)
+			fail = err;
 		ice_set_ring_xdp(xdp_ring);
 		ice_tx_xsk_pool(vsi, q_idx);
 	}
 
 	err = ice_vsi_cfg_single_rxq(vsi, q_idx);
-	if (err)
-		return err;
+	if (!fail)
+		fail = err;
 
 	q_vector = vsi->rx_rings[q_idx]->q_vector;
 	ice_qvec_cfg_msix(vsi, q_vector);
 
 	err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
-	if (err)
-		return err;
+	if (!fail)
+		fail = err;
 
 	ice_qvec_toggle_napi(vsi, q_vector, true);
 	ice_qvec_ena_irq(vsi, q_vector);
@@ -249,7 +251,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
 	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
 	clear_bit(ICE_CFG_BUSY, vsi->state);
 
-	return 0;
+	return fail;
 }
 
 /**