Commit fdf41237 authored by Ziwei Xiao, committed by Jakub Kicinski

gve: Remove qpl_cfg struct since qpl_ids map with queues respectively

The qpl_cfg struct was used to make sure that no two queues use a QPL
with the same qpl_id. We can remove it because the qpl_ids now map
directly to the queue ids as follows:
For tx queues: qpl_id = tx_qid
For rx queues: qpl_id = max_tx_queues + rx_qid
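
This mapping is what the gve_tx_qpl_id() and gve_get_rx_qpl_id() helpers
used in the diff below compute. A rough sketch of them (bodies paraphrased
from the description above, not copied verbatim from gve.h):

        static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
        {
                /* tx QPL ids occupy [0, max_tx_queues) */
                return tx_qid;
        }

        static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg,
                                            int rx_qid)
        {
                /* rx QPL ids start right after all possible tx ids */
                return tx_cfg->max_queues + rx_qid;
        }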

When XDP is used, the user must first reduce the number of tx queues to
at most half of max_tx_queues. XDP then uses that same number of queues,
starting right after the existing tx queues. The XDP queue ids therefore
stay within the max_tx_queues range and do not overlap with the rx
queues, so their qpl_ids do not overlap either.
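
As an illustration (numbers made up for this example, not taken from the
patch): with max_tx_queues = 16 and the user reducing to 8 tx queues
before enabling XDP, the ids work out to:

        tx queues   0..7    -> qpl_ids  0..7
        XDP queues  8..15   -> qpl_ids  8..15   (still below max_tx_queues)
        rx queues   0..N-1  -> qpl_ids 16..15+N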

Given that, remove the qpl_cfg struct and derive the qpl_id directly
from the queue id. Unless we erroneously allocate an rx/tx queue that
has already been allocated, we can never allocate a QPL with the same
qpl_id twice, and such a bug would fail much earlier than the QPL
assignment.
Suggested-by: Praveen Kaligineedi <pkaligineedi@google.com>
Signed-off-by: Ziwei Xiao <ziweixiao@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Reviewed-by: Shailend Chand <shailend@google.com>
Link: https://lore.kernel.org/r/20240417205757.778551-1-ziweixiao@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 33d21bd1
@@ -639,7 +639,6 @@ struct gve_ptype_lut {
 /* Parameters for allocating queue page lists */
 struct gve_qpls_alloc_cfg {
-        struct gve_qpl_config *qpl_cfg;
         struct gve_queue_config *tx_cfg;
         struct gve_queue_config *rx_cfg;
@@ -655,9 +654,8 @@ struct gve_qpls_alloc_cfg {
 struct gve_tx_alloc_rings_cfg {
         struct gve_queue_config *qcfg;
-        /* qpls and qpl_cfg must already be allocated */
+        /* qpls must already be allocated */
         struct gve_queue_page_list *qpls;
-        struct gve_qpl_config *qpl_cfg;
         u16 ring_size;
         u16 start_idx;
@@ -674,9 +672,8 @@ struct gve_rx_alloc_rings_cfg {
         struct gve_queue_config *qcfg;
         struct gve_queue_config *qcfg_tx;
-        /* qpls and qpl_cfg must already be allocated */
+        /* qpls must already be allocated */
         struct gve_queue_page_list *qpls;
-        struct gve_qpl_config *qpl_cfg;
         u16 ring_size;
         u16 packet_buffer_size;
@@ -732,7 +729,6 @@ struct gve_priv {
         u16 num_xdp_queues;
         struct gve_queue_config tx_cfg;
         struct gve_queue_config rx_cfg;
-        struct gve_qpl_config qpl_cfg; /* map used QPL ids */
         u32 num_ntfy_blks; /* spilt between TX and RX so must be even */

         struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
@@ -1053,37 +1049,6 @@ static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
         return 2 * rx_desc_cnt;
 }

-/* Returns a pointer to the next available tx qpl in the list of qpls */
-static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
-                                              int tx_qid)
-{
-        /* QPL already in use */
-        if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map))
-                return NULL;
-
-        set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map);
-        return &cfg->qpls[tx_qid];
-}
-
-/* Returns a pointer to the next available rx qpl in the list of qpls */
-static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg,
-                                              int rx_qid)
-{
-        int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid);
-
-        /* QPL already in use */
-        if (test_bit(id, cfg->qpl_cfg->qpl_id_map))
-                return NULL;
-
-        set_bit(id, cfg->qpl_cfg->qpl_id_map);
-        return &cfg->qpls[id];
-}
-
-/* Unassigns the qpl with the given id */
-static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id)
-{
-        clear_bit(id, qpl_cfg->qpl_id_map);
-}
-
 /* Returns the correct dma direction for tx and rx qpls */
 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
                                                        int id)
...
@@ -510,7 +510,6 @@ static int gve_adjust_ring_sizes(struct gve_priv *priv,
         struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
         struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
         struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
-        struct gve_qpl_config new_qpl_cfg;
         int err;

         /* get current queue configuration */
@@ -521,14 +520,6 @@ static int gve_adjust_ring_sizes(struct gve_priv *priv,
         tx_alloc_cfg.ring_size = new_tx_desc_cnt;
         rx_alloc_cfg.ring_size = new_rx_desc_cnt;

-        /* qpl_cfg is not read-only, it contains a map that gets updated as
-         * rings are allocated, which is why we cannot use the yet unreleased
-         * one in priv.
-         */
-        qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-        tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-        rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-
         if (netif_running(priv->dev)) {
                 err = gve_adjust_config(priv, &qpls_alloc_cfg,
                                         &tx_alloc_cfg, &rx_alloc_cfg);
...
@@ -829,7 +829,6 @@ static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
         cfg->qcfg = &priv->tx_cfg;
         cfg->raw_addressing = !gve_is_qpl(priv);
         cfg->qpls = priv->qpls;
-        cfg->qpl_cfg = &priv->qpl_cfg;
         cfg->ring_size = priv->tx_desc_cnt;
         cfg->start_idx = 0;
         cfg->num_rings = gve_num_tx_queues(priv);
@@ -1119,22 +1118,13 @@ static int gve_alloc_qpls(struct gve_priv *priv, struct gve_qpls_alloc_cfg *cfg,
         if (!qpls)
                 return -ENOMEM;

-        cfg->qpl_cfg->qpl_map_size = BITS_TO_LONGS(max_queues) *
-                sizeof(unsigned long) * BITS_PER_BYTE;
-        cfg->qpl_cfg->qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
-                                            sizeof(unsigned long), GFP_KERNEL);
-        if (!cfg->qpl_cfg->qpl_id_map) {
-                err = -ENOMEM;
-                goto free_qpl_array;
-        }
-
         /* Allocate TX QPLs */
         page_count = priv->tx_pages_per_qpl;
         tx_num_qpls = gve_num_tx_qpls(cfg->tx_cfg, cfg->num_xdp_queues,
                                       gve_is_qpl(priv));
         err = gve_alloc_n_qpls(priv, qpls, page_count, 0, tx_num_qpls);
         if (err)
-                goto free_qpl_map;
+                goto free_qpl_array;

         /* Allocate RX QPLs */
         rx_start_id = gve_rx_start_qpl_id(cfg->tx_cfg);
@@ -1157,9 +1147,6 @@ static int gve_alloc_qpls(struct gve_priv *priv, struct gve_qpls_alloc_cfg *cfg,
 free_tx_qpls:
         gve_free_n_qpls(priv, qpls, 0, tx_num_qpls);
-free_qpl_map:
-        kvfree(cfg->qpl_cfg->qpl_id_map);
-        cfg->qpl_cfg->qpl_id_map = NULL;
 free_qpl_array:
         kvfree(qpls);
         return err;
@@ -1175,9 +1162,6 @@ static void gve_free_qpls(struct gve_priv *priv,
         if (!qpls)
                 return;

-        kvfree(cfg->qpl_cfg->qpl_id_map);
-        cfg->qpl_cfg->qpl_id_map = NULL;
-
         for (i = 0; i < max_queues; i++)
                 gve_free_queue_page_list(priv, &qpls[i], i);
@@ -1292,7 +1276,6 @@ static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
         cfg->raw_addressing = !gve_is_qpl(priv);
         cfg->is_gqi = gve_is_gqi(priv);
         cfg->num_xdp_queues = priv->num_xdp_queues;
-        cfg->qpl_cfg = &priv->qpl_cfg;
         cfg->tx_cfg = &priv->tx_cfg;
         cfg->rx_cfg = &priv->rx_cfg;
         cfg->qpls = priv->qpls;
@@ -1306,7 +1289,6 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
         cfg->raw_addressing = !gve_is_qpl(priv);
         cfg->enable_header_split = priv->header_split_enabled;
         cfg->qpls = priv->qpls;
-        cfg->qpl_cfg = &priv->qpl_cfg;
         cfg->ring_size = priv->rx_desc_cnt;
         cfg->packet_buffer_size = gve_is_gqi(priv) ?
                                   GVE_DEFAULT_RX_BUFFER_SIZE :
@@ -1419,7 +1401,6 @@ static int gve_queues_start(struct gve_priv *priv,
         priv->rx = rx_alloc_cfg->rx;

         /* Record new configs into priv */
-        priv->qpl_cfg = *qpls_alloc_cfg->qpl_cfg;
         priv->tx_cfg = *tx_alloc_cfg->qcfg;
         priv->rx_cfg = *rx_alloc_cfg->qcfg;
         priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
@@ -1916,20 +1897,11 @@ int gve_adjust_queues(struct gve_priv *priv,
         struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
         struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
         struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
-        struct gve_qpl_config new_qpl_cfg;
         int err;

         gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
                                 &tx_alloc_cfg, &rx_alloc_cfg);

-        /* qpl_cfg is not read-only, it contains a map that gets updated as
-         * rings are allocated, which is why we cannot use the yet unreleased
-         * one in priv.
-         */
-        qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-        tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-        rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-
         /* Relay the new config from ethtool */
         qpls_alloc_cfg.tx_cfg = &new_tx_config;
         tx_alloc_cfg.qcfg = &new_tx_config;
@@ -2121,18 +2093,10 @@ static int gve_set_features(struct net_device *netdev,
         struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
         struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
         struct gve_priv *priv = netdev_priv(netdev);
-        struct gve_qpl_config new_qpl_cfg;
         int err;

         gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
                                 &tx_alloc_cfg, &rx_alloc_cfg);
-        /* qpl_cfg is not read-only, it contains a map that gets updated as
-         * rings are allocated, which is why we cannot use the yet unreleased
-         * one in priv.
-         */
-        qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-        tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
-        rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;

         if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
                 netdev->features ^= NETIF_F_LRO;
...
@@ -38,7 +38,6 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
                 for (i = 0; i < slots; i++)
                         page_ref_sub(rx->data.page_info[i].page,
                                      rx->data.page_info[i].pagecnt_bias - 1);
-                gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
                 rx->data.qpl = NULL;

                 for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
@@ -145,13 +144,11 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
                 return -ENOMEM;

         if (!rx->data.raw_addressing) {
-                rx->data.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
-                if (!rx->data.qpl) {
-                        kvfree(rx->data.page_info);
-                        rx->data.page_info = NULL;
-                        return -ENOMEM;
-                }
+                u32 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+
+                rx->data.qpl = &cfg->qpls[qpl_id];
         }
+
         for (i = 0; i < slots; i++) {
                 if (!rx->data.raw_addressing) {
                         struct page *page = rx->data.qpl->pages[i];
@@ -204,7 +201,6 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
                 page_ref_sub(rx->data.page_info[i].page,
                              rx->data.page_info[i].pagecnt_bias - 1);
-        gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
         rx->data.qpl = NULL;

         return err;
...
@@ -247,10 +247,8 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
                 if (bs->page_info.page)
                         gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
         }
-        if (rx->dqo.qpl) {
-                gve_unassign_qpl(cfg->qpl_cfg, rx->dqo.qpl->id);
-                rx->dqo.qpl = NULL;
-        }
+
+        rx->dqo.qpl = NULL;

         if (rx->dqo.bufq.desc_ring) {
                 size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
@@ -359,9 +357,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
                 goto err;

         if (!cfg->raw_addressing) {
-                rx->dqo.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
-                if (!rx->dqo.qpl)
-                        goto err;
+                u32 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+
+                rx->dqo.qpl = &cfg->qpls[qpl_id];
                 rx->dqo.next_qpl_page_idx = 0;
         }
...
@@ -225,7 +225,6 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
         if (!tx->raw_addressing) {
                 gve_tx_fifo_release(priv, &tx->tx_fifo);
-                gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
                 tx->tx_fifo.qpl = NULL;
         }
@@ -280,12 +279,12 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
         tx->raw_addressing = cfg->raw_addressing;
         tx->dev = hdev;
         if (!tx->raw_addressing) {
-                tx->tx_fifo.qpl = gve_assign_tx_qpl(cfg, idx);
-                if (!tx->tx_fifo.qpl)
-                        goto abort_with_desc;
+                u32 qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+
+                tx->tx_fifo.qpl = &cfg->qpls[qpl_id];
+
                 /* map Tx FIFO */
                 if (gve_tx_fifo_init(priv, &tx->tx_fifo))
-                        goto abort_with_qpl;
+                        goto abort_with_desc;
         }
@@ -301,9 +300,6 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
 abort_with_fifo:
         if (!tx->raw_addressing)
                 gve_tx_fifo_release(priv, &tx->tx_fifo);
-abort_with_qpl:
-        if (!tx->raw_addressing)
-                gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
 abort_with_desc:
         dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
         tx->desc = NULL;
...
@@ -236,10 +236,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
         kvfree(tx->dqo.tx_qpl_buf_next);
         tx->dqo.tx_qpl_buf_next = NULL;

-        if (tx->dqo.qpl) {
-                gve_unassign_qpl(cfg->qpl_cfg, tx->dqo.qpl->id);
-                tx->dqo.qpl = NULL;
-        }
+        tx->dqo.qpl = NULL;

         netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
 }
@@ -352,9 +349,9 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
                 goto err;

         if (!cfg->raw_addressing) {
-                tx->dqo.qpl = gve_assign_tx_qpl(cfg, idx);
-                if (!tx->dqo.qpl)
-                        goto err;
+                u32 qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+
+                tx->dqo.qpl = &cfg->qpls[qpl_id];

                 if (gve_tx_qpl_buf_init(tx))
                         goto err;
...