Commit ee24284e authored by Shailend Chand, committed by David S. Miller

gve: Alloc and free QPLs with the rings

Every tx and rx ring has its own queue-page-list (QPL) that serves as
the bounce buffer. Previously we were allocating QPLs for all queues
before the queues themselves were allocated and later associating a QPL
with a queue. This is avoidable complexity: it is much more natural for
each queue to allocate and free its own QPL.

Moreover, the advent of new queue-manipulating ndo hooks makes it hard to
keep things as they are: we would need to transfer a QPL from an old queue to
a new queue, and that is unpleasant.

Tested-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Praveen Kaligineedi <pkaligineedi@google.com>
Reviewed-by: Harshitha Ramamurthy <hramamurthy@google.com>
Signed-off-by: Shailend Chand <shailend@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent af9bcf91
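
For orientation, the sketch below (not code from the patch itself) illustrates the per-ring QPL lifetime the diff adopts, using the gve_alloc_queue_page_list() / gve_free_queue_page_list() helpers declared in gve.h by this change. The example_* function names are hypothetical and the descriptor/buffer setup around the QPL calls is elided.

    /* Illustrative sketch of the new per-ring QPL lifetime; the example_*
     * names are hypothetical, the real flows live in the rx/tx ring setup
     * paths shown in the diff below.
     */
    static int example_rx_alloc_ring(struct gve_priv *priv,
                                     struct gve_rx_alloc_rings_cfg *cfg,
                                     struct gve_rx_ring *rx)
    {
            int qpl_page_cnt;
            u32 qpl_id;

            /* ... allocate descriptor rings, copy pool, etc. ... */

            if (!cfg->raw_addressing) {
                    /* QPL (bounce buffer) mode: the ring allocates its own
                     * QPL instead of taking one out of a shared priv->qpls
                     * array that was filled in earlier.
                     */
                    qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
                    qpl_page_cnt = cfg->ring_size;

                    rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id,
                                                             qpl_page_cnt);
                    if (!rx->data.qpl)
                            return -ENOMEM;
            }
            return 0;
    }

    static void example_rx_free_ring(struct gve_priv *priv,
                                     struct gve_rx_alloc_rings_cfg *cfg,
                                     struct gve_rx_ring *rx)
    {
            /* ... tear down descriptor rings, copy pool, etc. ... */

            if (rx->data.qpl) {
                    /* The ring frees the QPL it allocated, so a QPL never
                     * has to be handed off from an old queue to a new one.
                     */
                    gve_free_queue_page_list(priv, rx->data.qpl,
                                             gve_get_rx_qpl_id(cfg->qcfg_tx,
                                                               rx->q_num));
                    rx->data.qpl = NULL;
            }
    }

With this pattern an allocation failure unwinds inside the ring's own error path (the new abort_with_qpl labels below), which is what lets the gve_qpls_alloc_cfg plumbing and the priv->qpls array go away.
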
@@ -638,26 +638,10 @@ struct gve_ptype_lut {
struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
/* Parameters for allocating queue page lists */
struct gve_qpls_alloc_cfg {
struct gve_queue_config *tx_cfg;
struct gve_queue_config *rx_cfg;
u16 num_xdp_queues;
bool raw_addressing;
bool is_gqi;
/* Allocated resources are returned here */
struct gve_queue_page_list *qpls;
};
/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
struct gve_queue_config *qcfg;
/* qpls must already be allocated */
struct gve_queue_page_list *qpls;
u16 ring_size;
u16 start_idx;
u16 num_rings;
@@ -673,9 +657,6 @@ struct gve_rx_alloc_rings_cfg {
struct gve_queue_config *qcfg;
struct gve_queue_config *qcfg_tx;
/* qpls must already be allocated */
struct gve_queue_page_list *qpls;
u16 ring_size;
u16 packet_buffer_size;
bool raw_addressing;
@@ -701,7 +682,6 @@ struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
struct gve_queue_page_list *qpls; /* array of num qpls */
struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
dma_addr_t irq_db_indices_bus;
@@ -1025,7 +1005,6 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid;
}
/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
{
return tx_cfg->max_queues + rx_qid;
@@ -1036,7 +1015,6 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
return gve_tx_qpl_id(priv, 0);
}
/* Returns the index into priv->qpls where the first rx queue's QPL resides */
static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
{
return gve_get_rx_qpl_id(tx_cfg, 0);
@@ -1090,6 +1068,12 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
/* qpls */
struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
u32 id, int pages);
void gve_free_queue_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl,
u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
@@ -1126,11 +1110,9 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_config(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
@@ -538,20 +538,17 @@ static int gve_adjust_ring_sizes(struct gve_priv *priv,
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
int err;
/* get current queue configuration */
gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
/* copy over the new ring_size from ethtool */
tx_alloc_cfg.ring_size = new_tx_desc_cnt;
rx_alloc_cfg.ring_size = new_rx_desc_cnt;
if (netif_running(priv->dev)) {
err = gve_adjust_config(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
return err;
}
This diff is collapsed.
@@ -41,7 +41,6 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
for (i = 0; i < slots; i++)
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
rx->data.qpl = NULL;
for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
page_ref_sub(rx->qpl_copy_pool[i].page,
@@ -107,6 +106,7 @@ static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
u32 slots = rx->mask + 1;
int idx = rx->q_num;
size_t bytes;
u32 qpl_id;
if (rx->desc.desc_ring) {
bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
@@ -132,6 +132,12 @@ static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
if (rx->data.qpl) {
qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, idx);
gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
rx->data.qpl = NULL;
}
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
@@ -188,12 +194,6 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
if (!rx->data.page_info)
return -ENOMEM;
if (!rx->data.raw_addressing) {
u32 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
rx->data.qpl = &cfg->qpls[qpl_id];
}
for (i = 0; i < slots; i++) {
if (!rx->data.raw_addressing) {
struct page *page = rx->data.qpl->pages[i];
@@ -246,8 +246,6 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
rx->data.qpl = NULL;
return err;
alloc_err_rda:
@@ -274,6 +272,8 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
struct device *hdev = &priv->pdev->dev;
u32 slots = cfg->ring_size;
int filled_pages;
int qpl_page_cnt;
u32 qpl_id = 0;
size_t bytes;
int err;
@@ -306,10 +306,22 @@
goto abort_with_slots;
}
if (!rx->data.raw_addressing) {
qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
qpl_page_cnt = cfg->ring_size;
rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id,
qpl_page_cnt);
if (!rx->data.qpl) {
err = -ENOMEM;
goto abort_with_copy_pool;
}
}
filled_pages = gve_rx_prefill_pages(rx, cfg);
if (filled_pages < 0) {
err = -ENOMEM;
goto abort_with_copy_pool;
goto abort_with_qpl;
}
rx->fill_cnt = filled_pages;
/* Ensure data ring slots (packet buffers) are visible. */
@@ -350,6 +362,11 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
rx->q_resources = NULL;
abort_filled:
gve_rx_unfill_pages(priv, rx, cfg);
abort_with_qpl:
if (!rx->data.raw_addressing) {
gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
rx->data.qpl = NULL;
}
abort_with_copy_pool:
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
@@ -368,12 +385,6 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
if (!cfg->raw_addressing && !cfg->qpls) {
netif_err(priv, drv, priv->dev,
"Cannot alloc QPL ring before allocing QPLs\n");
return -EINVAL;
}
rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
@@ -307,6 +307,7 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
size_t buffer_queue_slots;
int idx = rx->q_num;
size_t size;
u32 qpl_id;
int i;
completion_queue_slots = rx->dqo.complq.mask + 1;
@@ -325,7 +326,11 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
}
rx->dqo.qpl = NULL;
if (rx->dqo.qpl) {
qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
gve_free_queue_page_list(priv, rx->dqo.qpl, qpl_id);
rx->dqo.qpl = NULL;
}
if (rx->dqo.bufq.desc_ring) {
size = sizeof(rx->dqo.bufq.desc_ring[0]) * buffer_queue_slots;
@@ -377,7 +382,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
int idx)
{
struct device *hdev = &priv->pdev->dev;
int qpl_page_cnt;
size_t size;
u32 qpl_id;
const u32 buffer_queue_slots = cfg->ring_size;
const u32 completion_queue_slots = cfg->ring_size;
@@ -418,9 +425,13 @@
goto err;
if (!cfg->raw_addressing) {
u32 qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
rx->dqo.qpl = &cfg->qpls[qpl_id];
rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
qpl_page_cnt);
if (!rx->dqo.qpl)
goto err;
rx->dqo.next_qpl_page_idx = 0;
}
@@ -454,12 +465,6 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
int err;
int i;
if (!cfg->raw_addressing && !cfg->qpls) {
netif_err(priv, drv, priv->dev,
"Cannot alloc QPL ring before allocing QPLs\n");
return -EINVAL;
}
rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
@@ -216,6 +216,7 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
struct device *hdev = &priv->pdev->dev;
int idx = tx->q_num;
size_t bytes;
u32 qpl_id;
u32 slots;
slots = tx->mask + 1;
@@ -223,8 +224,12 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->q_resources, tx->q_resources_bus);
tx->q_resources = NULL;
if (!tx->raw_addressing) {
gve_tx_fifo_release(priv, &tx->tx_fifo);
if (tx->tx_fifo.qpl) {
if (tx->tx_fifo.base)
gve_tx_fifo_release(priv, &tx->tx_fifo);
qpl_id = gve_tx_qpl_id(priv, tx->q_num);
gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
tx->tx_fifo.qpl = NULL;
}
@@ -255,6 +260,8 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
int idx)
{
struct device *hdev = &priv->pdev->dev;
int qpl_page_cnt;
u32 qpl_id = 0;
size_t bytes;
/* Make sure everything is zeroed to start */
@@ -279,12 +286,17 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
tx->raw_addressing = cfg->raw_addressing;
tx->dev = hdev;
if (!tx->raw_addressing) {
u32 qpl_id = gve_tx_qpl_id(priv, tx->q_num);
qpl_id = gve_tx_qpl_id(priv, tx->q_num);
qpl_page_cnt = priv->tx_pages_per_qpl;
tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
qpl_page_cnt);
if (!tx->tx_fifo.qpl)
goto abort_with_desc;
tx->tx_fifo.qpl = &cfg->qpls[qpl_id];
/* map Tx FIFO */
if (gve_tx_fifo_init(priv, &tx->tx_fifo))
goto abort_with_desc;
goto abort_with_qpl;
}
tx->q_resources =
@@ -300,6 +312,11 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
abort_with_fifo:
if (!tx->raw_addressing)
gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
if (!tx->raw_addressing) {
gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
tx->tx_fifo.qpl = NULL;
}
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@@ -316,12 +333,6 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
if (!cfg->raw_addressing && !cfg->qpls) {
netif_err(priv, drv, priv->dev,
"Cannot alloc QPL ring before allocing QPLs\n");
return -EINVAL;
}
if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
@@ -209,6 +209,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct device *hdev = &priv->pdev->dev;
int idx = tx->q_num;
size_t bytes;
u32 qpl_id;
if (tx->q_resources) {
dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -236,7 +237,11 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
kvfree(tx->dqo.tx_qpl_buf_next);
tx->dqo.tx_qpl_buf_next = NULL;
tx->dqo.qpl = NULL;
if (tx->dqo.qpl) {
qpl_id = gve_tx_qpl_id(priv, tx->q_num);
gve_free_queue_page_list(priv, tx->dqo.qpl, qpl_id);
tx->dqo.qpl = NULL;
}
netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
}
@@ -282,7 +287,9 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
{
struct device *hdev = &priv->pdev->dev;
int num_pending_packets;
int qpl_page_cnt;
size_t bytes;
u32 qpl_id;
int i;
memset(tx, 0, sizeof(*tx));
@@ -349,9 +356,13 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (!cfg->raw_addressing) {
u32 qpl_id = gve_tx_qpl_id(priv, tx->q_num);
qpl_id = gve_tx_qpl_id(priv, tx->q_num);
qpl_page_cnt = priv->tx_pages_per_qpl;
tx->dqo.qpl = &cfg->qpls[qpl_id];
tx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
qpl_page_cnt);
if (!tx->dqo.qpl)
goto err;
if (gve_tx_qpl_buf_init(tx))
goto err;
@@ -371,12 +382,6 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
int err = 0;
int i, j;
if (!cfg->raw_addressing && !cfg->qpls) {
netif_err(priv, drv, priv->dev,
"Cannot alloc QPL ring before allocing QPLs\n");
return -EINVAL;
}
if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");