Commit f13697cc authored by Shailend Chand's avatar Shailend Chand Committed by Jakub Kicinski

gve: Switch to config-aware queue allocation

The new config-aware functions will help achieve the goal of being able
to allocate resources for new queues while there already are active
queues serving traffic.

These new functions work off of arbitrary queue allocation configs
rather than just the currently active config in priv, and they return
the newly allocated resources instead of writing them into priv.
Signed-off-by: default avatarShailend Chand <shailend@google.com>
Reviewed-by: default avatarWillem de Bruijn <willemb@google.com>
Reviewed-by: default avatarJeroen de Borst <jeroendb@google.com>
Reviewed-by: default avatarSimon Horman <horms@kernel.org>
Link: https://lore.kernel.org/r/20240122182632.1102721-4-shailend@google.comSigned-off-by: default avatarJakub Kicinski <kuba@kernel.org>
parent 1dfc2e46
...@@ -966,14 +966,14 @@ static inline bool gve_is_qpl(struct gve_priv *priv) ...@@ -966,14 +966,14 @@ static inline bool gve_is_qpl(struct gve_priv *priv)
priv->queue_format == GVE_DQO_QPL_FORMAT; priv->queue_format == GVE_DQO_QPL_FORMAT;
} }
/* Returns the number of tx queue page lists /* Returns the number of tx queue page lists */
*/ static inline u32 gve_num_tx_qpls(const struct gve_queue_config *tx_cfg,
static inline u32 gve_num_tx_qpls(struct gve_priv *priv) int num_xdp_queues,
bool is_qpl)
{ {
if (!gve_is_qpl(priv)) if (!is_qpl)
return 0; return 0;
return tx_cfg->num_queues + num_xdp_queues;
return priv->tx_cfg.num_queues + priv->num_xdp_queues;
} }
/* Returns the number of XDP tx queue page lists /* Returns the number of XDP tx queue page lists
...@@ -986,14 +986,13 @@ static inline u32 gve_num_xdp_qpls(struct gve_priv *priv) ...@@ -986,14 +986,13 @@ static inline u32 gve_num_xdp_qpls(struct gve_priv *priv)
return priv->num_xdp_queues; return priv->num_xdp_queues;
} }
/* Returns the number of rx queue page lists /* Returns the number of rx queue page lists */
*/ static inline u32 gve_num_rx_qpls(const struct gve_queue_config *rx_cfg,
static inline u32 gve_num_rx_qpls(struct gve_priv *priv) bool is_qpl)
{ {
if (!gve_is_qpl(priv)) if (!is_qpl)
return 0; return 0;
return rx_cfg->num_queues;
return priv->rx_cfg.num_queues;
} }
static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid) static inline u32 gve_tx_qpl_id(struct gve_priv *priv, int tx_qid)
...@@ -1006,59 +1005,59 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid) ...@@ -1006,59 +1005,59 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid; return priv->tx_cfg.max_queues + rx_qid;
} }
/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
{
return tx_cfg->max_queues + rx_qid;
}
static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv) static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
{ {
return gve_tx_qpl_id(priv, 0); return gve_tx_qpl_id(priv, 0);
} }
static inline u32 gve_rx_start_qpl_id(struct gve_priv *priv) /* Returns the index into priv->qpls where the first rx queue's QPL resides */
static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
{ {
return gve_rx_qpl_id(priv, 0); return gve_get_rx_qpl_id(tx_cfg, 0);
} }
/* Returns a pointer to the next available tx qpl in the list of qpls /* Returns a pointer to the next available tx qpl in the list of qpls */
*/
static inline static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv, int tx_qid) struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
int tx_qid)
{ {
int id = gve_tx_qpl_id(priv, tx_qid);
/* QPL already in use */ /* QPL already in use */
if (test_bit(id, priv->qpl_cfg.qpl_id_map)) if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map))
return NULL; return NULL;
set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map);
set_bit(id, priv->qpl_cfg.qpl_id_map); return &cfg->qpls[tx_qid];
return &priv->qpls[id];
} }
/* Returns a pointer to the next available rx qpl in the list of qpls /* Returns a pointer to the next available rx qpl in the list of qpls */
*/
static inline static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv, int rx_qid) struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg,
int rx_qid)
{ {
int id = gve_rx_qpl_id(priv, rx_qid); int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid);
/* QPL already in use */ /* QPL already in use */
if (test_bit(id, priv->qpl_cfg.qpl_id_map)) if (test_bit(id, cfg->qpl_cfg->qpl_id_map))
return NULL; return NULL;
set_bit(id, cfg->qpl_cfg->qpl_id_map);
set_bit(id, priv->qpl_cfg.qpl_id_map); return &cfg->qpls[id];
return &priv->qpls[id];
} }
/* Unassigns the qpl with the given id /* Unassigns the qpl with the given id */
*/ static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id)
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{ {
clear_bit(id, priv->qpl_cfg.qpl_id_map); clear_bit(id, qpl_cfg->qpl_id_map);
} }
/* Returns the correct dma direction for tx and rx qpls /* Returns the correct dma direction for tx and rx qpls */
*/
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv, static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
int id) int id)
{ {
if (id < gve_rx_start_qpl_id(priv)) if (id < gve_rx_start_qpl_id(&priv->tx_cfg))
return DMA_TO_DEVICE; return DMA_TO_DEVICE;
else else
return DMA_FROM_DEVICE; return DMA_FROM_DEVICE;
...@@ -1103,8 +1102,12 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx, ...@@ -1103,8 +1102,12 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid); void gve_xdp_tx_flush(struct gve_priv *priv, u32 xdp_qid);
bool gve_tx_poll(struct gve_notify_block *block, int budget); bool gve_tx_poll(struct gve_notify_block *block, int budget);
bool gve_xdp_poll(struct gve_notify_block *block, int budget); bool gve_xdp_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings); int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings); struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_free_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx);
u32 gve_tx_load_event_counter(struct gve_priv *priv, u32 gve_tx_load_event_counter(struct gve_priv *priv,
struct gve_tx_ring *tx); struct gve_tx_ring *tx);
bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx); bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
...@@ -1113,7 +1116,12 @@ void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx); ...@@ -1113,7 +1116,12 @@ void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget); int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx); bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv); int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv); int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
/* Reset */ /* Reset */
void gve_schedule_reset(struct gve_priv *priv); void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown); int gve_reset(struct gve_priv *priv, bool attempt_teardown);
......
...@@ -38,10 +38,18 @@ netdev_features_t gve_features_check_dqo(struct sk_buff *skb, ...@@ -38,10 +38,18 @@ netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
netdev_features_t features); netdev_features_t features);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean); bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget); int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_dqo(struct gve_priv *priv); int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
void gve_tx_free_rings_dqo(struct gve_priv *priv); struct gve_tx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings_dqo(struct gve_priv *priv); void gve_tx_free_rings_dqo(struct gve_priv *priv,
void gve_rx_free_rings_dqo(struct gve_priv *priv); struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx);
int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_dqo(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx);
int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct napi_struct *napi); struct napi_struct *napi);
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx); void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx);
......
...@@ -582,61 +582,102 @@ static void gve_teardown_device_resources(struct gve_priv *priv) ...@@ -582,61 +582,102 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
gve_clear_device_resources_ok(priv); gve_clear_device_resources_ok(priv);
} }
static int gve_unregister_qpl(struct gve_priv *priv, u32 i)
{
int err;
err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to unregister queue page list %d\n",
priv->qpls[i].id);
return err;
}
priv->num_registered_pages -= priv->qpls[i].num_entries;
return 0;
}
static int gve_register_qpl(struct gve_priv *priv, u32 i)
{
int num_rx_qpls;
int pages;
int err;
/* Rx QPLs succeed Tx QPLs in the priv->qpls array. */
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
if (i >= gve_rx_start_qpl_id(&priv->tx_cfg) + num_rx_qpls) {
netif_err(priv, drv, priv->dev,
"Cannot register nonexisting QPL at index %d\n", i);
return -EINVAL;
}
pages = priv->qpls[i].num_entries;
if (pages + priv->num_registered_pages > priv->max_registered_pages) {
netif_err(priv, drv, priv->dev,
"Reached max number of registered pages %llu > %llu\n",
pages + priv->num_registered_pages,
priv->max_registered_pages);
return -EINVAL;
}
err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
if (err) {
netif_err(priv, drv, priv->dev,
"failed to register queue page list %d\n",
priv->qpls[i].id);
/* This failure will trigger a reset - no need to clean
* up
*/
return err;
}
priv->num_registered_pages += pages;
return 0;
}
static int gve_register_xdp_qpls(struct gve_priv *priv) static int gve_register_xdp_qpls(struct gve_priv *priv)
{ {
int start_id; int start_id;
int err; int err;
int i; int i;
start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv)); start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
err = gve_adminq_register_page_list(priv, &priv->qpls[i]); err = gve_register_qpl(priv, i);
if (err) { /* This failure will trigger a reset - no need to clean up */
netif_err(priv, drv, priv->dev, if (err)
"failed to register queue page list %d\n",
priv->qpls[i].id);
/* This failure will trigger a reset - no need to clean
* up
*/
return err; return err;
}
} }
return 0; return 0;
} }
static int gve_register_qpls(struct gve_priv *priv) static int gve_register_qpls(struct gve_priv *priv)
{ {
int num_tx_qpls, num_rx_qpls;
int start_id; int start_id;
int err; int err;
int i; int i;
start_id = gve_tx_start_qpl_id(priv); num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) { gve_is_qpl(priv));
err = gve_adminq_register_page_list(priv, &priv->qpls[i]); num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
if (err) {
netif_err(priv, drv, priv->dev, for (i = 0; i < num_tx_qpls; i++) {
"failed to register queue page list %d\n", err = gve_register_qpl(priv, i);
priv->qpls[i].id); if (err)
/* This failure will trigger a reset - no need to clean
* up
*/
return err; return err;
}
} }
start_id = gve_rx_start_qpl_id(priv); /* there might be a gap between the tx and rx qpl ids */
for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) { start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
err = gve_adminq_register_page_list(priv, &priv->qpls[i]); for (i = 0; i < num_rx_qpls; i++) {
if (err) { err = gve_register_qpl(priv, start_id + i);
netif_err(priv, drv, priv->dev, if (err)
"failed to register queue page list %d\n",
priv->qpls[i].id);
/* This failure will trigger a reset - no need to clean
* up
*/
return err; return err;
}
} }
return 0; return 0;
} }
...@@ -646,48 +687,40 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv) ...@@ -646,48 +687,40 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv)
int err; int err;
int i; int i;
start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv)); start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id); err = gve_unregister_qpl(priv, i);
/* This failure will trigger a reset - no need to clean up */ /* This failure will trigger a reset - no need to clean */
if (err) { if (err)
netif_err(priv, drv, priv->dev,
"Failed to unregister queue page list %d\n",
priv->qpls[i].id);
return err; return err;
}
} }
return 0; return 0;
} }
static int gve_unregister_qpls(struct gve_priv *priv) static int gve_unregister_qpls(struct gve_priv *priv)
{ {
int num_tx_qpls, num_rx_qpls;
int start_id; int start_id;
int err; int err;
int i; int i;
start_id = gve_tx_start_qpl_id(priv); num_tx_qpls = gve_num_tx_qpls(&priv->tx_cfg, gve_num_xdp_qpls(priv),
for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) { gve_is_qpl(priv));
err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id); num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
/* This failure will trigger a reset - no need to clean up */
if (err) { for (i = 0; i < num_tx_qpls; i++) {
netif_err(priv, drv, priv->dev, err = gve_unregister_qpl(priv, i);
"Failed to unregister queue page list %d\n", /* This failure will trigger a reset - no need to clean */
priv->qpls[i].id); if (err)
return err; return err;
}
} }
start_id = gve_rx_start_qpl_id(priv); start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) { for (i = 0; i < num_rx_qpls; i++) {
err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id); err = gve_unregister_qpl(priv, start_id + i);
/* This failure will trigger a reset - no need to clean up */ /* This failure will trigger a reset - no need to clean */
if (err) { if (err)
netif_err(priv, drv, priv->dev,
"Failed to unregister queue page list %d\n",
priv->qpls[i].id);
return err; return err;
}
} }
return 0; return 0;
} }
...@@ -762,120 +795,124 @@ static int gve_create_rings(struct gve_priv *priv) ...@@ -762,120 +795,124 @@ static int gve_create_rings(struct gve_priv *priv)
return 0; return 0;
} }
static void add_napi_init_xdp_sync_stats(struct gve_priv *priv, static void init_xdp_sync_stats(struct gve_priv *priv)
int (*napi_poll)(struct napi_struct *napi,
int budget))
{ {
int start_id = gve_xdp_tx_start_queue_id(priv); int start_id = gve_xdp_tx_start_queue_id(priv);
int i; int i;
/* Add xdp tx napi & init sync stats*/ /* Init stats */
for (i = start_id; i < start_id + priv->num_xdp_queues; i++) { for (i = start_id; i < start_id + priv->num_xdp_queues; i++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, i); int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
u64_stats_init(&priv->tx[i].statss); u64_stats_init(&priv->tx[i].statss);
priv->tx[i].ntfy_id = ntfy_idx; priv->tx[i].ntfy_id = ntfy_idx;
gve_add_napi(priv, ntfy_idx, napi_poll);
} }
} }
static void add_napi_init_sync_stats(struct gve_priv *priv, static void gve_init_sync_stats(struct gve_priv *priv)
int (*napi_poll)(struct napi_struct *napi,
int budget))
{ {
int i; int i;
/* Add tx napi & init sync stats*/ for (i = 0; i < priv->tx_cfg.num_queues; i++)
for (i = 0; i < gve_num_tx_queues(priv); i++) {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
u64_stats_init(&priv->tx[i].statss); u64_stats_init(&priv->tx[i].statss);
priv->tx[i].ntfy_id = ntfy_idx;
gve_add_napi(priv, ntfy_idx, napi_poll);
}
/* Add rx napi & init sync stats*/
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
/* Init stats for XDP TX queues */
init_xdp_sync_stats(priv);
for (i = 0; i < priv->rx_cfg.num_queues; i++)
u64_stats_init(&priv->rx[i].statss); u64_stats_init(&priv->rx[i].statss);
priv->rx[i].ntfy_id = ntfy_idx; }
gve_add_napi(priv, ntfy_idx, napi_poll);
static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->qpls = priv->qpls;
cfg->qpl_cfg = &priv->qpl_cfg;
cfg->ring_size = priv->tx_desc_cnt;
cfg->start_idx = 0;
cfg->num_rings = gve_num_tx_queues(priv);
cfg->tx = priv->tx;
}
static void gve_tx_stop_rings(struct gve_priv *priv, int start_id, int num_rings)
{
int i;
if (!priv->tx)
return;
for (i = start_id; i < start_id + num_rings; i++) {
if (gve_is_gqi(priv))
gve_tx_stop_ring_gqi(priv, i);
else
gve_tx_stop_ring_dqo(priv, i);
} }
} }
static void gve_tx_free_rings(struct gve_priv *priv, int start_id, int num_rings) static void gve_tx_start_rings(struct gve_priv *priv, int start_id,
int num_rings)
{ {
if (gve_is_gqi(priv)) { int i;
gve_tx_free_rings_gqi(priv, start_id, num_rings);
} else { for (i = start_id; i < start_id + num_rings; i++) {
gve_tx_free_rings_dqo(priv); if (gve_is_gqi(priv))
gve_tx_start_ring_gqi(priv, i);
else
gve_tx_start_ring_dqo(priv, i);
} }
} }
static int gve_alloc_xdp_rings(struct gve_priv *priv) static int gve_alloc_xdp_rings(struct gve_priv *priv)
{ {
int start_id; struct gve_tx_alloc_rings_cfg cfg = {0};
int err = 0; int err = 0;
if (!priv->num_xdp_queues) if (!priv->num_xdp_queues)
return 0; return 0;
start_id = gve_xdp_tx_start_queue_id(priv); gve_tx_get_curr_alloc_cfg(priv, &cfg);
err = gve_tx_alloc_rings(priv, start_id, priv->num_xdp_queues); cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
cfg.num_rings = priv->num_xdp_queues;
err = gve_tx_alloc_rings_gqi(priv, &cfg);
if (err) if (err)
return err; return err;
add_napi_init_xdp_sync_stats(priv, gve_napi_poll);
gve_tx_start_rings(priv, cfg.start_idx, cfg.num_rings);
init_xdp_sync_stats(priv);
return 0; return 0;
} }
static int gve_alloc_rings(struct gve_priv *priv) static int gve_alloc_rings(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{ {
int err; int err;
/* Setup tx rings */
priv->tx = kvcalloc(priv->tx_cfg.max_queues, sizeof(*priv->tx),
GFP_KERNEL);
if (!priv->tx)
return -ENOMEM;
if (gve_is_gqi(priv)) if (gve_is_gqi(priv))
err = gve_tx_alloc_rings(priv, 0, gve_num_tx_queues(priv)); err = gve_tx_alloc_rings_gqi(priv, tx_alloc_cfg);
else else
err = gve_tx_alloc_rings_dqo(priv); err = gve_tx_alloc_rings_dqo(priv, tx_alloc_cfg);
if (err) if (err)
goto free_tx; return err;
/* Setup rx rings */
priv->rx = kvcalloc(priv->rx_cfg.max_queues, sizeof(*priv->rx),
GFP_KERNEL);
if (!priv->rx) {
err = -ENOMEM;
goto free_tx_queue;
}
if (gve_is_gqi(priv)) if (gve_is_gqi(priv))
err = gve_rx_alloc_rings(priv); err = gve_rx_alloc_rings_gqi(priv, rx_alloc_cfg);
else else
err = gve_rx_alloc_rings_dqo(priv); err = gve_rx_alloc_rings_dqo(priv, rx_alloc_cfg);
if (err) if (err)
goto free_rx; goto free_tx;
if (gve_is_gqi(priv))
add_napi_init_sync_stats(priv, gve_napi_poll);
else
add_napi_init_sync_stats(priv, gve_napi_poll_dqo);
return 0; return 0;
free_rx:
kvfree(priv->rx);
priv->rx = NULL;
free_tx_queue:
gve_tx_free_rings(priv, 0, gve_num_tx_queues(priv));
free_tx: free_tx:
kvfree(priv->tx); if (gve_is_gqi(priv))
priv->tx = NULL; gve_tx_free_rings_gqi(priv, tx_alloc_cfg);
else
gve_tx_free_rings_dqo(priv, tx_alloc_cfg);
return err; return err;
} }
...@@ -923,52 +960,30 @@ static int gve_destroy_rings(struct gve_priv *priv) ...@@ -923,52 +960,30 @@ static int gve_destroy_rings(struct gve_priv *priv)
return 0; return 0;
} }
static void gve_rx_free_rings(struct gve_priv *priv)
{
if (gve_is_gqi(priv))
gve_rx_free_rings_gqi(priv);
else
gve_rx_free_rings_dqo(priv);
}
static void gve_free_xdp_rings(struct gve_priv *priv) static void gve_free_xdp_rings(struct gve_priv *priv)
{ {
int ntfy_idx, start_id; struct gve_tx_alloc_rings_cfg cfg = {0};
int i;
gve_tx_get_curr_alloc_cfg(priv, &cfg);
cfg.start_idx = gve_xdp_tx_start_queue_id(priv);
cfg.num_rings = priv->num_xdp_queues;
start_id = gve_xdp_tx_start_queue_id(priv);
if (priv->tx) { if (priv->tx) {
for (i = start_id; i < start_id + priv->num_xdp_queues; i++) { gve_tx_stop_rings(priv, cfg.start_idx, cfg.num_rings);
ntfy_idx = gve_tx_idx_to_ntfy(priv, i); gve_tx_free_rings_gqi(priv, &cfg);
gve_remove_napi(priv, ntfy_idx);
}
gve_tx_free_rings(priv, start_id, priv->num_xdp_queues);
} }
} }
static void gve_free_rings(struct gve_priv *priv) static void gve_free_rings(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *tx_cfg,
struct gve_rx_alloc_rings_cfg *rx_cfg)
{ {
int num_tx_queues = gve_num_tx_queues(priv); if (gve_is_gqi(priv)) {
int ntfy_idx; gve_tx_free_rings_gqi(priv, tx_cfg);
int i; gve_rx_free_rings_gqi(priv, rx_cfg);
} else {
if (priv->tx) { gve_tx_free_rings_dqo(priv, tx_cfg);
for (i = 0; i < num_tx_queues; i++) { gve_rx_free_rings_dqo(priv, rx_cfg);
ntfy_idx = gve_tx_idx_to_ntfy(priv, i);
gve_remove_napi(priv, ntfy_idx);
}
gve_tx_free_rings(priv, 0, num_tx_queues);
kvfree(priv->tx);
priv->tx = NULL;
}
if (priv->rx) {
for (i = 0; i < priv->rx_cfg.num_queues; i++) {
ntfy_idx = gve_rx_idx_to_ntfy(priv, i);
gve_remove_napi(priv, ntfy_idx);
}
gve_rx_free_rings(priv);
kvfree(priv->rx);
priv->rx = NULL;
} }
} }
...@@ -990,21 +1005,13 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev, ...@@ -990,21 +1005,13 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
return 0; return 0;
} }
static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, static int gve_alloc_queue_page_list(struct gve_priv *priv,
int pages) struct gve_queue_page_list *qpl,
u32 id, int pages)
{ {
struct gve_queue_page_list *qpl = &priv->qpls[id];
int err; int err;
int i; int i;
if (pages + priv->num_registered_pages > priv->max_registered_pages) {
netif_err(priv, drv, priv->dev,
"Reached max number of registered pages %llu > %llu\n",
pages + priv->num_registered_pages,
priv->max_registered_pages);
return -EINVAL;
}
qpl->id = id; qpl->id = id;
qpl->num_entries = 0; qpl->num_entries = 0;
qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL); qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
...@@ -1025,7 +1032,6 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id, ...@@ -1025,7 +1032,6 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
return -ENOMEM; return -ENOMEM;
qpl->num_entries++; qpl->num_entries++;
} }
priv->num_registered_pages += pages;
return 0; return 0;
} }
...@@ -1039,9 +1045,10 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma, ...@@ -1039,9 +1045,10 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
put_page(page); put_page(page);
} }
static void gve_free_queue_page_list(struct gve_priv *priv, u32 id) static void gve_free_queue_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl,
int id)
{ {
struct gve_queue_page_list *qpl = &priv->qpls[id];
int i; int i;
if (!qpl->pages) if (!qpl->pages)
...@@ -1058,19 +1065,30 @@ static void gve_free_queue_page_list(struct gve_priv *priv, u32 id) ...@@ -1058,19 +1065,30 @@ static void gve_free_queue_page_list(struct gve_priv *priv, u32 id)
free_pages: free_pages:
kvfree(qpl->pages); kvfree(qpl->pages);
qpl->pages = NULL; qpl->pages = NULL;
priv->num_registered_pages -= qpl->num_entries;
} }
static int gve_alloc_xdp_qpls(struct gve_priv *priv) static void gve_free_n_qpls(struct gve_priv *priv,
struct gve_queue_page_list *qpls,
int start_id,
int num_qpls)
{
int i;
for (i = start_id; i < start_id + num_qpls; i++)
gve_free_queue_page_list(priv, &qpls[i], i);
}
static int gve_alloc_n_qpls(struct gve_priv *priv,
struct gve_queue_page_list *qpls,
int page_count,
int start_id,
int num_qpls)
{ {
int start_id;
int i, j;
int err; int err;
int i;
start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv)); for (i = start_id; i < start_id + num_qpls; i++) {
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) { err = gve_alloc_queue_page_list(priv, &qpls[i], i, page_count);
err = gve_alloc_queue_page_list(priv, i,
priv->tx_pages_per_qpl);
if (err) if (err)
goto free_qpls; goto free_qpls;
} }
...@@ -1078,95 +1096,89 @@ static int gve_alloc_xdp_qpls(struct gve_priv *priv) ...@@ -1078,95 +1096,89 @@ static int gve_alloc_xdp_qpls(struct gve_priv *priv)
return 0; return 0;
free_qpls: free_qpls:
for (j = start_id; j <= i; j++) /* Must include the failing QPL too for gve_alloc_queue_page_list fails
gve_free_queue_page_list(priv, j); * without cleaning up.
*/
gve_free_n_qpls(priv, qpls, start_id, i - start_id + 1);
return err; return err;
} }
static int gve_alloc_qpls(struct gve_priv *priv) static int gve_alloc_qpls(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *cfg)
{ {
int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues; int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
int rx_start_id, tx_num_qpls, rx_num_qpls;
struct gve_queue_page_list *qpls;
int page_count; int page_count;
int start_id;
int i, j;
int err; int err;
if (!gve_is_qpl(priv)) if (cfg->raw_addressing)
return 0; return 0;
priv->qpls = kvcalloc(max_queues, sizeof(*priv->qpls), GFP_KERNEL); qpls = kvcalloc(max_queues, sizeof(*qpls), GFP_KERNEL);
if (!priv->qpls) if (!qpls)
return -ENOMEM; return -ENOMEM;
start_id = gve_tx_start_qpl_id(priv); cfg->qpl_cfg->qpl_map_size = BITS_TO_LONGS(max_queues) *
page_count = priv->tx_pages_per_qpl; sizeof(unsigned long) * BITS_PER_BYTE;
for (i = start_id; i < start_id + gve_num_tx_qpls(priv); i++) { cfg->qpl_cfg->qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
err = gve_alloc_queue_page_list(priv, i, sizeof(unsigned long), GFP_KERNEL);
page_count); if (!cfg->qpl_cfg->qpl_id_map) {
if (err) err = -ENOMEM;
goto free_qpls; goto free_qpl_array;
} }
start_id = gve_rx_start_qpl_id(priv); /* Allocate TX QPLs */
page_count = priv->tx_pages_per_qpl;
tx_num_qpls = gve_num_tx_qpls(cfg->tx_cfg, cfg->num_xdp_queues,
gve_is_qpl(priv));
err = gve_alloc_n_qpls(priv, qpls, page_count, 0, tx_num_qpls);
if (err)
goto free_qpl_map;
/* Allocate RX QPLs */
rx_start_id = gve_rx_start_qpl_id(cfg->tx_cfg);
/* For GQI_QPL number of pages allocated have 1:1 relationship with /* For GQI_QPL number of pages allocated have 1:1 relationship with
* number of descriptors. For DQO, number of pages required are * number of descriptors. For DQO, number of pages required are
* more than descriptors (because of out of order completions). * more than descriptors (because of out of order completions).
*/ */
page_count = priv->queue_format == GVE_GQI_QPL_FORMAT ? page_count = cfg->is_gqi ? priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
priv->rx_data_slot_cnt : priv->rx_pages_per_qpl; rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
for (i = start_id; i < start_id + gve_num_rx_qpls(priv); i++) { err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
err = gve_alloc_queue_page_list(priv, i, if (err)
page_count); goto free_tx_qpls;
if (err)
goto free_qpls;
}
priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(max_queues) *
sizeof(unsigned long) * BITS_PER_BYTE;
priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
sizeof(unsigned long), GFP_KERNEL);
if (!priv->qpl_cfg.qpl_id_map) {
err = -ENOMEM;
goto free_qpls;
}
cfg->qpls = qpls;
return 0; return 0;
free_qpls: free_tx_qpls:
for (j = 0; j <= i; j++) gve_free_n_qpls(priv, qpls, 0, tx_num_qpls);
gve_free_queue_page_list(priv, j); free_qpl_map:
kvfree(priv->qpls); kvfree(cfg->qpl_cfg->qpl_id_map);
priv->qpls = NULL; cfg->qpl_cfg->qpl_id_map = NULL;
free_qpl_array:
kvfree(qpls);
return err; return err;
} }
static void gve_free_xdp_qpls(struct gve_priv *priv) static void gve_free_qpls(struct gve_priv *priv,
{ struct gve_qpls_alloc_cfg *cfg)
int start_id;
int i;
start_id = gve_tx_qpl_id(priv, gve_xdp_tx_start_queue_id(priv));
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++)
gve_free_queue_page_list(priv, i);
}
static void gve_free_qpls(struct gve_priv *priv)
{ {
int max_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues; int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
struct gve_queue_page_list *qpls = cfg->qpls;
int i; int i;
if (!priv->qpls) if (!qpls)
return; return;
kvfree(priv->qpl_cfg.qpl_id_map); kvfree(cfg->qpl_cfg->qpl_id_map);
priv->qpl_cfg.qpl_id_map = NULL; cfg->qpl_cfg->qpl_id_map = NULL;
for (i = 0; i < max_queues; i++) for (i = 0; i < max_queues; i++)
gve_free_queue_page_list(priv, i); gve_free_queue_page_list(priv, &qpls[i], i);
kvfree(priv->qpls); kvfree(qpls);
priv->qpls = NULL; cfg->qpls = NULL;
} }
/* Use this to schedule a reset when the device is capable of continuing /* Use this to schedule a reset when the device is capable of continuing
...@@ -1277,8 +1289,72 @@ static void gve_drain_page_cache(struct gve_priv *priv) ...@@ -1277,8 +1289,72 @@ static void gve_drain_page_cache(struct gve_priv *priv)
} }
} }
static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *cfg)
{
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->is_gqi = gve_is_gqi(priv);
cfg->num_xdp_queues = priv->num_xdp_queues;
cfg->qpl_cfg = &priv->qpl_cfg;
cfg->tx_cfg = &priv->tx_cfg;
cfg->rx_cfg = &priv->rx_cfg;
cfg->qpls = priv->qpls;
}
static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{
cfg->qcfg = &priv->rx_cfg;
cfg->qcfg_tx = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->qpls = priv->qpls;
cfg->qpl_cfg = &priv->qpl_cfg;
cfg->ring_size = priv->rx_desc_cnt;
cfg->rx = priv->rx;
}
static void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg);
gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
}
static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
{
int i;
for (i = 0; i < num_rings; i++) {
if (gve_is_gqi(priv))
gve_rx_start_ring_gqi(priv, i);
else
gve_rx_start_ring_dqo(priv, i);
}
}
static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
{
int i;
if (!priv->rx)
return;
for (i = 0; i < num_rings; i++) {
if (gve_is_gqi(priv))
gve_rx_stop_ring_gqi(priv, i);
else
gve_rx_stop_ring_dqo(priv, i);
}
}
static int gve_open(struct net_device *dev) static int gve_open(struct net_device *dev)
{ {
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(dev); struct gve_priv *priv = netdev_priv(dev);
int err; int err;
...@@ -1287,14 +1363,22 @@ static int gve_open(struct net_device *dev) ...@@ -1287,14 +1363,22 @@ static int gve_open(struct net_device *dev)
else else
priv->num_xdp_queues = 0; priv->num_xdp_queues = 0;
err = gve_alloc_qpls(priv); gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
err = gve_alloc_qpls(priv, &qpls_alloc_cfg);
if (err) if (err)
return err; return err;
priv->qpls = qpls_alloc_cfg.qpls;
err = gve_alloc_rings(priv); tx_alloc_cfg.qpls = priv->qpls;
rx_alloc_cfg.qpls = priv->qpls;
err = gve_alloc_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err) if (err)
goto free_qpls; goto free_qpls;
gve_tx_start_rings(priv, 0, tx_alloc_cfg.num_rings);
gve_rx_start_rings(priv, rx_alloc_cfg.qcfg->num_queues);
gve_init_sync_stats(priv);
err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues); err = netif_set_real_num_tx_queues(dev, priv->tx_cfg.num_queues);
if (err) if (err)
goto free_rings; goto free_rings;
...@@ -1333,9 +1417,11 @@ static int gve_open(struct net_device *dev) ...@@ -1333,9 +1417,11 @@ static int gve_open(struct net_device *dev)
return 0; return 0;
free_rings: free_rings:
gve_free_rings(priv); gve_tx_stop_rings(priv, 0, tx_alloc_cfg.num_rings);
gve_rx_stop_rings(priv, rx_alloc_cfg.qcfg->num_queues);
gve_free_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
free_qpls: free_qpls:
gve_free_qpls(priv); gve_free_qpls(priv, &qpls_alloc_cfg);
return err; return err;
reset: reset:
...@@ -1354,6 +1440,9 @@ static int gve_open(struct net_device *dev) ...@@ -1354,6 +1440,9 @@ static int gve_open(struct net_device *dev)
static int gve_close(struct net_device *dev) static int gve_close(struct net_device *dev)
{ {
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(dev); struct gve_priv *priv = netdev_priv(dev);
int err; int err;
...@@ -1372,8 +1461,14 @@ static int gve_close(struct net_device *dev) ...@@ -1372,8 +1461,14 @@ static int gve_close(struct net_device *dev)
del_timer_sync(&priv->stats_report_timer); del_timer_sync(&priv->stats_report_timer);
gve_unreg_xdp_info(priv); gve_unreg_xdp_info(priv);
gve_free_rings(priv);
gve_free_qpls(priv); gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
&tx_alloc_cfg, &rx_alloc_cfg);
gve_tx_stop_rings(priv, 0, tx_alloc_cfg.num_rings);
gve_rx_stop_rings(priv, rx_alloc_cfg.qcfg->num_queues);
gve_free_rings(priv, &tx_alloc_cfg, &rx_alloc_cfg);
gve_free_qpls(priv, &qpls_alloc_cfg);
priv->interface_down_cnt++; priv->interface_down_cnt++;
return 0; return 0;
...@@ -1390,8 +1485,11 @@ static int gve_close(struct net_device *dev) ...@@ -1390,8 +1485,11 @@ static int gve_close(struct net_device *dev)
static int gve_remove_xdp_queues(struct gve_priv *priv) static int gve_remove_xdp_queues(struct gve_priv *priv)
{ {
int qpl_start_id;
int err; int err;
qpl_start_id = gve_xdp_tx_start_queue_id(priv);
err = gve_destroy_xdp_rings(priv); err = gve_destroy_xdp_rings(priv);
if (err) if (err)
return err; return err;
...@@ -1402,18 +1500,22 @@ static int gve_remove_xdp_queues(struct gve_priv *priv) ...@@ -1402,18 +1500,22 @@ static int gve_remove_xdp_queues(struct gve_priv *priv)
gve_unreg_xdp_info(priv); gve_unreg_xdp_info(priv);
gve_free_xdp_rings(priv); gve_free_xdp_rings(priv);
gve_free_xdp_qpls(priv);
gve_free_n_qpls(priv, priv->qpls, qpl_start_id, gve_num_xdp_qpls(priv));
priv->num_xdp_queues = 0; priv->num_xdp_queues = 0;
return 0; return 0;
} }
static int gve_add_xdp_queues(struct gve_priv *priv) static int gve_add_xdp_queues(struct gve_priv *priv)
{ {
int start_id;
int err; int err;
priv->num_xdp_queues = priv->tx_cfg.num_queues; priv->num_xdp_queues = priv->rx_cfg.num_queues;
err = gve_alloc_xdp_qpls(priv); start_id = gve_xdp_tx_start_queue_id(priv);
err = gve_alloc_n_qpls(priv, priv->qpls, priv->tx_pages_per_qpl,
start_id, gve_num_xdp_qpls(priv));
if (err) if (err)
goto err; goto err;
...@@ -1438,7 +1540,7 @@ static int gve_add_xdp_queues(struct gve_priv *priv) ...@@ -1438,7 +1540,7 @@ static int gve_add_xdp_queues(struct gve_priv *priv)
free_xdp_rings: free_xdp_rings:
gve_free_xdp_rings(priv); gve_free_xdp_rings(priv);
free_xdp_qpls: free_xdp_qpls:
gve_free_xdp_qpls(priv); gve_free_n_qpls(priv, priv->qpls, start_id, gve_num_xdp_qpls(priv));
err: err:
priv->num_xdp_queues = 0; priv->num_xdp_queues = 0;
return err; return err;
...@@ -2037,6 +2139,8 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) ...@@ -2037,6 +2139,8 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
goto err; goto err;
} }
priv->num_registered_pages = 0;
if (skip_describe_device) if (skip_describe_device)
goto setup_device; goto setup_device;
...@@ -2066,7 +2170,6 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device) ...@@ -2066,7 +2170,6 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
if (!gve_is_gqi(priv)) if (!gve_is_gqi(priv))
netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX); netif_set_tso_max_size(priv->dev, GVE_DQO_TX_MAX);
priv->num_registered_pages = 0;
priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK; priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
/* gvnic has one Notification Block per MSI-x vector, except for the /* gvnic has one Notification Block per MSI-x vector, except for the
* management vector * management vector
......
...@@ -23,7 +23,9 @@ static void gve_rx_free_buffer(struct device *dev, ...@@ -23,7 +23,9 @@ static void gve_rx_free_buffer(struct device *dev,
gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE); gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
} }
static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx) static void gve_rx_unfill_pages(struct gve_priv *priv,
struct gve_rx_ring *rx,
struct gve_rx_alloc_rings_cfg *cfg)
{ {
u32 slots = rx->mask + 1; u32 slots = rx->mask + 1;
int i; int i;
...@@ -36,7 +38,7 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx) ...@@ -36,7 +38,7 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
for (i = 0; i < slots; i++) for (i = 0; i < slots; i++)
page_ref_sub(rx->data.page_info[i].page, page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1); rx->data.page_info[i].pagecnt_bias - 1);
gve_unassign_qpl(priv, rx->data.qpl->id); gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
rx->data.qpl = NULL; rx->data.qpl = NULL;
for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) { for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
...@@ -49,16 +51,26 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx) ...@@ -49,16 +51,26 @@ static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
rx->data.page_info = NULL; rx->data.page_info = NULL;
} }
static void gve_rx_free_ring(struct gve_priv *priv, int idx) void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
if (!gve_rx_was_added_to_block(priv, idx))
return;
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
}
static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_alloc_rings_cfg *cfg)
{ {
struct gve_rx_ring *rx = &priv->rx[idx];
struct device *dev = &priv->pdev->dev; struct device *dev = &priv->pdev->dev;
u32 slots = rx->mask + 1; u32 slots = rx->mask + 1;
int idx = rx->q_num;
size_t bytes; size_t bytes;
gve_rx_remove_from_block(priv, idx); bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus); dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
rx->desc.desc_ring = NULL; rx->desc.desc_ring = NULL;
...@@ -66,7 +78,7 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx) ...@@ -66,7 +78,7 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx)
rx->q_resources, rx->q_resources_bus); rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL; rx->q_resources = NULL;
gve_rx_unfill_pages(priv, rx); gve_rx_unfill_pages(priv, rx, cfg);
bytes = sizeof(*rx->data.data_ring) * slots; bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(dev, bytes, rx->data.data_ring, dma_free_coherent(dev, bytes, rx->data.data_ring,
...@@ -108,7 +120,8 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev, ...@@ -108,7 +120,8 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
return 0; return 0;
} }
static int gve_prefill_rx_pages(struct gve_rx_ring *rx) static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
struct gve_rx_alloc_rings_cfg *cfg)
{ {
struct gve_priv *priv = rx->gve; struct gve_priv *priv = rx->gve;
u32 slots; u32 slots;
...@@ -127,7 +140,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx) ...@@ -127,7 +140,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
return -ENOMEM; return -ENOMEM;
if (!rx->data.raw_addressing) { if (!rx->data.raw_addressing) {
rx->data.qpl = gve_assign_rx_qpl(priv, rx->q_num); rx->data.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
if (!rx->data.qpl) { if (!rx->data.qpl) {
kvfree(rx->data.page_info); kvfree(rx->data.page_info);
rx->data.page_info = NULL; rx->data.page_info = NULL;
...@@ -185,7 +198,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx) ...@@ -185,7 +198,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
page_ref_sub(rx->data.page_info[i].page, page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1); rx->data.page_info[i].pagecnt_bias - 1);
gve_unassign_qpl(priv, rx->data.qpl->id); gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
rx->data.qpl = NULL; rx->data.qpl = NULL;
return err; return err;
...@@ -207,13 +220,23 @@ static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx) ...@@ -207,13 +220,23 @@ static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
ctx->drop_pkt = false; ctx->drop_pkt = false;
} }
static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
gve_rx_add_to_block(priv, idx);
gve_add_napi(priv, ntfy_idx, gve_napi_poll);
}
static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg,
struct gve_rx_ring *rx,
int idx)
{ {
struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev; struct device *hdev = &priv->pdev->dev;
u32 slots = priv->rx_data_slot_cnt;
int filled_pages; int filled_pages;
size_t bytes; size_t bytes;
u32 slots;
int err; int err;
netif_dbg(priv, drv, priv->dev, "allocating rx ring\n"); netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
...@@ -223,9 +246,8 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) ...@@ -223,9 +246,8 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
rx->gve = priv; rx->gve = priv;
rx->q_num = idx; rx->q_num = idx;
slots = priv->rx_data_slot_cnt;
rx->mask = slots - 1; rx->mask = slots - 1;
rx->data.raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; rx->data.raw_addressing = cfg->raw_addressing;
/* alloc rx data ring */ /* alloc rx data ring */
bytes = sizeof(*rx->data.data_ring) * slots; bytes = sizeof(*rx->data.data_ring) * slots;
...@@ -246,7 +268,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) ...@@ -246,7 +268,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
goto abort_with_slots; goto abort_with_slots;
} }
filled_pages = gve_prefill_rx_pages(rx); filled_pages = gve_rx_prefill_pages(rx, cfg);
if (filled_pages < 0) { if (filled_pages < 0) {
err = -ENOMEM; err = -ENOMEM;
goto abort_with_copy_pool; goto abort_with_copy_pool;
...@@ -269,7 +291,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) ...@@ -269,7 +291,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
(unsigned long)rx->data.data_bus); (unsigned long)rx->data.data_bus);
/* alloc rx desc ring */ /* alloc rx desc ring */
bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt; bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus, rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
GFP_KERNEL); GFP_KERNEL);
if (!rx->desc.desc_ring) { if (!rx->desc.desc_ring) {
...@@ -277,15 +299,11 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) ...@@ -277,15 +299,11 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
goto abort_with_q_resources; goto abort_with_q_resources;
} }
rx->cnt = 0; rx->cnt = 0;
rx->db_threshold = priv->rx_desc_cnt / 2; rx->db_threshold = slots / 2;
rx->desc.seqno = 1; rx->desc.seqno = 1;
/* Allocating half-page buffers allows page-flipping which is faster
* than copying or allocating new pages.
*/
rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE; rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx); gve_rx_ctx_clear(&rx->ctx);
gve_rx_add_to_block(priv, idx);
return 0; return 0;
...@@ -294,7 +312,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) ...@@ -294,7 +312,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
rx->q_resources, rx->q_resources_bus); rx->q_resources, rx->q_resources_bus);
rx->q_resources = NULL; rx->q_resources = NULL;
abort_filled: abort_filled:
gve_rx_unfill_pages(priv, rx); gve_rx_unfill_pages(priv, rx, cfg);
abort_with_copy_pool: abort_with_copy_pool:
kvfree(rx->qpl_copy_pool); kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL; rx->qpl_copy_pool = NULL;
...@@ -306,36 +324,58 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx) ...@@ -306,36 +324,58 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
return err; return err;
} }
int gve_rx_alloc_rings(struct gve_priv *priv) int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{ {
struct gve_rx_ring *rx;
int err = 0; int err = 0;
int i; int i, j;
for (i = 0; i < priv->rx_cfg.num_queues; i++) { if (!cfg->raw_addressing && !cfg->qpls) {
err = gve_rx_alloc_ring(priv, i); netif_err(priv, drv, priv->dev,
"Cannot alloc QPL ring before allocing QPLs\n");
return -EINVAL;
}
rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
return -ENOMEM;
for (i = 0; i < cfg->qcfg->num_queues; i++) {
err = gve_rx_alloc_ring_gqi(priv, cfg, &rx[i], i);
if (err) { if (err) {
netif_err(priv, drv, priv->dev, netif_err(priv, drv, priv->dev,
"Failed to alloc rx ring=%d: err=%d\n", "Failed to alloc rx ring=%d: err=%d\n",
i, err); i, err);
break; goto cleanup;
} }
} }
/* Unallocate if there was an error */
if (err) {
int j;
for (j = 0; j < i; j++) cfg->rx = rx;
gve_rx_free_ring(priv, j); return 0;
}
cleanup:
for (j = 0; j < i; j++)
gve_rx_free_ring_gqi(priv, &rx[j], cfg);
kvfree(rx);
return err; return err;
} }
void gve_rx_free_rings_gqi(struct gve_priv *priv) void gve_rx_free_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{ {
struct gve_rx_ring *rx = cfg->rx;
int i; int i;
for (i = 0; i < priv->rx_cfg.num_queues; i++) if (!rx)
gve_rx_free_ring(priv, i); return;
for (i = 0; i < cfg->qcfg->num_queues; i++)
gve_rx_free_ring_gqi(priv, &rx[i], cfg);
kvfree(rx);
cfg->rx = NULL;
} }
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx) void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
......
...@@ -199,20 +199,30 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx, ...@@ -199,20 +199,30 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
return 0; return 0;
} }
static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx) void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
if (!gve_rx_was_added_to_block(priv, idx))
return;
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
}
static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
struct gve_rx_alloc_rings_cfg *cfg)
{ {
struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev; struct device *hdev = &priv->pdev->dev;
size_t completion_queue_slots; size_t completion_queue_slots;
size_t buffer_queue_slots; size_t buffer_queue_slots;
int idx = rx->q_num;
size_t size; size_t size;
int i; int i;
completion_queue_slots = rx->dqo.complq.mask + 1; completion_queue_slots = rx->dqo.complq.mask + 1;
buffer_queue_slots = rx->dqo.bufq.mask + 1; buffer_queue_slots = rx->dqo.bufq.mask + 1;
gve_rx_remove_from_block(priv, idx);
if (rx->q_resources) { if (rx->q_resources) {
dma_free_coherent(hdev, sizeof(*rx->q_resources), dma_free_coherent(hdev, sizeof(*rx->q_resources),
rx->q_resources, rx->q_resources_bus); rx->q_resources, rx->q_resources_bus);
...@@ -226,7 +236,7 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx) ...@@ -226,7 +236,7 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
gve_free_page_dqo(priv, bs, !rx->dqo.qpl); gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
} }
if (rx->dqo.qpl) { if (rx->dqo.qpl) {
gve_unassign_qpl(priv, rx->dqo.qpl->id); gve_unassign_qpl(cfg->qpl_cfg, rx->dqo.qpl->id);
rx->dqo.qpl = NULL; rx->dqo.qpl = NULL;
} }
...@@ -251,17 +261,26 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx) ...@@ -251,17 +261,26 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, int idx)
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx); netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
} }
static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
gve_rx_add_to_block(priv, idx);
gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
}
static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg,
struct gve_rx_ring *rx,
int idx)
{ {
struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev; struct device *hdev = &priv->pdev->dev;
size_t size; size_t size;
int i; int i;
const u32 buffer_queue_slots = const u32 buffer_queue_slots = cfg->raw_addressing ?
priv->queue_format == GVE_DQO_RDA_FORMAT ? priv->options_dqo_rda.rx_buff_ring_entries : cfg->ring_size;
priv->options_dqo_rda.rx_buff_ring_entries : priv->rx_desc_cnt; const u32 completion_queue_slots = cfg->ring_size;
const u32 completion_queue_slots = priv->rx_desc_cnt;
netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n"); netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
...@@ -274,7 +293,7 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) ...@@ -274,7 +293,7 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
rx->ctx.skb_head = NULL; rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL; rx->ctx.skb_tail = NULL;
rx->dqo.num_buf_states = priv->queue_format == GVE_DQO_RDA_FORMAT ? rx->dqo.num_buf_states = cfg->raw_addressing ?
min_t(s16, S16_MAX, buffer_queue_slots * 4) : min_t(s16, S16_MAX, buffer_queue_slots * 4) :
priv->rx_pages_per_qpl; priv->rx_pages_per_qpl;
rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states, rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
...@@ -308,8 +327,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) ...@@ -308,8 +327,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!rx->dqo.bufq.desc_ring) if (!rx->dqo.bufq.desc_ring)
goto err; goto err;
if (priv->queue_format != GVE_DQO_RDA_FORMAT) { if (!cfg->raw_addressing) {
rx->dqo.qpl = gve_assign_rx_qpl(priv, rx->q_num); rx->dqo.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
if (!rx->dqo.qpl) if (!rx->dqo.qpl)
goto err; goto err;
rx->dqo.next_qpl_page_idx = 0; rx->dqo.next_qpl_page_idx = 0;
...@@ -320,12 +339,10 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx) ...@@ -320,12 +339,10 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!rx->q_resources) if (!rx->q_resources)
goto err; goto err;
gve_rx_add_to_block(priv, idx);
return 0; return 0;
err: err:
gve_rx_free_ring_dqo(priv, idx); gve_rx_free_ring_dqo(priv, rx, cfg);
return -ENOMEM; return -ENOMEM;
} }
...@@ -337,13 +354,26 @@ void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx) ...@@ -337,13 +354,26 @@ void gve_rx_write_doorbell_dqo(const struct gve_priv *priv, int queue_idx)
iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]); iowrite32(rx->dqo.bufq.tail, &priv->db_bar2[index]);
} }
int gve_rx_alloc_rings_dqo(struct gve_priv *priv) int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{ {
int err = 0; struct gve_rx_ring *rx;
int err;
int i; int i;
for (i = 0; i < priv->rx_cfg.num_queues; i++) { if (!cfg->raw_addressing && !cfg->qpls) {
err = gve_rx_alloc_ring_dqo(priv, i); netif_err(priv, drv, priv->dev,
"Cannot alloc QPL ring before allocing QPLs\n");
return -EINVAL;
}
rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
return -ENOMEM;
for (i = 0; i < cfg->qcfg->num_queues; i++) {
err = gve_rx_alloc_ring_dqo(priv, cfg, &rx[i], i);
if (err) { if (err) {
netif_err(priv, drv, priv->dev, netif_err(priv, drv, priv->dev,
"Failed to alloc rx ring=%d: err=%d\n", "Failed to alloc rx ring=%d: err=%d\n",
...@@ -352,21 +382,30 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv) ...@@ -352,21 +382,30 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv)
} }
} }
cfg->rx = rx;
return 0; return 0;
err: err:
for (i--; i >= 0; i--) for (i--; i >= 0; i--)
gve_rx_free_ring_dqo(priv, i); gve_rx_free_ring_dqo(priv, &rx[i], cfg);
kvfree(rx);
return err; return err;
} }
void gve_rx_free_rings_dqo(struct gve_priv *priv) void gve_rx_free_rings_dqo(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{ {
struct gve_rx_ring *rx = cfg->rx;
int i; int i;
for (i = 0; i < priv->rx_cfg.num_queues; i++) if (!rx)
gve_rx_free_ring_dqo(priv, i); return;
for (i = 0; i < cfg->qcfg->num_queues; i++)
gve_rx_free_ring_dqo(priv, &rx[i], cfg);
kvfree(rx);
cfg->rx = NULL;
} }
void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx) void gve_rx_post_buffers_dqo(struct gve_rx_ring *rx)
......
...@@ -196,29 +196,36 @@ static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx, ...@@ -196,29 +196,36 @@ static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx, static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 to_do, bool try_to_wake); u32 to_do, bool try_to_wake);
static void gve_tx_free_ring(struct gve_priv *priv, int idx) void gve_tx_stop_ring_gqi(struct gve_priv *priv, int idx)
{ {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx]; struct gve_tx_ring *tx = &priv->tx[idx];
if (!gve_tx_was_added_to_block(priv, idx))
return;
gve_remove_napi(priv, ntfy_idx);
gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
netdev_tx_reset_queue(tx->netdev_txq);
gve_tx_remove_from_block(priv, idx);
}
static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
struct gve_tx_alloc_rings_cfg *cfg)
{
struct device *hdev = &priv->pdev->dev; struct device *hdev = &priv->pdev->dev;
int idx = tx->q_num;
size_t bytes; size_t bytes;
u32 slots; u32 slots;
gve_tx_remove_from_block(priv, idx);
slots = tx->mask + 1; slots = tx->mask + 1;
if (tx->q_num < priv->tx_cfg.num_queues) {
gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
netdev_tx_reset_queue(tx->netdev_txq);
} else {
gve_clean_xdp_done(priv, tx, priv->tx_desc_cnt);
}
dma_free_coherent(hdev, sizeof(*tx->q_resources), dma_free_coherent(hdev, sizeof(*tx->q_resources),
tx->q_resources, tx->q_resources_bus); tx->q_resources, tx->q_resources_bus);
tx->q_resources = NULL; tx->q_resources = NULL;
if (!tx->raw_addressing) { if (!tx->raw_addressing) {
gve_tx_fifo_release(priv, &tx->tx_fifo); gve_tx_fifo_release(priv, &tx->tx_fifo);
gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
tx->tx_fifo.qpl = NULL; tx->tx_fifo.qpl = NULL;
} }
...@@ -232,11 +239,23 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx) ...@@ -232,11 +239,23 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx); netif_dbg(priv, drv, priv->dev, "freed tx queue %d\n", idx);
} }
static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) void gve_tx_start_ring_gqi(struct gve_priv *priv, int idx)
{ {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx]; struct gve_tx_ring *tx = &priv->tx[idx];
gve_tx_add_to_block(priv, idx);
tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
gve_add_napi(priv, ntfy_idx, gve_napi_poll);
}
static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg,
struct gve_tx_ring *tx,
int idx)
{
struct device *hdev = &priv->pdev->dev; struct device *hdev = &priv->pdev->dev;
u32 slots = priv->tx_desc_cnt;
size_t bytes; size_t bytes;
/* Make sure everything is zeroed to start */ /* Make sure everything is zeroed to start */
...@@ -245,23 +264,23 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) ...@@ -245,23 +264,23 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
spin_lock_init(&tx->xdp_lock); spin_lock_init(&tx->xdp_lock);
tx->q_num = idx; tx->q_num = idx;
tx->mask = slots - 1; tx->mask = cfg->ring_size - 1;
/* alloc metadata */ /* alloc metadata */
tx->info = vcalloc(slots, sizeof(*tx->info)); tx->info = vcalloc(cfg->ring_size, sizeof(*tx->info));
if (!tx->info) if (!tx->info)
return -ENOMEM; return -ENOMEM;
/* alloc tx queue */ /* alloc tx queue */
bytes = sizeof(*tx->desc) * slots; bytes = sizeof(*tx->desc) * cfg->ring_size;
tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL); tx->desc = dma_alloc_coherent(hdev, bytes, &tx->bus, GFP_KERNEL);
if (!tx->desc) if (!tx->desc)
goto abort_with_info; goto abort_with_info;
tx->raw_addressing = priv->queue_format == GVE_GQI_RDA_FORMAT; tx->raw_addressing = cfg->raw_addressing;
tx->dev = &priv->pdev->dev; tx->dev = hdev;
if (!tx->raw_addressing) { if (!tx->raw_addressing) {
tx->tx_fifo.qpl = gve_assign_tx_qpl(priv, idx); tx->tx_fifo.qpl = gve_assign_tx_qpl(cfg, idx);
if (!tx->tx_fifo.qpl) if (!tx->tx_fifo.qpl)
goto abort_with_desc; goto abort_with_desc;
/* map Tx FIFO */ /* map Tx FIFO */
...@@ -277,12 +296,6 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) ...@@ -277,12 +296,6 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
if (!tx->q_resources) if (!tx->q_resources)
goto abort_with_fifo; goto abort_with_fifo;
netif_dbg(priv, drv, priv->dev, "tx[%d]->bus=%lx\n", idx,
(unsigned long)tx->bus);
if (idx < priv->tx_cfg.num_queues)
tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
gve_tx_add_to_block(priv, idx);
return 0; return 0;
abort_with_fifo: abort_with_fifo:
...@@ -290,7 +303,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) ...@@ -290,7 +303,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
gve_tx_fifo_release(priv, &tx->tx_fifo); gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl: abort_with_qpl:
if (!tx->raw_addressing) if (!tx->raw_addressing)
gve_unassign_qpl(priv, tx->tx_fifo.qpl->id); gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
abort_with_desc: abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus); dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL; tx->desc = NULL;
...@@ -300,36 +313,73 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx) ...@@ -300,36 +313,73 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
return -ENOMEM; return -ENOMEM;
} }
int gve_tx_alloc_rings(struct gve_priv *priv, int start_id, int num_rings) int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{ {
struct gve_tx_ring *tx = cfg->tx;
int err = 0; int err = 0;
int i; int i, j;
if (!cfg->raw_addressing && !cfg->qpls) {
netif_err(priv, drv, priv->dev,
"Cannot alloc QPL ring before allocing QPLs\n");
return -EINVAL;
}
if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
return -EINVAL;
}
if (cfg->start_idx == 0) {
tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
GFP_KERNEL);
if (!tx)
return -ENOMEM;
} else if (!tx) {
netif_err(priv, drv, priv->dev,
"Cannot alloc tx rings from a nonzero start idx without tx array\n");
return -EINVAL;
}
for (i = start_id; i < start_id + num_rings; i++) { for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
err = gve_tx_alloc_ring(priv, i); err = gve_tx_alloc_ring_gqi(priv, cfg, &tx[i], i);
if (err) { if (err) {
netif_err(priv, drv, priv->dev, netif_err(priv, drv, priv->dev,
"Failed to alloc tx ring=%d: err=%d\n", "Failed to alloc tx ring=%d: err=%d\n",
i, err); i, err);
break; goto cleanup;
} }
} }
/* Unallocate if there was an error */
if (err) {
int j;
for (j = start_id; j < i; j++) cfg->tx = tx;
gve_tx_free_ring(priv, j); return 0;
}
cleanup:
for (j = 0; j < i; j++)
gve_tx_free_ring_gqi(priv, &tx[j], cfg);
if (cfg->start_idx == 0)
kvfree(tx);
return err; return err;
} }
void gve_tx_free_rings_gqi(struct gve_priv *priv, int start_id, int num_rings) void gve_tx_free_rings_gqi(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{ {
struct gve_tx_ring *tx = cfg->tx;
int i; int i;
for (i = start_id; i < start_id + num_rings; i++) if (!tx)
gve_tx_free_ring(priv, i); return;
for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
gve_tx_free_ring_gqi(priv, &tx[i], cfg);
if (cfg->start_idx == 0) {
kvfree(tx);
cfg->tx = NULL;
}
} }
/* gve_tx_avail - Calculates the number of slots available in the ring /* gve_tx_avail - Calculates the number of slots available in the ring
......
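(Illustrative sketch, not part of the patch.) Composing the new GQI tx entry points: the alloc path fills cfg->tx instead of priv->tx, while gve_tx_start_ring_gqi() dereferences priv->tx[idx], so the caller has to install the returned array into priv before starting rings. The sketch below assumes that wiring; the real caller lives in gve_main.c and is outside this excerpt.

/* Sketch only: allocate a full set of GQI tx rings from a config and
 * start them. gve_tx_alloc_rings_gqi() unwinds its own partial
 * allocations on failure, so no extra cleanup is needed here.
 */
static int example_bring_up_tx_gqi(struct gve_priv *priv,
				   struct gve_tx_alloc_rings_cfg *cfg)
{
	int err, i;

	cfg->start_idx = 0;			/* allocate the whole array */
	err = gve_tx_alloc_rings_gqi(priv, cfg);
	if (err)
		return err;

	priv->tx = cfg->tx;	/* assumed install step before starting rings */
	for (i = 0; i < cfg->num_rings; i++)
		gve_tx_start_ring_gqi(priv, i);

	return 0;
}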
...@@ -188,13 +188,27 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx) ...@@ -188,13 +188,27 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
} }
} }
static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx) void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx)
{ {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx]; struct gve_tx_ring *tx = &priv->tx[idx];
struct device *hdev = &priv->pdev->dev;
size_t bytes;
if (!gve_tx_was_added_to_block(priv, idx))
return;
gve_remove_napi(priv, ntfy_idx);
gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL);
netdev_tx_reset_queue(tx->netdev_txq);
gve_tx_clean_pending_packets(tx);
gve_tx_remove_from_block(priv, idx); gve_tx_remove_from_block(priv, idx);
}
static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct gve_tx_alloc_rings_cfg *cfg)
{
struct device *hdev = &priv->pdev->dev;
int idx = tx->q_num;
size_t bytes;
if (tx->q_resources) { if (tx->q_resources) {
dma_free_coherent(hdev, sizeof(*tx->q_resources), dma_free_coherent(hdev, sizeof(*tx->q_resources),
...@@ -223,7 +237,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx) ...@@ -223,7 +237,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, int idx)
tx->dqo.tx_qpl_buf_next = NULL; tx->dqo.tx_qpl_buf_next = NULL;
if (tx->dqo.qpl) { if (tx->dqo.qpl) {
gve_unassign_qpl(priv, tx->dqo.qpl->id); gve_unassign_qpl(cfg->qpl_cfg, tx->dqo.qpl->id);
tx->dqo.qpl = NULL; tx->dqo.qpl = NULL;
} }
...@@ -253,9 +267,22 @@ static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx) ...@@ -253,9 +267,22 @@ static int gve_tx_qpl_buf_init(struct gve_tx_ring *tx)
return 0; return 0;
} }
static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx) void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx)
{ {
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_tx_ring *tx = &priv->tx[idx]; struct gve_tx_ring *tx = &priv->tx[idx];
gve_tx_add_to_block(priv, idx);
tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
}
static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg,
struct gve_tx_ring *tx,
int idx)
{
struct device *hdev = &priv->pdev->dev; struct device *hdev = &priv->pdev->dev;
int num_pending_packets; int num_pending_packets;
size_t bytes; size_t bytes;
...@@ -263,12 +290,11 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx) ...@@ -263,12 +290,11 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
memset(tx, 0, sizeof(*tx)); memset(tx, 0, sizeof(*tx));
tx->q_num = idx; tx->q_num = idx;
tx->dev = &priv->pdev->dev; tx->dev = hdev;
tx->netdev_txq = netdev_get_tx_queue(priv->dev, idx);
atomic_set_release(&tx->dqo_compl.hw_tx_head, 0); atomic_set_release(&tx->dqo_compl.hw_tx_head, 0);
/* Queue sizes must be a power of 2 */ /* Queue sizes must be a power of 2 */
tx->mask = priv->tx_desc_cnt - 1; tx->mask = cfg->ring_size - 1;
tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ? tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
priv->options_dqo_rda.tx_comp_ring_entries - 1 : priv->options_dqo_rda.tx_comp_ring_entries - 1 :
tx->mask; tx->mask;
...@@ -327,8 +353,8 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx) ...@@ -327,8 +353,8 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
if (!tx->q_resources) if (!tx->q_resources)
goto err; goto err;
if (gve_is_qpl(priv)) { if (!cfg->raw_addressing) {
tx->dqo.qpl = gve_assign_tx_qpl(priv, idx); tx->dqo.qpl = gve_assign_tx_qpl(cfg, idx);
if (!tx->dqo.qpl) if (!tx->dqo.qpl)
goto err; goto err;
...@@ -336,22 +362,45 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx) ...@@ -336,22 +362,45 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int idx)
goto err; goto err;
} }
gve_tx_add_to_block(priv, idx);
return 0; return 0;
err: err:
gve_tx_free_ring_dqo(priv, idx); gve_tx_free_ring_dqo(priv, tx, cfg);
return -ENOMEM; return -ENOMEM;
} }
int gve_tx_alloc_rings_dqo(struct gve_priv *priv) int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{ {
struct gve_tx_ring *tx = cfg->tx;
int err = 0; int err = 0;
int i; int i, j;
for (i = 0; i < priv->tx_cfg.num_queues; i++) { if (!cfg->raw_addressing && !cfg->qpls) {
err = gve_tx_alloc_ring_dqo(priv, i); netif_err(priv, drv, priv->dev,
"Cannot alloc QPL ring before allocing QPLs\n");
return -EINVAL;
}
if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
return -EINVAL;
}
if (cfg->start_idx == 0) {
tx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_tx_ring),
GFP_KERNEL);
if (!tx)
return -ENOMEM;
} else if (!tx) {
netif_err(priv, drv, priv->dev,
"Cannot alloc tx rings from a nonzero start idx without tx array\n");
return -EINVAL;
}
for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++) {
err = gve_tx_alloc_ring_dqo(priv, cfg, &tx[i], i);
if (err) { if (err) {
netif_err(priv, drv, priv->dev, netif_err(priv, drv, priv->dev,
"Failed to alloc tx ring=%d: err=%d\n", "Failed to alloc tx ring=%d: err=%d\n",
...@@ -360,27 +409,32 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv) ...@@ -360,27 +409,32 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv)
} }
} }
cfg->tx = tx;
return 0; return 0;
err: err:
for (i--; i >= 0; i--) for (j = 0; j < i; j++)
gve_tx_free_ring_dqo(priv, i); gve_tx_free_ring_dqo(priv, &tx[j], cfg);
if (cfg->start_idx == 0)
kvfree(tx);
return err; return err;
} }
void gve_tx_free_rings_dqo(struct gve_priv *priv) void gve_tx_free_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg)
{ {
struct gve_tx_ring *tx = cfg->tx;
int i; int i;
for (i = 0; i < priv->tx_cfg.num_queues; i++) { if (!tx)
struct gve_tx_ring *tx = &priv->tx[i]; return;
gve_clean_tx_done_dqo(priv, tx, /*napi=*/NULL); for (i = cfg->start_idx; i < cfg->start_idx + cfg->num_rings; i++)
netdev_tx_reset_queue(tx->netdev_txq); gve_tx_free_ring_dqo(priv, &tx[i], cfg);
gve_tx_clean_pending_packets(tx);
gve_tx_free_ring_dqo(priv, i); if (cfg->start_idx == 0) {
kvfree(tx);
cfg->tx = NULL;
} }
} }
......
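(Illustrative sketch, not part of the patch.) The DQO tx tear-down is now split in two: gve_tx_stop_ring_dqo() detaches napi, drains completions and pending packets, and is guarded by gve_tx_was_added_to_block() so it is safe on rings that were never started; gve_tx_free_rings_dqo() then releases memory through the config and clears cfg->tx when it owns the array (start_idx == 0). A sketch of the assumed ordering:

/* Sketch only: stop rings first (detach from napi/netdev and drain),
 * then free their memory via the config-aware free.
 */
static void example_tear_down_tx_dqo(struct gve_priv *priv,
				     struct gve_tx_alloc_rings_cfg *cfg)
{
	int i;

	for (i = 0; i < cfg->num_rings; i++)
		gve_tx_stop_ring_dqo(priv, cfg->start_idx + i);

	gve_tx_free_rings_dqo(priv, cfg);
}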
...@@ -8,6 +8,14 @@ ...@@ -8,6 +8,14 @@
#include "gve_adminq.h" #include "gve_adminq.h"
#include "gve_utils.h" #include "gve_utils.h"
bool gve_tx_was_added_to_block(struct gve_priv *priv, int queue_idx)
{
struct gve_notify_block *block =
&priv->ntfy_blocks[gve_tx_idx_to_ntfy(priv, queue_idx)];
return block->tx != NULL;
}
void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx) void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
{ {
struct gve_notify_block *block = struct gve_notify_block *block =
...@@ -30,6 +38,14 @@ void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx) ...@@ -30,6 +38,14 @@ void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
queue_idx); queue_idx);
} }
bool gve_rx_was_added_to_block(struct gve_priv *priv, int queue_idx)
{
struct gve_notify_block *block =
&priv->ntfy_blocks[gve_rx_idx_to_ntfy(priv, queue_idx)];
return block->rx != NULL;
}
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx) void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
{ {
struct gve_notify_block *block = struct gve_notify_block *block =
......
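(Illustrative sketch, not part of the patch.) The two *_was_added_to_block() predicates report whether a queue has been registered with its notify block, which is what lets the stop helpers earlier in this diff run unconditionally. Minimal assumed usage:

/* Sketch only: skip tear-down for a queue that was never attached to
 * its notify block, keeping the stop path idempotent.
 */
static void example_maybe_detach_rx(struct gve_priv *priv, int idx)
{
	if (!gve_rx_was_added_to_block(priv, idx))
		return;		/* never started: nothing to undo */

	gve_rx_remove_from_block(priv, idx);
}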
...@@ -11,9 +11,11 @@ ...@@ -11,9 +11,11 @@
#include "gve.h" #include "gve.h"
bool gve_tx_was_added_to_block(struct gve_priv *priv, int queue_idx);
void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx); void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx);
void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx); void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx);
bool gve_rx_was_added_to_block(struct gve_priv *priv, int queue_idx);
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx); void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx);
void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx); void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
......