Commit 1f6228e4 authored by Bailey Forrest, committed by David S. Miller

gve: Update adminq commands to support DQO queues

DQO queue creation requires additional parameters:
- TX completion/RX buffer queue size
- TX completion/RX buffer queue address
- TX/RX queue size
- RX buffer size
Signed-off-by: Bailey Forrest <bcf@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent a4aa1f1e
...@@ -548,6 +548,9 @@ struct gve_priv { ...@@ -548,6 +548,9 @@ struct gve_priv {
struct gve_options_dqo_rda options_dqo_rda; struct gve_options_dqo_rda options_dqo_rda;
struct gve_ptype_lut *ptype_lut_dqo; struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
int data_buffer_size_dqo;
enum gve_queue_format queue_format; enum gve_queue_format queue_format;
}; };
......
...@@ -443,6 +443,7 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv, ...@@ -443,6 +443,7 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
.irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])), .irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])),
.ntfy_blk_msix_base_idx = .ntfy_blk_msix_base_idx =
cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX), cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
.queue_format = priv->queue_format,
}; };
return gve_adminq_execute_cmd(priv, &cmd); return gve_adminq_execute_cmd(priv, &cmd);
...@@ -462,28 +463,32 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index) ...@@ -462,28 +463,32 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{ {
struct gve_tx_ring *tx = &priv->tx[queue_index]; struct gve_tx_ring *tx = &priv->tx[queue_index];
union gve_adminq_command cmd; union gve_adminq_command cmd;
u32 qpl_id;
int err;
qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;
memset(&cmd, 0, sizeof(cmd)); memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE); cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) { cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
.queue_id = cpu_to_be32(queue_index), .queue_id = cpu_to_be32(queue_index),
.reserved = 0,
.queue_resources_addr = .queue_resources_addr =
cpu_to_be64(tx->q_resources_bus), cpu_to_be64(tx->q_resources_bus),
.tx_ring_addr = cpu_to_be64(tx->bus), .tx_ring_addr = cpu_to_be64(tx->bus),
.queue_page_list_id = cpu_to_be32(qpl_id),
.ntfy_id = cpu_to_be32(tx->ntfy_id), .ntfy_id = cpu_to_be32(tx->ntfy_id),
}; };
err = gve_adminq_issue_cmd(priv, &cmd); if (gve_is_gqi(priv)) {
if (err) u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
return err; GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;
return 0; cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
} else {
cmd.create_tx_queue.tx_ring_size =
cpu_to_be16(priv->tx_desc_cnt);
cmd.create_tx_queue.tx_comp_ring_addr =
cpu_to_be64(tx->complq_bus_dqo);
cmd.create_tx_queue.tx_comp_ring_size =
cpu_to_be16(priv->options_dqo_rda.tx_comp_ring_entries);
}
return gve_adminq_issue_cmd(priv, &cmd);
} }
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues) int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
...@@ -504,29 +509,41 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index) ...@@ -504,29 +509,41 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{ {
struct gve_rx_ring *rx = &priv->rx[queue_index]; struct gve_rx_ring *rx = &priv->rx[queue_index];
union gve_adminq_command cmd; union gve_adminq_command cmd;
u32 qpl_id;
int err;
qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
memset(&cmd, 0, sizeof(cmd)); memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE); cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) { cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
.queue_id = cpu_to_be32(queue_index), .queue_id = cpu_to_be32(queue_index),
.index = cpu_to_be32(queue_index),
.reserved = 0,
.ntfy_id = cpu_to_be32(rx->ntfy_id), .ntfy_id = cpu_to_be32(rx->ntfy_id),
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus), .queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
.rx_desc_ring_addr = cpu_to_be64(rx->desc.bus),
.rx_data_ring_addr = cpu_to_be64(rx->data.data_bus),
.queue_page_list_id = cpu_to_be32(qpl_id),
}; };
err = gve_adminq_issue_cmd(priv, &cmd); if (gve_is_gqi(priv)) {
if (err) u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
return err; GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
cmd.create_rx_queue.rx_desc_ring_addr =
cpu_to_be64(rx->desc.bus),
cmd.create_rx_queue.rx_data_ring_addr =
cpu_to_be64(rx->data.data_bus),
cmd.create_rx_queue.index = cpu_to_be32(queue_index);
cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
} else {
cmd.create_rx_queue.rx_ring_size =
cpu_to_be16(priv->rx_desc_cnt);
cmd.create_rx_queue.rx_desc_ring_addr =
cpu_to_be64(rx->dqo.complq.bus);
cmd.create_rx_queue.rx_data_ring_addr =
cpu_to_be64(rx->dqo.bufq.bus);
cmd.create_rx_queue.packet_buffer_size =
cpu_to_be16(priv->data_buffer_size_dqo);
cmd.create_rx_queue.rx_buff_ring_size =
cpu_to_be16(priv->options_dqo_rda.rx_buff_ring_entries);
cmd.create_rx_queue.enable_rsc =
!!(priv->dev->features & NETIF_F_LRO);
}
return 0; return gve_adminq_issue_cmd(priv, &cmd);
} }
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues) int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
......
...@@ -139,9 +139,11 @@ struct gve_adminq_configure_device_resources { ...@@ -139,9 +139,11 @@ struct gve_adminq_configure_device_resources {
__be32 num_irq_dbs; __be32 num_irq_dbs;
__be32 irq_db_stride; __be32 irq_db_stride;
__be32 ntfy_blk_msix_base_idx; __be32 ntfy_blk_msix_base_idx;
u8 queue_format;
u8 padding[7];
}; };
static_assert(sizeof(struct gve_adminq_configure_device_resources) == 32); static_assert(sizeof(struct gve_adminq_configure_device_resources) == 40);
struct gve_adminq_register_page_list { struct gve_adminq_register_page_list {
__be32 page_list_id; __be32 page_list_id;
...@@ -166,9 +168,13 @@ struct gve_adminq_create_tx_queue { ...@@ -166,9 +168,13 @@ struct gve_adminq_create_tx_queue {
__be64 tx_ring_addr; __be64 tx_ring_addr;
__be32 queue_page_list_id; __be32 queue_page_list_id;
__be32 ntfy_id; __be32 ntfy_id;
__be64 tx_comp_ring_addr;
__be16 tx_ring_size;
__be16 tx_comp_ring_size;
u8 padding[4];
}; };
static_assert(sizeof(struct gve_adminq_create_tx_queue) == 32); static_assert(sizeof(struct gve_adminq_create_tx_queue) == 48);
struct gve_adminq_create_rx_queue { struct gve_adminq_create_rx_queue {
__be32 queue_id; __be32 queue_id;
...@@ -179,10 +185,14 @@ struct gve_adminq_create_rx_queue { ...@@ -179,10 +185,14 @@ struct gve_adminq_create_rx_queue {
__be64 rx_desc_ring_addr; __be64 rx_desc_ring_addr;
__be64 rx_data_ring_addr; __be64 rx_data_ring_addr;
__be32 queue_page_list_id; __be32 queue_page_list_id;
u8 padding[4]; __be16 rx_ring_size;
__be16 packet_buffer_size;
__be16 rx_buff_ring_size;
u8 enable_rsc;
u8 padding[5];
}; };
static_assert(sizeof(struct gve_adminq_create_rx_queue) == 48); static_assert(sizeof(struct gve_adminq_create_rx_queue) == 56);
/* Queue resources that are shared with the device */ /* Queue resources that are shared with the device */
struct gve_queue_resources { struct gve_queue_resources {
......
// SPDX-License-Identifier: (GPL-2.0 OR MIT) // SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver /* Google virtual Ethernet (gve) driver
* *
* Copyright (C) 2015-2019 Google, Inc. * Copyright (C) 2015-2021 Google, Inc.
*/ */
#include <linux/ethtool.h> #include <linux/ethtool.h>
...@@ -453,11 +453,16 @@ static int gve_set_tunable(struct net_device *netdev, ...@@ -453,11 +453,16 @@ static int gve_set_tunable(struct net_device *netdev,
switch (etuna->id) { switch (etuna->id) {
case ETHTOOL_RX_COPYBREAK: case ETHTOOL_RX_COPYBREAK:
{
u32 max_copybreak = gve_is_gqi(priv) ?
(PAGE_SIZE / 2) : priv->data_buffer_size_dqo;
len = *(u32 *)value; len = *(u32 *)value;
if (len > PAGE_SIZE / 2) if (len > max_copybreak)
return -EINVAL; return -EINVAL;
priv->rx_copybreak = len; priv->rx_copybreak = len;
return 0; return 0;
}
default: default:
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment