Commit 8f8cea8f authored by Amit Cohen, committed by Jakub Kicinski

mlxsw: pci: Store number of scatter/gather entries for maximum packet size

A previous patch set used page pool for Rx buffer allocations. To
simplify the change, we first used page pool for one allocation per
packet - one contiguous buffer is allocated for each packet. This can be
improved by using fragmented buffers, which significantly reduces memory
consumption.

A WQE (Work Queue Element) includes up to 3 scatter/gather entries for
data. As preparation for fragmented buffer usage, calculate the number of
scatter/gather entries required for a packet according to the maximum MTU
and store it for future use. For now, use PAGE_SIZE for each entry, which
means that the maximum buffer size is 3 * PAGE_SIZE. This is enough for
the maximum MTU currently supported by the driver (10K).
Warn in the unlikely case that the maximum MTU requires more than 3 pages;
for now this warning should not trigger with a standard page size (>= 4K)
and the current maximum MTU (10K).
Signed-off-by: Amit Cohen <amcohen@nvidia.com>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: Petr Machata <petrm@nvidia.com>
Link: https://patch.msgid.link/98c3e3adb7e727e571ac538faf67cef262cec4fc.1719321422.git.petrm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent a6a6a980
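
As a quick, illustrative check of the arithmetic described in the commit
message (not part of the patch): with an assumed 4K page size and an assumed
per-packet software overhead value, a DIV_ROUND_UP-style calculation yields 3
entries for a 10K MTU, which fits within the 3 scatter/gather entries of a
WQE. The EXAMPLE_* constants below are placeholders for illustration, not the
driver's actual definitions.

/* Standalone sketch of the scatter/gather entry calculation; the
 * EXAMPLE_* constants are assumptions for illustration only.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096	/* assumed standard page size */
#define EXAMPLE_MAX_MTU		10000	/* ~10K maximum MTU mentioned above */
#define EXAMPLE_SW_OVERHEAD	320	/* placeholder for Rx buffer SW overhead */
#define EXAMPLE_WQE_SG_ENTRIES	3	/* a WQE carries up to 3 data entries */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int num = DIV_ROUND_UP(EXAMPLE_MAX_MTU + EXAMPLE_SW_OVERHEAD,
					EXAMPLE_PAGE_SIZE);

	/* 10000 + 320 = 10320 bytes -> DIV_ROUND_UP(10320, 4096) = 3 pages,
	 * which fits within the 3 scatter/gather entries of a WQE.
	 */
	printf("entries needed: %u (limit %u)\n", num, EXAMPLE_WQE_SG_ENTRIES);
	return 0;
}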
@@ -111,6 +111,7 @@ struct mlxsw_pci {
 	bool cff_support;
 	enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
 	enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode;
+	u8 num_sg_entries; /* Number of scatter/gather entries for packets. */
 	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
 	u32 doorbell_offset;
 	struct mlxsw_core *core;
@@ -427,6 +428,12 @@ static void mlxsw_pci_rdq_page_free(struct mlxsw_pci_queue *q,
 	page_pool_put_page(cq->u.cq.page_pool, elem_info->page, -1, false);
 }
 
+static u8 mlxsw_pci_num_sg_entries_get(u16 byte_count)
+{
+	return DIV_ROUND_UP(byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD,
+			    PAGE_SIZE);
+}
+
 static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 			      struct mlxsw_pci_queue *q)
 {
@@ -1774,6 +1781,17 @@ static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
 	pci_free_irq_vectors(mlxsw_pci->pdev);
 }
 
+static void mlxsw_pci_num_sg_entries_set(struct mlxsw_pci *mlxsw_pci)
+{
+	u8 num_sg_entries;
+
+	num_sg_entries = mlxsw_pci_num_sg_entries_get(MLXSW_PORT_MAX_MTU);
+	mlxsw_pci->num_sg_entries = min(num_sg_entries,
+					MLXSW_PCI_WQE_SG_ENTRIES);
+
+	WARN_ON(num_sg_entries > MLXSW_PCI_WQE_SG_ENTRIES);
+}
+
 static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
 			  const struct mlxsw_config_profile *profile,
 			  struct mlxsw_res *res)
@@ -1896,6 +1914,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
 	if (err)
 		goto err_requery_resources;
 
+	mlxsw_pci_num_sg_entries_set(mlxsw_pci);
+
 	err = mlxsw_pci_napi_devs_init(mlxsw_pci);
 	if (err)
 		goto err_napi_devs_init;