Commit 382d1741 authored by Haiyang Zhang, committed by Jakub Kicinski

net: mana: Add support for page sizes other than 4KB on ARM64

As defined by the MANA Hardware spec, the queue size for DMA must be a
power of 2 and at least 4KB, and the HWC queue size has to be exactly
4KB.

To support page sizes other than 4KB on ARM64, define the minimal
queue size as a macro separate from PAGE_SIZE, which we had always
assumed to be 4KB before supporting ARM64.

Also, add MANA-specific macros and update code related to size
alignment, DMA region calculations, etc.
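
For illustration only (a hypothetical sketch, not part of the patch), here is
roughly what the change means on an ARM64 kernel built with 64KB pages. It
assumes the kernel's PAGE_SIZE/PAGE_ALIGN() and the MANA_* macros added to
gdma.h below; the length value is made up:

        u32 len = 10000;                           /* made-up buffer length */
        u32 kernel_aligned = PAGE_ALIGN(len);      /* 65536: rounded to the 64KB kernel page */
        u32 mana_aligned = MANA_PAGE_ALIGN(len);   /* 12288: rounded to the 4KB HW DMA unit */
        unsigned long old_eq_size = 8 * PAGE_SIZE;      /* 524288 on a 64KB-page kernel */
        unsigned long new_eq_size = 8 * MANA_PAGE_SIZE; /*  32768 on any kernel page size */

With the old PAGE_SIZE-based sizing, queues and DMA buffers would be sized and
aligned to the kernel page, which is not what the hardware expects once the
kernel page is no longer 4KB.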
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Reviewed-by: Michael Kelley <mhklinux@outlook.com>
Link: https://lore.kernel.org/r/1718655446-6576-1-git-send-email-haiyangz@microsoft.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 2c6a4b96
--- a/drivers/net/ethernet/microsoft/Kconfig
+++ b/drivers/net/ethernet/microsoft/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_MICROSOFT
 config MICROSOFT_MANA
        tristate "Microsoft Azure Network Adapter (MANA) support"
        depends on PCI_MSI
-       depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN && ARM64_4K_PAGES)
+       depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN)
        depends on PCI_HYPERV
        select AUXILIARY_BUS
        select PAGE_POOL
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -182,7 +182,7 @@ int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
        dma_addr_t dma_handle;
        void *buf;
 
-       if (length < PAGE_SIZE || !is_power_of_2(length))
+       if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
                return -EINVAL;
 
        gmi->dev = gc->dev;
@@ -717,7 +717,7 @@ EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
 static int mana_gd_create_dma_region(struct gdma_dev *gd,
                                     struct gdma_mem_info *gmi)
 {
-       unsigned int num_page = gmi->length / PAGE_SIZE;
+       unsigned int num_page = gmi->length / MANA_PAGE_SIZE;
        struct gdma_create_dma_region_req *req = NULL;
        struct gdma_create_dma_region_resp resp = {};
        struct gdma_context *gc = gd->gdma_context;
@@ -727,10 +727,10 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
        int err;
        int i;
 
-       if (length < PAGE_SIZE || !is_power_of_2(length))
+       if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
                return -EINVAL;
 
-       if (offset_in_page(gmi->virt_addr) != 0)
+       if (!MANA_PAGE_ALIGNED(gmi->virt_addr))
                return -EINVAL;
 
        hwc = gc->hwc.driver_data;
@@ -751,7 +751,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
        req->page_addr_list_len = num_page;
 
        for (i = 0; i < num_page; i++)
-               req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;
+               req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE;
 
        err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
        if (err)
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -362,12 +362,12 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
        int err;
 
        eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
-       if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
-               eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+       if (eq_size < MANA_MIN_QSIZE)
+               eq_size = MANA_MIN_QSIZE;
 
        cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
-       if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
-               cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+       if (cq_size < MANA_MIN_QSIZE)
+               cq_size = MANA_MIN_QSIZE;
 
        hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
        if (!hwc_cq)
@@ -429,7 +429,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
 
        dma_buf->num_reqs = q_depth;
 
-       buf_size = PAGE_ALIGN(q_depth * max_msg_size);
+       buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);
 
        gmi = &dma_buf->mem_info;
        err = mana_gd_alloc_memory(gc, buf_size, gmi);
@@ -497,8 +497,8 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
        else
                queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);
 
-       if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
-               queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+       if (queue_size < MANA_MIN_QSIZE)
+               queue_size = MANA_MIN_QSIZE;
 
        hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
        if (!hwc_wq)
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1904,10 +1904,10 @@ static int mana_create_txq(struct mana_port_context *apc,
         * to prevent overflow.
         */
        txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
-       BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
+       BUILD_BUG_ON(!MANA_PAGE_ALIGNED(txq_size));
 
        cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
-       cq_size = PAGE_ALIGN(cq_size);
+       cq_size = MANA_PAGE_ALIGN(cq_size);
 
        gc = gd->gdma_context;
 
@@ -2204,8 +2204,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
        if (err)
                goto out;
 
-       rq_size = PAGE_ALIGN(rq_size);
-       cq_size = PAGE_ALIGN(cq_size);
+       rq_size = MANA_PAGE_ALIGN(rq_size);
+       cq_size = MANA_PAGE_ALIGN(cq_size);
 
        /* Create RQ */
        memset(&spec, 0, sizeof(spec));
--- a/drivers/net/ethernet/microsoft/mana/shm_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c
@@ -6,6 +6,7 @@
 #include <linux/io.h>
 #include <linux/mm.h>
 
+#include <net/mana/gdma.h>
 #include <net/mana/shm_channel.h>
 
 #define PAGE_FRAME_L48_WIDTH_BYTES 6
@@ -155,8 +156,8 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
                return err;
        }
 
-       if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) ||
-           !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr))
+       if (!MANA_PAGE_ALIGNED(eq_addr) || !MANA_PAGE_ALIGNED(cq_addr) ||
+           !MANA_PAGE_ALIGNED(rq_addr) || !MANA_PAGE_ALIGNED(sq_addr))
                return -EINVAL;
 
        if ((eq_msix_index & VECTOR_MASK) != eq_msix_index)
@@ -183,7 +184,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
 
        /* EQ addr: low 48 bits of frame address */
        shmem = (u64 *)ptr;
-       frame_addr = PHYS_PFN(eq_addr);
+       frame_addr = MANA_PFN(eq_addr);
        *shmem = frame_addr & PAGE_FRAME_L48_MASK;
        all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
                (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
@@ -191,7 +192,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
 
        /* CQ addr: low 48 bits of frame address */
        shmem = (u64 *)ptr;
-       frame_addr = PHYS_PFN(cq_addr);
+       frame_addr = MANA_PFN(cq_addr);
        *shmem = frame_addr & PAGE_FRAME_L48_MASK;
        all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
                (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
@@ -199,7 +200,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
 
        /* RQ addr: low 48 bits of frame address */
        shmem = (u64 *)ptr;
-       frame_addr = PHYS_PFN(rq_addr);
+       frame_addr = MANA_PFN(rq_addr);
        *shmem = frame_addr & PAGE_FRAME_L48_MASK;
        all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
                (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
@@ -207,7 +208,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
 
        /* SQ addr: low 48 bits of frame address */
        shmem = (u64 *)ptr;
-       frame_addr = PHYS_PFN(sq_addr);
+       frame_addr = MANA_PFN(sq_addr);
        *shmem = frame_addr & PAGE_FRAME_L48_MASK;
        all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
                (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
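The PHYS_PFN() to MANA_PFN() switch above matters because the shared-memory
channel passes 4KB frame numbers to the hardware: PHYS_PFN() shifts by the
kernel's PAGE_SHIFT, while MANA_PFN() always shifts by MANA_PAGE_SHIFT (12).
A hypothetical illustration with a made-up address:

        u64 eq_addr = 0x1234000;        /* made-up physical address of an EQ */
        u64 frame = MANA_PFN(eq_addr);  /* 0x1234 on every kernel page size */

        /* By contrast, PHYS_PFN(eq_addr) is 0x1234 only on a 4KB-page kernel;
         * on a 64KB-page kernel (PAGE_SHIFT = 16) it would be 0x123, which is
         * not the 4KB frame number the MANA shared-memory protocol expects.
         */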
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -224,7 +224,15 @@ struct gdma_dev {
        struct auxiliary_device *adev;
 };
 
-#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE
+/* MANA_PAGE_SIZE is the DMA unit */
+#define MANA_PAGE_SHIFT 12
+#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT)
+#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE)
+#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE)
+#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT)
+
+/* Required by HW */
+#define MANA_MIN_QSIZE MANA_PAGE_SIZE
 
 #define GDMA_CQE_SIZE 64
 #define GDMA_EQE_SIZE 16
--- a/include/net/mana/mana.h
+++ b/include/net/mana/mana.h
@@ -42,7 +42,8 @@ enum TRI_STATE {
 
 #define MAX_SEND_BUFFERS_PER_QUEUE 256
 
-#define EQ_SIZE (8 * PAGE_SIZE)
+#define EQ_SIZE (8 * MANA_PAGE_SIZE)
+
 #define LOG2_EQ_THROTTLE 3
 
 #define MAX_PORTS_IN_MANA_DEV 256