Commit e84d3c18 authored by Michal Kalderon, committed by Jason Gunthorpe

RDMA/efa: Use the common mmap_xa helpers

Remove the functions related to managing the mmap_xa database.  This code
was replaced with common code in ib_core.
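
For context, the common ib_core helpers this series switches to are driven in roughly the pattern sketched below. This is a condensed, illustrative sketch only (the example_* names and the pgprot choice are placeholders, not EFA code); it mirrors the calls the diff introduces: the driver embeds the core entry in a private struct so container_of() can recover its data, registers the region to get an mmap offset for userspace, and resolves vma->vm_pgoff back to the entry in its .mmap verb.

#include <linux/mm.h>
#include <rdma/ib_verbs.h>

/* Driver-private wrapper: embed the core entry so container_of() works. */
struct example_user_mmap_entry {
        struct rdma_user_mmap_entry rdma_entry; /* owned/tracked by ib_core */
        u64 address;                            /* physical/BAR address to map */
};

/*
 * Producer side (e.g. create_qp/create_cq): register the region with
 * ib_core and return the mmap offset to userspace in the uverbs response.
 */
static struct rdma_user_mmap_entry *
example_entry_insert(struct ib_ucontext *uctx, u64 address, size_t length,
                     u64 *offset)
{
        struct example_user_mmap_entry *entry;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return NULL;

        entry->address = address;
        /* ib_core assigns the pgoff range and tracks the entry from here on */
        if (rdma_user_mmap_entry_insert(uctx, &entry->rdma_entry, length)) {
                kfree(entry);
                return NULL;
        }

        *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
        return &entry->rdma_entry;
}

/*
 * Consumer side (.mmap verb): look the entry up by vma->vm_pgoff, map it,
 * then drop the reference taken by the lookup.
 */
static int example_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
        struct rdma_user_mmap_entry *rdma_entry;
        struct example_user_mmap_entry *entry;
        int err;

        rdma_entry = rdma_user_mmap_entry_get(uctx, vma);
        if (!rdma_entry)
                return -EINVAL;

        entry = container_of(rdma_entry, struct example_user_mmap_entry,
                             rdma_entry);
        err = rdma_user_mmap_io(uctx, vma, entry->address >> PAGE_SHIFT,
                                rdma_entry->npages * PAGE_SIZE,
                                pgprot_noncached(vma->vm_page_prot),
                                rdma_entry);

        rdma_user_mmap_entry_put(rdma_entry);
        return err;
}

Teardown is symmetric: the object's destroy path calls rdma_user_mmap_entry_remove(), and the new .mmap_free callback (efa_mmap_free() in the diff below) is invoked by ib_core once the entry is no longer in use, which is where the driver frees DMA pages and the wrapper itself.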

Link: https://lore.kernel.org/r/20191030094417.16866-5-michal.kalderon@marvell.com
Signed-off-by: Ariel Elior <ariel.elior@marvell.com>
Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent c043ff2c

@@ -71,8 +71,6 @@ struct efa_dev {
 struct efa_ucontext {
         struct ib_ucontext ibucontext;
-        struct xarray mmap_xa;
-        u32 mmap_xa_page;
         u16 uarn;
 };
@@ -91,6 +89,7 @@ struct efa_cq {
         struct efa_ucontext *ucontext;
         dma_addr_t dma_addr;
         void *cpu_addr;
+        struct rdma_user_mmap_entry *mmap_entry;
         size_t size;
         u16 cq_idx;
 };
@@ -101,6 +100,13 @@ struct efa_qp {
         void *rq_cpu_addr;
         size_t rq_size;
         enum ib_qp_state state;
+
+        /* Used for saving mmap_xa entries */
+        struct rdma_user_mmap_entry *sq_db_mmap_entry;
+        struct rdma_user_mmap_entry *llq_desc_mmap_entry;
+        struct rdma_user_mmap_entry *rq_db_mmap_entry;
+        struct rdma_user_mmap_entry *rq_mmap_entry;
+
         u32 qp_handle;
         u32 max_send_wr;
         u32 max_recv_wr;
@@ -147,6 +153,7 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata);
 void efa_dealloc_ucontext(struct ib_ucontext *ibucontext);
 int efa_mmap(struct ib_ucontext *ibucontext,
              struct vm_area_struct *vma);
+void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
 int efa_create_ah(struct ib_ah *ibah,
                   struct rdma_ah_attr *ah_attr,
                   u32 flags,

@@ -217,6 +217,7 @@ static const struct ib_device_ops efa_dev_ops = {
         .get_link_layer = efa_port_link_layer,
         .get_port_immutable = efa_get_port_immutable,
         .mmap = efa_mmap,
+        .mmap_free = efa_mmap_free,
         .modify_qp = efa_modify_qp,
         .query_device = efa_query_device,
         .query_gid = efa_query_gid,

@@ -13,10 +13,6 @@
 #include "efa.h"
 
-#define EFA_MMAP_FLAG_SHIFT 56
-#define EFA_MMAP_PAGE_MASK GENMASK(EFA_MMAP_FLAG_SHIFT - 1, 0)
-#define EFA_MMAP_INVALID U64_MAX
-
 enum {
         EFA_MMAP_DMA_PAGE = 0,
         EFA_MMAP_IO_WC,
@@ -27,20 +23,12 @@ enum {
         (BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
          BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
 
-struct efa_mmap_entry {
-        void *obj;
+struct efa_user_mmap_entry {
+        struct rdma_user_mmap_entry rdma_entry;
         u64 address;
-        u64 length;
-        u32 mmap_page;
         u8 mmap_flag;
 };
 
-static inline u64 get_mmap_key(const struct efa_mmap_entry *efa)
-{
-        return ((u64)efa->mmap_flag << EFA_MMAP_FLAG_SHIFT) |
-               ((u64)efa->mmap_page << PAGE_SHIFT);
-}
-
 #define EFA_DEFINE_STATS(op) \
         op(EFA_TX_BYTES, "tx_bytes") \
         op(EFA_TX_PKTS, "tx_pkts") \
@@ -147,6 +135,12 @@ static inline struct efa_ah *to_eah(struct ib_ah *ibah)
         return container_of(ibah, struct efa_ah, ibah);
 }
 
+static inline struct efa_user_mmap_entry *
+to_emmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+        return container_of(rdma_entry, struct efa_user_mmap_entry, rdma_entry);
+}
+
 #define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \
                                  FIELD_SIZEOF(typeof(x), fld) <= (sz))
@@ -172,106 +166,6 @@ static void *efa_zalloc_mapped(struct efa_dev *dev, dma_addr_t *dma_addr,
         return addr;
 }
 
-/*
- * This is only called when the ucontext is destroyed and there can be no
- * concurrent query via mmap or allocate on the xarray, thus we can be sure no
- * other thread is using the entry pointer. We also know that all the BAR
- * pages have either been zap'd or munmaped at this point. Normal pages are
- * refcounted and will be freed at the proper time.
- */
-static void mmap_entries_remove_free(struct efa_dev *dev,
-                                     struct efa_ucontext *ucontext)
-{
-        struct efa_mmap_entry *entry;
-        unsigned long mmap_page;
-
-        xa_for_each(&ucontext->mmap_xa, mmap_page, entry) {
-                xa_erase(&ucontext->mmap_xa, mmap_page);
-                ibdev_dbg(
-                        &dev->ibdev,
-                        "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n",
-                        entry->obj, get_mmap_key(entry), entry->address,
-                        entry->length);
-                if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
-                        /* DMA mapping is already gone, now free the pages */
-                        free_pages_exact(phys_to_virt(entry->address),
-                                         entry->length);
-                kfree(entry);
-        }
-}
-
-static struct efa_mmap_entry *mmap_entry_get(struct efa_dev *dev,
-                                             struct efa_ucontext *ucontext,
-                                             u64 key, u64 len)
-{
-        struct efa_mmap_entry *entry;
-        u64 mmap_page;
-
-        mmap_page = (key & EFA_MMAP_PAGE_MASK) >> PAGE_SHIFT;
-        if (mmap_page > U32_MAX)
-                return NULL;
-
-        entry = xa_load(&ucontext->mmap_xa, mmap_page);
-        if (!entry || get_mmap_key(entry) != key || entry->length != len)
-                return NULL;
-
-        ibdev_dbg(&dev->ibdev,
-                  "mmap: obj[0x%p] key[%#llx] addr[%#llx] len[%#llx] removed\n",
-                  entry->obj, key, entry->address, entry->length);
-
-        return entry;
-}
-
-/*
- * Note this locking scheme cannot support removal of entries, except during
- * ucontext destruction when the core code guarentees no concurrency.
- */
-static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
-                             void *obj, u64 address, u64 length, u8 mmap_flag)
-{
-        struct efa_mmap_entry *entry;
-        u32 next_mmap_page;
-        int err;
-
-        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-        if (!entry)
-                return EFA_MMAP_INVALID;
-
-        entry->obj = obj;
-        entry->address = address;
-        entry->length = length;
-        entry->mmap_flag = mmap_flag;
-
-        xa_lock(&ucontext->mmap_xa);
-        if (check_add_overflow(ucontext->mmap_xa_page,
-                               (u32)(length >> PAGE_SHIFT),
-                               &next_mmap_page))
-                goto err_unlock;
-
-        entry->mmap_page = ucontext->mmap_xa_page;
-        ucontext->mmap_xa_page = next_mmap_page;
-        err = __xa_insert(&ucontext->mmap_xa, entry->mmap_page, entry,
-                          GFP_KERNEL);
-        if (err)
-                goto err_unlock;
-
-        xa_unlock(&ucontext->mmap_xa);
-
-        ibdev_dbg(
-                &dev->ibdev,
-                "mmap: obj[0x%p] addr[%#llx], len[%#llx], key[%#llx] inserted\n",
-                entry->obj, entry->address, entry->length, get_mmap_key(entry));
-
-        return get_mmap_key(entry);
-
-err_unlock:
-        xa_unlock(&ucontext->mmap_xa);
-        kfree(entry);
-        return EFA_MMAP_INVALID;
-}
-
 int efa_query_device(struct ib_device *ibdev,
                      struct ib_device_attr *props,
                      struct ib_udata *udata)
@@ -485,8 +379,19 @@ static int efa_destroy_qp_handle(struct efa_dev *dev, u32 qp_handle)
         return efa_com_destroy_qp(&dev->edev, &params);
 }
 
+static void efa_qp_user_mmap_entries_remove(struct efa_ucontext *uctx,
+                                            struct efa_qp *qp)
+{
+        rdma_user_mmap_entry_remove(qp->rq_mmap_entry);
+        rdma_user_mmap_entry_remove(qp->rq_db_mmap_entry);
+        rdma_user_mmap_entry_remove(qp->llq_desc_mmap_entry);
+        rdma_user_mmap_entry_remove(qp->sq_db_mmap_entry);
+}
+
 int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
 {
+        struct efa_ucontext *ucontext = rdma_udata_to_drv_context(udata,
+                        struct efa_ucontext, ibucontext);
         struct efa_dev *dev = to_edev(ibqp->pd->device);
         struct efa_qp *qp = to_eqp(ibqp);
         int err;
@@ -505,61 +410,101 @@ int efa_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
                          DMA_TO_DEVICE);
         }
 
+        efa_qp_user_mmap_entries_remove(ucontext, qp);
         kfree(qp);
         return 0;
 }
 
+static struct rdma_user_mmap_entry*
+efa_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+                           u64 address, size_t length,
+                           u8 mmap_flag, u64 *offset)
+{
+        struct efa_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+        int err;
+
+        if (!entry)
+                return NULL;
+
+        entry->address = address;
+        entry->mmap_flag = mmap_flag;
+
+        err = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
+                                          length);
+        if (err) {
+                kfree(entry);
+                return NULL;
+        }
+        *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
+
+        return &entry->rdma_entry;
+}
+
 static int qp_mmap_entries_setup(struct efa_qp *qp,
                                  struct efa_dev *dev,
                                  struct efa_ucontext *ucontext,
                                  struct efa_com_create_qp_params *params,
                                  struct efa_ibv_create_qp_resp *resp)
 {
-        /*
-         * Once an entry is inserted it might be mmapped, hence cannot be
-         * cleaned up until dealloc_ucontext.
-         */
-        resp->sq_db_mmap_key =
-                mmap_entry_insert(dev, ucontext, qp,
-                                  dev->db_bar_addr + resp->sq_db_offset,
-                                  PAGE_SIZE, EFA_MMAP_IO_NC);
-        if (resp->sq_db_mmap_key == EFA_MMAP_INVALID)
+        size_t length;
+        u64 address;
+
+        address = dev->db_bar_addr + resp->sq_db_offset;
+        qp->sq_db_mmap_entry =
+                efa_user_mmap_entry_insert(&ucontext->ibucontext,
+                                           address,
+                                           PAGE_SIZE, EFA_MMAP_IO_NC,
+                                           &resp->sq_db_mmap_key);
+        if (!qp->sq_db_mmap_entry)
                 return -ENOMEM;
 
         resp->sq_db_offset &= ~PAGE_MASK;
 
-        resp->llq_desc_mmap_key =
-                mmap_entry_insert(dev, ucontext, qp,
-                                  dev->mem_bar_addr + resp->llq_desc_offset,
-                                  PAGE_ALIGN(params->sq_ring_size_in_bytes +
-                                             (resp->llq_desc_offset & ~PAGE_MASK)),
-                                  EFA_MMAP_IO_WC);
-        if (resp->llq_desc_mmap_key == EFA_MMAP_INVALID)
-                return -ENOMEM;
+        address = dev->mem_bar_addr + resp->llq_desc_offset;
+        length = PAGE_ALIGN(params->sq_ring_size_in_bytes +
+                            (resp->llq_desc_offset & ~PAGE_MASK));
+
+        qp->llq_desc_mmap_entry =
+                efa_user_mmap_entry_insert(&ucontext->ibucontext,
+                                           address, length,
+                                           EFA_MMAP_IO_WC,
+                                           &resp->llq_desc_mmap_key);
+        if (!qp->llq_desc_mmap_entry)
+                goto err_remove_mmap;
 
         resp->llq_desc_offset &= ~PAGE_MASK;
 
         if (qp->rq_size) {
-                resp->rq_db_mmap_key =
-                        mmap_entry_insert(dev, ucontext, qp,
-                                          dev->db_bar_addr + resp->rq_db_offset,
-                                          PAGE_SIZE, EFA_MMAP_IO_NC);
-                if (resp->rq_db_mmap_key == EFA_MMAP_INVALID)
-                        return -ENOMEM;
+                address = dev->db_bar_addr + resp->rq_db_offset;
+
+                qp->rq_db_mmap_entry =
+                        efa_user_mmap_entry_insert(&ucontext->ibucontext,
+                                                   address, PAGE_SIZE,
+                                                   EFA_MMAP_IO_NC,
+                                                   &resp->rq_db_mmap_key);
+                if (!qp->rq_db_mmap_entry)
+                        goto err_remove_mmap;
 
                 resp->rq_db_offset &= ~PAGE_MASK;
 
-                resp->rq_mmap_key =
-                        mmap_entry_insert(dev, ucontext, qp,
-                                          virt_to_phys(qp->rq_cpu_addr),
-                                          qp->rq_size, EFA_MMAP_DMA_PAGE);
-                if (resp->rq_mmap_key == EFA_MMAP_INVALID)
-                        return -ENOMEM;
+                address = virt_to_phys(qp->rq_cpu_addr);
+                qp->rq_mmap_entry =
+                        efa_user_mmap_entry_insert(&ucontext->ibucontext,
+                                                   address, qp->rq_size,
+                                                   EFA_MMAP_DMA_PAGE,
+                                                   &resp->rq_mmap_key);
+                if (!qp->rq_mmap_entry)
+                        goto err_remove_mmap;
 
                 resp->rq_mmap_size = qp->rq_size;
         }
 
         return 0;
+
+err_remove_mmap:
+        efa_qp_user_mmap_entries_remove(ucontext, qp);
+
+        return -ENOMEM;
 }
 
 static int efa_qp_validate_cap(struct efa_dev *dev,
@@ -634,7 +579,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
         struct efa_dev *dev = to_edev(ibpd->device);
         struct efa_ibv_create_qp_resp resp = {};
         struct efa_ibv_create_qp cmd = {};
-        bool rq_entry_inserted = false;
         struct efa_ucontext *ucontext;
         struct efa_qp *qp;
         int err;
@@ -742,7 +686,6 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
         if (err)
                 goto err_destroy_qp;
 
-        rq_entry_inserted = true;
         qp->qp_handle = create_qp_resp.qp_handle;
         qp->ibqp.qp_num = create_qp_resp.qp_num;
         qp->ibqp.qp_type = init_attr->qp_type;
@@ -759,7 +702,7 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
                         ibdev_dbg(&dev->ibdev,
                                   "Failed to copy udata for qp[%u]\n",
                                   create_qp_resp.qp_num);
-                        goto err_destroy_qp;
+                        goto err_remove_mmap_entries;
                 }
         }
@@ -767,13 +710,16 @@ struct ib_qp *efa_create_qp(struct ib_pd *ibpd,
         return &qp->ibqp;
 
+err_remove_mmap_entries:
+        efa_qp_user_mmap_entries_remove(ucontext, qp);
 err_destroy_qp:
         efa_destroy_qp_handle(dev, create_qp_resp.qp_handle);
 err_free_mapped:
         if (qp->rq_size) {
                 dma_unmap_single(&dev->pdev->dev, qp->rq_dma_addr, qp->rq_size,
                                  DMA_TO_DEVICE);
-                if (!rq_entry_inserted)
+                if (!qp->rq_mmap_entry)
                         free_pages_exact(qp->rq_cpu_addr, qp->rq_size);
         }
 err_free_qp:
@@ -897,16 +843,18 @@ void efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
         efa_destroy_cq_idx(dev, cq->cq_idx);
         dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
                          DMA_FROM_DEVICE);
+        rdma_user_mmap_entry_remove(cq->mmap_entry);
 }
 
 static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
                                  struct efa_ibv_create_cq_resp *resp)
 {
         resp->q_mmap_size = cq->size;
-        resp->q_mmap_key = mmap_entry_insert(dev, cq->ucontext, cq,
-                                             virt_to_phys(cq->cpu_addr),
-                                             cq->size, EFA_MMAP_DMA_PAGE);
-        if (resp->q_mmap_key == EFA_MMAP_INVALID)
+        cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
+                                                    virt_to_phys(cq->cpu_addr),
+                                                    cq->size, EFA_MMAP_DMA_PAGE,
+                                                    &resp->q_mmap_key);
+        if (!cq->mmap_entry)
                 return -ENOMEM;
 
         return 0;
@@ -924,7 +872,6 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
         struct efa_dev *dev = to_edev(ibdev);
         struct efa_ibv_create_cq cmd = {};
         struct efa_cq *cq = to_ecq(ibcq);
-        bool cq_entry_inserted = false;
         int entries = attr->cqe;
         int err;
@@ -1013,15 +960,13 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                 goto err_destroy_cq;
         }
 
-        cq_entry_inserted = true;
-
         if (udata->outlen) {
                 err = ib_copy_to_udata(udata, &resp,
                                        min(sizeof(resp), udata->outlen));
                 if (err) {
                         ibdev_dbg(ibdev,
                                   "Failed to copy udata for create_cq\n");
-                        goto err_destroy_cq;
+                        goto err_remove_mmap;
                 }
         }
@@ -1030,13 +975,16 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
         return 0;
 
+err_remove_mmap:
+        rdma_user_mmap_entry_remove(cq->mmap_entry);
 err_destroy_cq:
         efa_destroy_cq_idx(dev, cq->cq_idx);
 err_free_mapped:
         dma_unmap_single(&dev->pdev->dev, cq->dma_addr, cq->size,
                          DMA_FROM_DEVICE);
-        if (!cq_entry_inserted)
+        if (!cq->mmap_entry)
                 free_pages_exact(cq->cpu_addr, cq->size);
+
 err_out:
         atomic64_inc(&dev->stats.sw_stats.create_cq_err);
         return err;
@@ -1556,7 +1504,6 @@ int efa_alloc_ucontext(struct ib_ucontext *ibucontext, struct ib_udata *udata)
                 goto err_out;
 
         ucontext->uarn = result.uarn;
-        xa_init(&ucontext->mmap_xa);
 
         resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_QUERY_DEVICE;
         resp.cmds_supp_udata_mask |= EFA_USER_CMDS_SUPP_UDATA_CREATE_AH;
@@ -1585,40 +1532,56 @@ void efa_dealloc_ucontext(struct ib_ucontext *ibucontext)
         struct efa_ucontext *ucontext = to_eucontext(ibucontext);
         struct efa_dev *dev = to_edev(ibucontext->device);
 
-        mmap_entries_remove_free(dev, ucontext);
         efa_dealloc_uar(dev, ucontext->uarn);
 }
 
+void efa_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
+{
+        struct efa_user_mmap_entry *entry = to_emmap(rdma_entry);
+
+        /* DMA mapping is already gone, now free the pages */
+        if (entry->mmap_flag == EFA_MMAP_DMA_PAGE)
+                free_pages_exact(phys_to_virt(entry->address),
+                                 entry->rdma_entry.npages * PAGE_SIZE);
+        kfree(entry);
+}
+
 static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
-                      struct vm_area_struct *vma, u64 key, u64 length)
+                      struct vm_area_struct *vma)
 {
-        struct efa_mmap_entry *entry;
+        struct rdma_user_mmap_entry *rdma_entry;
+        struct efa_user_mmap_entry *entry;
         unsigned long va;
+        int err = 0;
         u64 pfn;
-        int err;
 
-        entry = mmap_entry_get(dev, ucontext, key, length);
-        if (!entry) {
-                ibdev_dbg(&dev->ibdev, "key[%#llx] does not have valid entry\n",
-                          key);
+        rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
+        if (!rdma_entry) {
+                ibdev_dbg(&dev->ibdev,
+                          "pgoff[%#lx] does not have valid entry\n",
+                          vma->vm_pgoff);
                 return -EINVAL;
         }
+        entry = to_emmap(rdma_entry);
 
         ibdev_dbg(&dev->ibdev,
-                  "Mapping address[%#llx], length[%#llx], mmap_flag[%d]\n",
-                  entry->address, length, entry->mmap_flag);
+                  "Mapping address[%#llx], length[%#zx], mmap_flag[%d]\n",
+                  entry->address, rdma_entry->npages * PAGE_SIZE,
+                  entry->mmap_flag);
 
         pfn = entry->address >> PAGE_SHIFT;
         switch (entry->mmap_flag) {
         case EFA_MMAP_IO_NC:
-                err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
+                err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
+                                        entry->rdma_entry.npages * PAGE_SIZE,
                                         pgprot_noncached(vma->vm_page_prot),
-                                        NULL);
+                                        rdma_entry);
                 break;
         case EFA_MMAP_IO_WC:
-                err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, length,
+                err = rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn,
+                                        entry->rdma_entry.npages * PAGE_SIZE,
                                         pgprot_writecombine(vma->vm_page_prot),
-                                        NULL);
+                                        rdma_entry);
                 break;
         case EFA_MMAP_DMA_PAGE:
                 for (va = vma->vm_start; va < vma->vm_end;
@@ -1635,12 +1598,13 @@ static int __efa_mmap(struct efa_dev *dev, struct efa_ucontext *ucontext,
         if (err) {
                 ibdev_dbg(
                         &dev->ibdev,
-                        "Couldn't mmap address[%#llx] length[%#llx] mmap_flag[%d] err[%d]\n",
-                        entry->address, length, entry->mmap_flag, err);
-                return err;
+                        "Couldn't mmap address[%#llx] length[%#zx] mmap_flag[%d] err[%d]\n",
+                        entry->address, rdma_entry->npages * PAGE_SIZE,
+                        entry->mmap_flag, err);
         }
 
-        return 0;
+        rdma_user_mmap_entry_put(rdma_entry);
+        return err;
 }
 
 int efa_mmap(struct ib_ucontext *ibucontext,
@@ -1648,26 +1612,13 @@ int efa_mmap(struct ib_ucontext *ibucontext,
 {
         struct efa_ucontext *ucontext = to_eucontext(ibucontext);
         struct efa_dev *dev = to_edev(ibucontext->device);
-        u64 length = vma->vm_end - vma->vm_start;
-        u64 key = vma->vm_pgoff << PAGE_SHIFT;
+        size_t length = vma->vm_end - vma->vm_start;
 
         ibdev_dbg(&dev->ibdev,
-                  "start %#lx, end %#lx, length = %#llx, key = %#llx\n",
-                  vma->vm_start, vma->vm_end, length, key);
-
-        if (length % PAGE_SIZE != 0 || !(vma->vm_flags & VM_SHARED)) {
-                ibdev_dbg(&dev->ibdev,
-                          "length[%#llx] is not page size aligned[%#lx] or VM_SHARED is not set [%#lx]\n",
-                          length, PAGE_SIZE, vma->vm_flags);
-                return -EINVAL;
-        }
-
-        if (vma->vm_flags & VM_EXEC) {
-                ibdev_dbg(&dev->ibdev, "Mapping executable pages is not permitted\n");
-                return -EPERM;
-        }
+                  "start %#lx, end %#lx, length = %#zx, pgoff = %#lx\n",
+                  vma->vm_start, vma->vm_end, length, vma->vm_pgoff);
 
-        return __efa_mmap(dev, ucontext, vma, key, length);
+        return __efa_mmap(dev, ucontext, vma);
 }
 
 static int efa_ah_destroy(struct efa_dev *dev, struct efa_ah *ah)