Commit ae967246 authored by Gautam Dawar, committed by Michael S. Tsirkin

vhost-vdpa: passing iotlb to IOMMU mapping helpers

To prepare for the ASID support for vhost-vdpa, try to pass the IOTLB
object to the DMA helpers. No functional changes; it's just a preparation
for supporting multiple IOTLBs.
Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Gautam Dawar <gdawar@xilinx.com>
Message-Id: <20220330180436.24644-4-gdawar@xilinx.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
parent ea239a67
...@@ -537,10 +537,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep, ...@@ -537,10 +537,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
return r; return r;
} }
static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last) static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
struct vhost_iotlb *iotlb,
u64 start, u64 last)
{ {
struct vhost_dev *dev = &v->vdev; struct vhost_dev *dev = &v->vdev;
struct vhost_iotlb *iotlb = dev->iotlb;
struct vhost_iotlb_map *map; struct vhost_iotlb_map *map;
struct page *page; struct page *page;
unsigned long pfn, pinned; unsigned long pfn, pinned;
...@@ -559,10 +560,10 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last) ...@@ -559,10 +560,10 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
} }
} }
static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last) static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
struct vhost_iotlb *iotlb,
u64 start, u64 last)
{ {
struct vhost_dev *dev = &v->vdev;
struct vhost_iotlb *iotlb = dev->iotlb;
struct vhost_iotlb_map *map; struct vhost_iotlb_map *map;
struct vdpa_map_file *map_file; struct vdpa_map_file *map_file;
...@@ -574,21 +575,24 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last) ...@@ -574,21 +575,24 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
} }
} }
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last) static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
struct vhost_iotlb *iotlb,
u64 start, u64 last)
{ {
struct vdpa_device *vdpa = v->vdpa; struct vdpa_device *vdpa = v->vdpa;
if (vdpa->use_va) if (vdpa->use_va)
return vhost_vdpa_va_unmap(v, start, last); return vhost_vdpa_va_unmap(v, iotlb, start, last);
return vhost_vdpa_pa_unmap(v, start, last); return vhost_vdpa_pa_unmap(v, iotlb, start, last);
} }
static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v) static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{ {
struct vhost_dev *dev = &v->vdev; struct vhost_dev *dev = &v->vdev;
struct vhost_iotlb *iotlb = dev->iotlb;
vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1); vhost_vdpa_iotlb_unmap(v, iotlb, 0ULL, 0ULL - 1);
kfree(dev->iotlb); kfree(dev->iotlb);
dev->iotlb = NULL; dev->iotlb = NULL;
} }
...@@ -615,15 +619,15 @@ static int perm_to_iommu_flags(u32 perm) ...@@ -615,15 +619,15 @@ static int perm_to_iommu_flags(u32 perm)
return flags | IOMMU_CACHE; return flags | IOMMU_CACHE;
} }
static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova, static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
u64 size, u64 pa, u32 perm, void *opaque) u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
{ {
struct vhost_dev *dev = &v->vdev; struct vhost_dev *dev = &v->vdev;
struct vdpa_device *vdpa = v->vdpa; struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config; const struct vdpa_config_ops *ops = vdpa->config;
int r = 0; int r = 0;
r = vhost_iotlb_add_range_ctx(dev->iotlb, iova, iova + size - 1, r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
pa, perm, opaque); pa, perm, opaque);
if (r) if (r)
return r; return r;
...@@ -632,13 +636,13 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova, ...@@ -632,13 +636,13 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
r = ops->dma_map(vdpa, iova, size, pa, perm, opaque); r = ops->dma_map(vdpa, iova, size, pa, perm, opaque);
} else if (ops->set_map) { } else if (ops->set_map) {
if (!v->in_batch) if (!v->in_batch)
r = ops->set_map(vdpa, dev->iotlb); r = ops->set_map(vdpa, iotlb);
} else { } else {
r = iommu_map(v->domain, iova, pa, size, r = iommu_map(v->domain, iova, pa, size,
perm_to_iommu_flags(perm)); perm_to_iommu_flags(perm));
} }
if (r) { if (r) {
vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1); vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
return r; return r;
} }
...@@ -648,25 +652,27 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova, ...@@ -648,25 +652,27 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
return 0; return 0;
} }
static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size) static void vhost_vdpa_unmap(struct vhost_vdpa *v,
struct vhost_iotlb *iotlb,
u64 iova, u64 size)
{ {
struct vhost_dev *dev = &v->vdev;
struct vdpa_device *vdpa = v->vdpa; struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config; const struct vdpa_config_ops *ops = vdpa->config;
vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1); vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
if (ops->dma_map) { if (ops->dma_map) {
ops->dma_unmap(vdpa, iova, size); ops->dma_unmap(vdpa, iova, size);
} else if (ops->set_map) { } else if (ops->set_map) {
if (!v->in_batch) if (!v->in_batch)
ops->set_map(vdpa, dev->iotlb); ops->set_map(vdpa, iotlb);
} else { } else {
iommu_unmap(v->domain, iova, size); iommu_unmap(v->domain, iova, size);
} }
} }
static int vhost_vdpa_va_map(struct vhost_vdpa *v, static int vhost_vdpa_va_map(struct vhost_vdpa *v,
struct vhost_iotlb *iotlb,
u64 iova, u64 size, u64 uaddr, u32 perm) u64 iova, u64 size, u64 uaddr, u32 perm)
{ {
struct vhost_dev *dev = &v->vdev; struct vhost_dev *dev = &v->vdev;
...@@ -696,7 +702,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v, ...@@ -696,7 +702,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start; offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
map_file->offset = offset; map_file->offset = offset;
map_file->file = get_file(vma->vm_file); map_file->file = get_file(vma->vm_file);
ret = vhost_vdpa_map(v, map_iova, map_size, uaddr, ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
perm, map_file); perm, map_file);
if (ret) { if (ret) {
fput(map_file->file); fput(map_file->file);
...@@ -709,7 +715,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v, ...@@ -709,7 +715,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
map_iova += map_size; map_iova += map_size;
} }
if (ret) if (ret)
vhost_vdpa_unmap(v, iova, map_iova - iova); vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);
mmap_read_unlock(dev->mm); mmap_read_unlock(dev->mm);
...@@ -717,6 +723,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v, ...@@ -717,6 +723,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
} }
static int vhost_vdpa_pa_map(struct vhost_vdpa *v, static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
struct vhost_iotlb *iotlb,
u64 iova, u64 size, u64 uaddr, u32 perm) u64 iova, u64 size, u64 uaddr, u32 perm)
{ {
struct vhost_dev *dev = &v->vdev; struct vhost_dev *dev = &v->vdev;
...@@ -780,7 +787,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v, ...@@ -780,7 +787,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
if (last_pfn && (this_pfn != last_pfn + 1)) { if (last_pfn && (this_pfn != last_pfn + 1)) {
/* Pin a contiguous chunk of memory */ /* Pin a contiguous chunk of memory */
csize = PFN_PHYS(last_pfn - map_pfn + 1); csize = PFN_PHYS(last_pfn - map_pfn + 1);
ret = vhost_vdpa_map(v, iova, csize, ret = vhost_vdpa_map(v, iotlb, iova, csize,
PFN_PHYS(map_pfn), PFN_PHYS(map_pfn),
perm, NULL); perm, NULL);
if (ret) { if (ret) {
...@@ -810,7 +817,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v, ...@@ -810,7 +817,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
} }
/* Pin the rest chunk */ /* Pin the rest chunk */
ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1), ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
PFN_PHYS(map_pfn), perm, NULL); PFN_PHYS(map_pfn), perm, NULL);
out: out:
if (ret) { if (ret) {
...@@ -830,7 +837,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v, ...@@ -830,7 +837,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
for (pfn = map_pfn; pfn <= last_pfn; pfn++) for (pfn = map_pfn; pfn <= last_pfn; pfn++)
unpin_user_page(pfn_to_page(pfn)); unpin_user_page(pfn_to_page(pfn));
} }
vhost_vdpa_unmap(v, start, size); vhost_vdpa_unmap(v, iotlb, start, size);
} }
unlock: unlock:
mmap_read_unlock(dev->mm); mmap_read_unlock(dev->mm);
...@@ -841,11 +848,10 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v, ...@@ -841,11 +848,10 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
} }
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v, static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
struct vhost_iotlb *iotlb,
struct vhost_iotlb_msg *msg) struct vhost_iotlb_msg *msg)
{ {
struct vhost_dev *dev = &v->vdev;
struct vdpa_device *vdpa = v->vdpa; struct vdpa_device *vdpa = v->vdpa;
struct vhost_iotlb *iotlb = dev->iotlb;
if (msg->iova < v->range.first || !msg->size || if (msg->iova < v->range.first || !msg->size ||
msg->iova > U64_MAX - msg->size + 1 || msg->iova > U64_MAX - msg->size + 1 ||
...@@ -857,10 +863,10 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v, ...@@ -857,10 +863,10 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
return -EEXIST; return -EEXIST;
if (vdpa->use_va) if (vdpa->use_va)
return vhost_vdpa_va_map(v, msg->iova, msg->size, return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
msg->uaddr, msg->perm); msg->uaddr, msg->perm);
return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr, return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
msg->perm); msg->perm);
} }
...@@ -870,6 +876,7 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, ...@@ -870,6 +876,7 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev); struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
struct vdpa_device *vdpa = v->vdpa; struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config; const struct vdpa_config_ops *ops = vdpa->config;
struct vhost_iotlb *iotlb = dev->iotlb;
int r = 0; int r = 0;
mutex_lock(&dev->mutex); mutex_lock(&dev->mutex);
...@@ -880,17 +887,17 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, ...@@ -880,17 +887,17 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
switch (msg->type) { switch (msg->type) {
case VHOST_IOTLB_UPDATE: case VHOST_IOTLB_UPDATE:
r = vhost_vdpa_process_iotlb_update(v, msg); r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
break; break;
case VHOST_IOTLB_INVALIDATE: case VHOST_IOTLB_INVALIDATE:
vhost_vdpa_unmap(v, msg->iova, msg->size); vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
break; break;
case VHOST_IOTLB_BATCH_BEGIN: case VHOST_IOTLB_BATCH_BEGIN:
v->in_batch = true; v->in_batch = true;
break; break;
case VHOST_IOTLB_BATCH_END: case VHOST_IOTLB_BATCH_END:
if (v->in_batch && ops->set_map) if (v->in_batch && ops->set_map)
ops->set_map(vdpa, dev->iotlb); ops->set_map(vdpa, iotlb);
v->in_batch = false; v->in_batch = false;
break; break;
default: default:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment