Commit ccd59dce authored by Liu Yi L, committed by Alex Williamson

vfio/type1: Refactor vfio_iommu_type1_ioctl()

This patch refactors vfio_iommu_type1_ioctl() to use a switch statement
instead of an if-else chain, giving each command its own helper function.

Cc: Kevin Tian <kevin.tian@intel.com>
CC: Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Eric Auger <eric.auger@redhat.com>
Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Suggested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Liu Yi L <yi.l.liu@intel.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
parent 50173329
...@@ -2455,6 +2455,23 @@ static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu) ...@@ -2455,6 +2455,23 @@ static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
return ret; return ret;
} }
/*
 * Handler for VFIO_CHECK_EXTENSION: report whether a given IOMMU
 * extension is supported by this backend.
 *
 * @iommu: the type1 IOMMU state, may be NULL if no container is set up yet
 * @arg:   the extension being queried (VFIO_*_IOMMU constant)
 *
 * Returns 1 if the extension is supported, 0 otherwise.
 */
static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
					    unsigned long arg)
{
	int supported = 0;

	if (arg == VFIO_TYPE1_IOMMU || arg == VFIO_TYPE1v2_IOMMU ||
	    arg == VFIO_TYPE1_NESTING_IOMMU) {
		/* All type1 flavors are always available. */
		supported = 1;
	} else if (arg == VFIO_DMA_CC_IOMMU) {
		/*
		 * Cache coherency depends on the domains currently
		 * attached; without an iommu there is nothing to ask.
		 */
		supported = iommu ? vfio_domains_have_iommu_cache(iommu) : 0;
	}

	return supported;
}
static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps, static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps,
struct vfio_iommu_type1_info_cap_iova_range *cap_iovas, struct vfio_iommu_type1_info_cap_iova_range *cap_iovas,
size_t size) size_t size)
...@@ -2531,241 +2548,256 @@ static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu, ...@@ -2531,241 +2548,256 @@ static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig)); return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig));
} }
static long vfio_iommu_type1_ioctl(void *iommu_data, static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
unsigned int cmd, unsigned long arg) unsigned long arg)
{ {
struct vfio_iommu *iommu = iommu_data; struct vfio_iommu_type1_info info;
unsigned long minsz; unsigned long minsz;
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
unsigned long capsz;
int ret;
if (cmd == VFIO_CHECK_EXTENSION) { minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
switch (arg) {
case VFIO_TYPE1_IOMMU:
case VFIO_TYPE1v2_IOMMU:
case VFIO_TYPE1_NESTING_IOMMU:
return 1;
case VFIO_DMA_CC_IOMMU:
if (!iommu)
return 0;
return vfio_domains_have_iommu_cache(iommu);
default:
return 0;
}
} else if (cmd == VFIO_IOMMU_GET_INFO) {
struct vfio_iommu_type1_info info;
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
unsigned long capsz;
int ret;
minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
/* For backward compatibility, cannot require this */ /* For backward compatibility, cannot require this */
capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset); capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);
if (copy_from_user(&info, (void __user *)arg, minsz)) if (copy_from_user(&info, (void __user *)arg, minsz))
return -EFAULT; return -EFAULT;
if (info.argsz < minsz) if (info.argsz < minsz)
return -EINVAL; return -EINVAL;
if (info.argsz >= capsz) { if (info.argsz >= capsz) {
minsz = capsz; minsz = capsz;
info.cap_offset = 0; /* output, no-recopy necessary */ info.cap_offset = 0; /* output, no-recopy necessary */
} }
mutex_lock(&iommu->lock); mutex_lock(&iommu->lock);
info.flags = VFIO_IOMMU_INFO_PGSIZES; info.flags = VFIO_IOMMU_INFO_PGSIZES;
info.iova_pgsizes = iommu->pgsize_bitmap; info.iova_pgsizes = iommu->pgsize_bitmap;
ret = vfio_iommu_migration_build_caps(iommu, &caps); ret = vfio_iommu_migration_build_caps(iommu, &caps);
if (!ret) if (!ret)
ret = vfio_iommu_iova_build_caps(iommu, &caps); ret = vfio_iommu_iova_build_caps(iommu, &caps);
mutex_unlock(&iommu->lock); mutex_unlock(&iommu->lock);
if (ret) if (ret)
return ret; return ret;
if (caps.size) { if (caps.size) {
info.flags |= VFIO_IOMMU_INFO_CAPS; info.flags |= VFIO_IOMMU_INFO_CAPS;
if (info.argsz < sizeof(info) + caps.size) { if (info.argsz < sizeof(info) + caps.size) {
info.argsz = sizeof(info) + caps.size; info.argsz = sizeof(info) + caps.size;
} else { } else {
vfio_info_cap_shift(&caps, sizeof(info)); vfio_info_cap_shift(&caps, sizeof(info));
if (copy_to_user((void __user *)arg + if (copy_to_user((void __user *)arg +
sizeof(info), caps.buf, sizeof(info), caps.buf,
caps.size)) { caps.size)) {
kfree(caps.buf); kfree(caps.buf);
return -EFAULT; return -EFAULT;
}
info.cap_offset = sizeof(info);
} }
info.cap_offset = sizeof(info);
kfree(caps.buf);
} }
return copy_to_user((void __user *)arg, &info, minsz) ? kfree(caps.buf);
-EFAULT : 0; }
} else if (cmd == VFIO_IOMMU_MAP_DMA) { return copy_to_user((void __user *)arg, &info, minsz) ?
struct vfio_iommu_type1_dma_map map; -EFAULT : 0;
uint32_t mask = VFIO_DMA_MAP_FLAG_READ | }
VFIO_DMA_MAP_FLAG_WRITE;
minsz = offsetofend(struct vfio_iommu_type1_dma_map, size); static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu,
unsigned long arg)
{
struct vfio_iommu_type1_dma_map map;
unsigned long minsz;
uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
if (copy_from_user(&map, (void __user *)arg, minsz)) minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
return -EFAULT;
if (map.argsz < minsz || map.flags & ~mask) if (copy_from_user(&map, (void __user *)arg, minsz))
return -EINVAL; return -EFAULT;
return vfio_dma_do_map(iommu, &map); if (map.argsz < minsz || map.flags & ~mask)
return -EINVAL;
} else if (cmd == VFIO_IOMMU_UNMAP_DMA) { return vfio_dma_do_map(iommu, &map);
struct vfio_iommu_type1_dma_unmap unmap; }
struct vfio_bitmap bitmap = { 0 };
int ret;
minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size); static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu,
unsigned long arg)
{
struct vfio_iommu_type1_dma_unmap unmap;
struct vfio_bitmap bitmap = { 0 };
unsigned long minsz;
int ret;
if (copy_from_user(&unmap, (void __user *)arg, minsz)) minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
return -EFAULT;
if (unmap.argsz < minsz || if (copy_from_user(&unmap, (void __user *)arg, minsz))
unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) return -EFAULT;
return -EINVAL;
if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) { if (unmap.argsz < minsz ||
unsigned long pgshift; unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP)
return -EINVAL;
if (unmap.argsz < (minsz + sizeof(bitmap))) if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
return -EINVAL; unsigned long pgshift;
if (copy_from_user(&bitmap, if (unmap.argsz < (minsz + sizeof(bitmap)))
(void __user *)(arg + minsz), return -EINVAL;
sizeof(bitmap)))
return -EFAULT;
if (!access_ok((void __user *)bitmap.data, bitmap.size)) if (copy_from_user(&bitmap,
return -EINVAL; (void __user *)(arg + minsz),
sizeof(bitmap)))
return -EFAULT;
pgshift = __ffs(bitmap.pgsize); if (!access_ok((void __user *)bitmap.data, bitmap.size))
ret = verify_bitmap_size(unmap.size >> pgshift, return -EINVAL;
bitmap.size);
if (ret)
return ret;
}
ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap); pgshift = __ffs(bitmap.pgsize);
ret = verify_bitmap_size(unmap.size >> pgshift,
bitmap.size);
if (ret) if (ret)
return ret; return ret;
}
ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
if (ret)
return ret;
return copy_to_user((void __user *)arg, &unmap, minsz) ? return copy_to_user((void __user *)arg, &unmap, minsz) ?
-EFAULT : 0; -EFAULT : 0;
} else if (cmd == VFIO_IOMMU_DIRTY_PAGES) { }
struct vfio_iommu_type1_dirty_bitmap dirty;
uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
int ret = 0;
if (!iommu->v2) static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
return -EACCES; unsigned long arg)
{
struct vfio_iommu_type1_dirty_bitmap dirty;
uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
unsigned long minsz;
int ret = 0;
minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, if (!iommu->v2)
flags); return -EACCES;
if (copy_from_user(&dirty, (void __user *)arg, minsz)) minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags);
return -EFAULT;
if (dirty.argsz < minsz || dirty.flags & ~mask) if (copy_from_user(&dirty, (void __user *)arg, minsz))
return -EINVAL; return -EFAULT;
/* only one flag should be set at a time */ if (dirty.argsz < minsz || dirty.flags & ~mask)
if (__ffs(dirty.flags) != __fls(dirty.flags)) return -EINVAL;
return -EINVAL;
if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) { /* only one flag should be set at a time */
size_t pgsize; if (__ffs(dirty.flags) != __fls(dirty.flags))
return -EINVAL;
mutex_lock(&iommu->lock); if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
pgsize = 1 << __ffs(iommu->pgsize_bitmap); size_t pgsize;
if (!iommu->dirty_page_tracking) {
ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
if (!ret)
iommu->dirty_page_tracking = true;
}
mutex_unlock(&iommu->lock);
return ret;
} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
mutex_lock(&iommu->lock);
if (iommu->dirty_page_tracking) {
iommu->dirty_page_tracking = false;
vfio_dma_bitmap_free_all(iommu);
}
mutex_unlock(&iommu->lock);
return 0;
} else if (dirty.flags &
VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
struct vfio_iommu_type1_dirty_bitmap_get range;
unsigned long pgshift;
size_t data_size = dirty.argsz - minsz;
size_t iommu_pgsize;
if (!data_size || data_size < sizeof(range))
return -EINVAL;
if (copy_from_user(&range, (void __user *)(arg + minsz),
sizeof(range)))
return -EFAULT;
if (range.iova + range.size < range.iova) mutex_lock(&iommu->lock);
return -EINVAL; pgsize = 1 << __ffs(iommu->pgsize_bitmap);
if (!access_ok((void __user *)range.bitmap.data, if (!iommu->dirty_page_tracking) {
range.bitmap.size)) ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
return -EINVAL; if (!ret)
iommu->dirty_page_tracking = true;
}
mutex_unlock(&iommu->lock);
return ret;
} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
mutex_lock(&iommu->lock);
if (iommu->dirty_page_tracking) {
iommu->dirty_page_tracking = false;
vfio_dma_bitmap_free_all(iommu);
}
mutex_unlock(&iommu->lock);
return 0;
} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
struct vfio_iommu_type1_dirty_bitmap_get range;
unsigned long pgshift;
size_t data_size = dirty.argsz - minsz;
size_t iommu_pgsize;
pgshift = __ffs(range.bitmap.pgsize); if (!data_size || data_size < sizeof(range))
ret = verify_bitmap_size(range.size >> pgshift, return -EINVAL;
range.bitmap.size);
if (ret)
return ret;
mutex_lock(&iommu->lock); if (copy_from_user(&range, (void __user *)(arg + minsz),
sizeof(range)))
return -EFAULT;
iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap); if (range.iova + range.size < range.iova)
return -EINVAL;
if (!access_ok((void __user *)range.bitmap.data,
range.bitmap.size))
return -EINVAL;
/* allow only smallest supported pgsize */ pgshift = __ffs(range.bitmap.pgsize);
if (range.bitmap.pgsize != iommu_pgsize) { ret = verify_bitmap_size(range.size >> pgshift,
ret = -EINVAL; range.bitmap.size);
goto out_unlock; if (ret)
} return ret;
if (range.iova & (iommu_pgsize - 1)) {
ret = -EINVAL;
goto out_unlock;
}
if (!range.size || range.size & (iommu_pgsize - 1)) {
ret = -EINVAL;
goto out_unlock;
}
if (iommu->dirty_page_tracking) mutex_lock(&iommu->lock);
ret = vfio_iova_dirty_bitmap(range.bitmap.data,
iommu, range.iova, range.size,
range.bitmap.pgsize);
else
ret = -EINVAL;
out_unlock:
mutex_unlock(&iommu->lock);
return ret; iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
/* allow only smallest supported pgsize */
if (range.bitmap.pgsize != iommu_pgsize) {
ret = -EINVAL;
goto out_unlock;
}
if (range.iova & (iommu_pgsize - 1)) {
ret = -EINVAL;
goto out_unlock;
}
if (!range.size || range.size & (iommu_pgsize - 1)) {
ret = -EINVAL;
goto out_unlock;
} }
if (iommu->dirty_page_tracking)
ret = vfio_iova_dirty_bitmap(range.bitmap.data,
iommu, range.iova,
range.size,
range.bitmap.pgsize);
else
ret = -EINVAL;
out_unlock:
mutex_unlock(&iommu->lock);
return ret;
} }
return -ENOTTY; return -EINVAL;
}
/*
 * Top-level ioctl dispatcher for the type1 IOMMU backend.  Each command
 * is routed to its dedicated helper; unknown commands get -ENOTTY, the
 * conventional "inappropriate ioctl" error.
 *
 * @iommu_data: opaque pointer installed at open time; actually a
 *              struct vfio_iommu * (may be NULL before VFIO_SET_IOMMU —
 *              the CHECK_EXTENSION helper tolerates that).
 * @cmd:        the ioctl command number.
 * @arg:        user-space argument, interpreted per command by the helper.
 */
static long vfio_iommu_type1_ioctl(void *iommu_data,
unsigned int cmd, unsigned long arg)
{
struct vfio_iommu *iommu = iommu_data;
switch (cmd) {
case VFIO_CHECK_EXTENSION:
return vfio_iommu_type1_check_extension(iommu, arg);
case VFIO_IOMMU_GET_INFO:
return vfio_iommu_type1_get_info(iommu, arg);
case VFIO_IOMMU_MAP_DMA:
return vfio_iommu_type1_map_dma(iommu, arg);
case VFIO_IOMMU_UNMAP_DMA:
return vfio_iommu_type1_unmap_dma(iommu, arg);
case VFIO_IOMMU_DIRTY_PAGES:
return vfio_iommu_type1_dirty_pages(iommu, arg);
default:
/* Not a command this backend implements. */
return -ENOTTY;
}
} }
static int vfio_iommu_type1_register_notifier(void *iommu_data, static int vfio_iommu_type1_register_notifier(void *iommu_data,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment