Commit e7192f01 authored by Nirmoy Das

drm/xe: Move vm bind bo validation to a helper function

Move vm bind BO validation into a helper function to make
xe_vm_bind_ioctl() more readable.

v2: Capture the return value of xe_vm_bind_ioctl_validate_bo() (Matt B).
    Remove the redundant coh_mode param.
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Oak Zeng <oak.zeng@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Acked-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240430162529.21588-3-nirmoy.das@intel.com
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
parent c462f81b
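
The new helper centralizes three checks that every bind op's BO must pass: the mapping must fit inside the BO, BOs padded to 64K pages must be 64K-aligned, and the PAT index's coherency mode must be compatible with the BO's CPU caching. Note that the bounds check is written in an overflow-safe order: testing range > bo->size first guarantees that bo->size - range cannot underflow, so the mapping's end is validated without ever computing obj_offset + range, which could wrap. A minimal standalone sketch of that pattern (plain C with illustrative names, not xe driver code):

#include <stdint.h>
#include <stdio.h>

/*
 * Standalone sketch of the overflow-safe bounds check used by
 * xe_vm_bind_ioctl_validate_bo(). The names and return values are
 * illustrative only.
 */
static int validate_bounds(uint64_t size, uint64_t offset, uint64_t range)
{
        if (range > size)
                return -1;      /* range alone exceeds the BO */
        if (offset > size - range)
                return -1;      /* mapping would run past the end of the BO */
        return 0;
}

int main(void)
{
        /* 64 KiB BO: offset 60 KiB + range 8 KiB ends past the BO. */
        printf("%d\n", validate_bounds(64 << 10, 60 << 10, 8 << 10));   /* -1 */
        /* offset 56 KiB + range 8 KiB ends exactly at the BO's end. */
        printf("%d\n", validate_bounds(64 << 10, 56 << 10, 8 << 10));   /*  0 */
        return 0;
}
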
@@ -3057,6 +3057,46 @@ static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
 	vops->num_syncs = num_syncs;
 }
 
+static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
+					u64 addr, u64 range, u64 obj_offset,
+					u16 pat_index)
+{
+	u16 coh_mode;
+
+	if (XE_IOCTL_DBG(xe, range > bo->size) ||
+	    XE_IOCTL_DBG(xe, obj_offset >
+			 bo->size - range)) {
+		return -EINVAL;
+	}
+
+	if (bo->flags & XE_BO_FLAG_INTERNAL_64K) {
+		if (XE_IOCTL_DBG(xe, obj_offset &
+				 XE_64K_PAGE_MASK) ||
+		    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
+		    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
+			return -EINVAL;
+		}
+	}
+
+	coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
+	if (bo->cpu_caching) {
+		if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
+				 bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
+			return -EINVAL;
+		}
+	} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
+		/*
+		 * An imported dma-buf from a different device should
+		 * require 1-way or 2-way coherency, since we don't know
+		 * how it was mapped on the CPU. Just assume it is
+		 * potentially cached on the CPU side.
+		 */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
 	struct xe_device *xe = to_xe_device(dev);
@@ -3140,7 +3180,6 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		u32 obj = bind_ops[i].obj;
 		u64 obj_offset = bind_ops[i].obj_offset;
 		u16 pat_index = bind_ops[i].pat_index;
-		u16 coh_mode;
 
 		if (!obj)
 			continue;
@@ -3152,40 +3191,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		}
 		bos[i] = gem_to_xe_bo(gem_obj);
 
-		if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
-		    XE_IOCTL_DBG(xe, obj_offset >
-				 bos[i]->size - range)) {
-			err = -EINVAL;
-			goto put_obj;
-		}
-
-		if (bos[i]->flags & XE_BO_FLAG_INTERNAL_64K) {
-			if (XE_IOCTL_DBG(xe, obj_offset &
-					 XE_64K_PAGE_MASK) ||
-			    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
-			    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
-				err = -EINVAL;
-				goto put_obj;
-			}
-		}
-
-		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
-		if (bos[i]->cpu_caching) {
-			if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
-					 bos[i]->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
-				err = -EINVAL;
-				goto put_obj;
-			}
-		} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
-			/*
-			 * An imported dma-buf from a different device should
-			 * require 1-way or 2-way coherency, since we don't know
-			 * how it was mapped on the CPU. Just assume it is
-			 * potentially cached on the CPU side.
-			 */
-			err = -EINVAL;
-			goto put_obj;
-		}
+		err = xe_vm_bind_ioctl_validate_bo(xe, bos[i], addr, range,
+						   obj_offset, pat_index);
+		if (err)
+			goto put_obj;
 	}
 
 	if (args->num_syncs) {
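
For readers unfamiliar with the coherency rule the helper enforces, the decision can be mirrored outside the driver: a write-back CPU mapping may not be combined with a non-coherent PAT index, and an imported dma-buf (whose CPU caching mode is unknown) must be mapped with at least 1-way coherency. The sketch below uses invented enum and function names; only the rule itself comes from xe_vm_bind_ioctl_validate_bo():

#include <stdio.h>

/*
 * Standalone mirror of the coherency rule enforced by
 * xe_vm_bind_ioctl_validate_bo(). All identifiers are illustrative,
 * not the driver's own types.
 */
enum coh_mode    { COH_NONE, COH_AT_LEAST_1WAY };
enum cpu_caching { CACHING_UNKNOWN, CACHING_WB, CACHING_WC };

static int validate_coherency(enum coh_mode coh, enum cpu_caching caching)
{
        /*
         * A write-back CPU mapping with a non-coherent PAT index would
         * let stale CPU cache lines alias GPU accesses, so reject it.
         */
        if (caching == CACHING_WB && coh == COH_NONE)
                return -1;
        /*
         * Imported dma-bufs have no known CPU caching mode, so they
         * must be at least 1-way coherent.
         */
        if (caching == CACHING_UNKNOWN && coh == COH_NONE)
                return -1;
        return 0;
}

int main(void)
{
        printf("%d\n", validate_coherency(COH_NONE, CACHING_WB));           /* -1 */
        printf("%d\n", validate_coherency(COH_NONE, CACHING_UNKNOWN));      /* -1 */
        printf("%d\n", validate_coherency(COH_AT_LEAST_1WAY, CACHING_WB));  /*  0 */
        return 0;
}
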