Commit ea0640fc authored by Francois Dugast, committed by Rodrigo Vivi

drm/xe/uapi: Separate VM_BIND's operation and flag

Use different members in drm_xe_vm_bind_op for the operation and for the
flags, as is done in other structures.

The type is kept as u32 to leave enough room for future operations and flags.

v2: Remove the XE_VM_BIND_* flags shift (Rodrigo Vivi)

Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/303
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
parent 7793d00d
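With this split, userspace sets the bind operation and its modifier flags in separate members of struct drm_xe_vm_bind_op. The following is a minimal, hypothetical userspace sketch of the new layout, not code from this commit: bo_handle, gpu_va and size are placeholder inputs obtained elsewhere (BO creation, VA allocation), and the include path assumes the updated uapi header is installed.

#include <drm/xe_drm.h>

/* Sketch only: build a read-only, asynchronous MAP bind op with the new
 * separate @op and @flags members. bo_handle, gpu_va and size are
 * hypothetical values supplied by the caller.
 */
static struct drm_xe_vm_bind_op make_async_ro_map(__u32 bo_handle,
                                                   __u64 gpu_va, __u64 size)
{
        struct drm_xe_vm_bind_op bind_op = {
                .obj        = bo_handle,
                .obj_offset = 0,
                .addr       = gpu_va,
                .range      = size,
                .op         = XE_VM_BIND_OP_MAP,        /* operation only */
                .flags      = XE_VM_BIND_FLAG_ASYNC |   /* flags now start at bit 0 */
                              XE_VM_BIND_FLAG_READONLY,
        };

        return bind_op;
}

Previously both halves lived in the single @op field, with flags occupying the upper 16 bits; after this change the flag masking against SUPPORTED_FLAGS and the operation decoding via VM_BIND_OP() no longer share a field, as the diff below shows.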
...@@ -2282,11 +2282,11 @@ static void vm_set_async_error(struct xe_vm *vm, int err) ...@@ -2282,11 +2282,11 @@ static void vm_set_async_error(struct xe_vm *vm, int err)
} }
static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo, static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
u64 addr, u64 range, u32 op) u64 addr, u64 range, u32 op, u32 flags)
{ {
struct xe_device *xe = vm->xe; struct xe_device *xe = vm->xe;
struct xe_vma *vma; struct xe_vma *vma;
bool async = !!(op & XE_VM_BIND_FLAG_ASYNC); bool async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
lockdep_assert_held(&vm->lock); lockdep_assert_held(&vm->lock);
...@@ -2387,7 +2387,7 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op) ...@@ -2387,7 +2387,7 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
static struct drm_gpuva_ops * static struct drm_gpuva_ops *
vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
u64 bo_offset_or_userptr, u64 addr, u64 range, u64 bo_offset_or_userptr, u64 addr, u64 range,
u32 operation, u8 tile_mask, u32 region) u32 operation, u32 flags, u8 tile_mask, u32 region)
{ {
struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL; struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
struct drm_gpuva_ops *ops; struct drm_gpuva_ops *ops;
...@@ -2416,10 +2416,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, ...@@ -2416,10 +2416,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
op->tile_mask = tile_mask; op->tile_mask = tile_mask;
op->map.immediate = op->map.immediate =
operation & XE_VM_BIND_FLAG_IMMEDIATE; flags & XE_VM_BIND_FLAG_IMMEDIATE;
op->map.read_only = op->map.read_only =
operation & XE_VM_BIND_FLAG_READONLY; flags & XE_VM_BIND_FLAG_READONLY;
op->map.is_null = operation & XE_VM_BIND_FLAG_NULL; op->map.is_null = flags & XE_VM_BIND_FLAG_NULL;
} }
break; break;
case XE_VM_BIND_OP_UNMAP: case XE_VM_BIND_OP_UNMAP:
...@@ -3236,15 +3236,16 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, ...@@ -3236,15 +3236,16 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
u64 range = (*bind_ops)[i].range; u64 range = (*bind_ops)[i].range;
u64 addr = (*bind_ops)[i].addr; u64 addr = (*bind_ops)[i].addr;
u32 op = (*bind_ops)[i].op; u32 op = (*bind_ops)[i].op;
u32 flags = (*bind_ops)[i].flags;
u32 obj = (*bind_ops)[i].obj; u32 obj = (*bind_ops)[i].obj;
u64 obj_offset = (*bind_ops)[i].obj_offset; u64 obj_offset = (*bind_ops)[i].obj_offset;
u32 region = (*bind_ops)[i].region; u32 region = (*bind_ops)[i].region;
bool is_null = op & XE_VM_BIND_FLAG_NULL; bool is_null = flags & XE_VM_BIND_FLAG_NULL;
if (i == 0) { if (i == 0) {
*async = !!(op & XE_VM_BIND_FLAG_ASYNC); *async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
} else if (XE_IOCTL_DBG(xe, !*async) || } else if (XE_IOCTL_DBG(xe, !*async) ||
XE_IOCTL_DBG(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) || XE_IOCTL_DBG(xe, !(flags & XE_VM_BIND_FLAG_ASYNC)) ||
XE_IOCTL_DBG(xe, VM_BIND_OP(op) == XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
XE_VM_BIND_OP_RESTART)) { XE_VM_BIND_OP_RESTART)) {
err = -EINVAL; err = -EINVAL;
...@@ -3265,7 +3266,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, ...@@ -3265,7 +3266,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) > if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
XE_VM_BIND_OP_PREFETCH) || XE_VM_BIND_OP_PREFETCH) ||
XE_IOCTL_DBG(xe, op & ~SUPPORTED_FLAGS) || XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
XE_IOCTL_DBG(xe, obj && is_null) || XE_IOCTL_DBG(xe, obj && is_null) ||
XE_IOCTL_DBG(xe, obj_offset && is_null) || XE_IOCTL_DBG(xe, obj_offset && is_null) ||
XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP && XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
...@@ -3480,8 +3481,9 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) ...@@ -3480,8 +3481,9 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u64 range = bind_ops[i].range; u64 range = bind_ops[i].range;
u64 addr = bind_ops[i].addr; u64 addr = bind_ops[i].addr;
u32 op = bind_ops[i].op; u32 op = bind_ops[i].op;
u32 flags = bind_ops[i].flags;
err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op); err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op, flags);
if (err) if (err)
goto free_syncs; goto free_syncs;
} }
...@@ -3490,13 +3492,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) ...@@ -3490,13 +3492,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u64 range = bind_ops[i].range; u64 range = bind_ops[i].range;
u64 addr = bind_ops[i].addr; u64 addr = bind_ops[i].addr;
u32 op = bind_ops[i].op; u32 op = bind_ops[i].op;
u32 flags = bind_ops[i].flags;
u64 obj_offset = bind_ops[i].obj_offset; u64 obj_offset = bind_ops[i].obj_offset;
u8 tile_mask = bind_ops[i].tile_mask; u8 tile_mask = bind_ops[i].tile_mask;
u32 region = bind_ops[i].region; u32 region = bind_ops[i].region;
ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset, ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
addr, range, op, tile_mask, addr, range, op, flags,
region); tile_mask, region);
if (IS_ERR(ops[i])) { if (IS_ERR(ops[i])) {
err = PTR_ERR(ops[i]); err = PTR_ERR(ops[i]);
ops[i] = NULL; ops[i] = NULL;
......
...@@ -660,8 +660,10 @@ struct drm_xe_vm_bind_op { ...@@ -660,8 +660,10 @@ struct drm_xe_vm_bind_op {
#define XE_VM_BIND_OP_RESTART 0x3 #define XE_VM_BIND_OP_RESTART 0x3
#define XE_VM_BIND_OP_UNMAP_ALL 0x4 #define XE_VM_BIND_OP_UNMAP_ALL 0x4
#define XE_VM_BIND_OP_PREFETCH 0x5 #define XE_VM_BIND_OP_PREFETCH 0x5
/** @op: Bind operation to perform */
__u32 op;
#define XE_VM_BIND_FLAG_READONLY (0x1 << 16) #define XE_VM_BIND_FLAG_READONLY (0x1 << 0)
/* /*
* A bind ops completions are always async, hence the support for out * A bind ops completions are always async, hence the support for out
* sync. This flag indicates the allocation of the memory for new page * sync. This flag indicates the allocation of the memory for new page
...@@ -686,12 +688,12 @@ struct drm_xe_vm_bind_op { ...@@ -686,12 +688,12 @@ struct drm_xe_vm_bind_op {
* configured in the VM and must be set if the VM is configured with * configured in the VM and must be set if the VM is configured with
* DRM_XE_VM_CREATE_ASYNC_BIND_OPS and not in an error state. * DRM_XE_VM_CREATE_ASYNC_BIND_OPS and not in an error state.
*/ */
#define XE_VM_BIND_FLAG_ASYNC (0x1 << 17) #define XE_VM_BIND_FLAG_ASYNC (0x1 << 1)
/* /*
* Valid on a faulting VM only, do the MAP operation immediately rather * Valid on a faulting VM only, do the MAP operation immediately rather
* than deferring the MAP to the page fault handler. * than deferring the MAP to the page fault handler.
*/ */
#define XE_VM_BIND_FLAG_IMMEDIATE (0x1 << 18) #define XE_VM_BIND_FLAG_IMMEDIATE (0x1 << 2)
/* /*
* When the NULL flag is set, the page tables are setup with a special * When the NULL flag is set, the page tables are setup with a special
* bit which indicates writes are dropped and all reads return zero. In * bit which indicates writes are dropped and all reads return zero. In
...@@ -699,9 +701,9 @@ struct drm_xe_vm_bind_op { ...@@ -699,9 +701,9 @@ struct drm_xe_vm_bind_op {
* operations, the BO handle MBZ, and the BO offset MBZ. This flag is * operations, the BO handle MBZ, and the BO offset MBZ. This flag is
* intended to implement VK sparse bindings. * intended to implement VK sparse bindings.
*/ */
#define XE_VM_BIND_FLAG_NULL (0x1 << 19) #define XE_VM_BIND_FLAG_NULL (0x1 << 3)
/** @op: Operation to perform (lower 16 bits) and flags (upper 16 bits) */ /** @flags: Bind flags */
__u32 op; __u32 flags;
/** @mem_region: Memory region to prefetch VMA to, instance not a mask */ /** @mem_region: Memory region to prefetch VMA to, instance not a mask */
__u32 region; __u32 region;
......