Commit 31ced035 authored by Francois Dugast, committed by Lucas De Marchi

drm/xe/uapi: Restore flags VM_BIND_FLAG_READONLY and VM_BIND_FLAG_IMMEDIATE

The commit 84a1ed5e ("drm/xe/uapi: Remove unused flags") is partially
reverted. At the time, flags not used by user space were removed during
cleanup. Some flags now needed by the compute runtime are brought back in
this commit:
- DRM_XE_VM_BIND_FLAG_READONLY is used to write protect kernel ISA thus
preventing accidental overwrites.
- DRM_XE_VM_BIND_FLAG_IMMEDIATE is used to trigger mapping at the time of
binding in order to prevent faulting at execution time.

The changes in the compute runtime are ready and approved, see link below.

v2: Include a link to the PR in the commit message (Matthew Brost)

v3: Update kernel doc and improve commit message (Lucas De Marchi)

Cc: Mateusz Jablonski <mateusz.jablonski@intel.com>
Cc: Michal Mrozek <michal.mrozek@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://github.com/intel/compute-runtime/pull/717
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240329124403.7-1-francois.dugast@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
parent b611dad0
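
The commit message above explains what each restored flag is for; as a concrete illustration, here is a minimal userspace sketch of a bind using both flags. The helper name, handles, and addresses are hypothetical, the default exec queue and pat_index 0 are assumed for brevity, and error handling and sync objects are omitted:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#include <drm/xe_drm.h>	/* uapi header shown in the diff below */

/* Hypothetical helper: map bo_handle at gpu_addr as read-only ISA and
 * bind it immediately instead of waiting for a GPU page fault. */
static int bind_isa_readonly(int fd, uint32_t vm_id, uint32_t bo_handle,
			     uint64_t gpu_addr, uint64_t size)
{
	struct drm_xe_vm_bind bind;

	memset(&bind, 0, sizeof(bind));	/* exec_queue_id 0: default queue */
	bind.vm_id = vm_id;
	bind.num_binds = 1;
	bind.bind.obj = bo_handle;
	bind.bind.obj_offset = 0;
	bind.bind.range = size;		/* must be page aligned */
	bind.bind.addr = gpu_addr;	/* must be page aligned */
	bind.bind.op = DRM_XE_VM_BIND_OP_MAP;
	bind.bind.flags = DRM_XE_VM_BIND_FLAG_READONLY |
			  DRM_XE_VM_BIND_FLAG_IMMEDIATE;
	/* bind.bind.pat_index left at 0 for brevity; a real driver picks
	 * a platform-appropriate PAT index. */

	return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
}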
drivers/gpu/drm/xe/xe_vm.c

@@ -2213,6 +2213,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 		struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
 
 		if (__op->op == DRM_GPUVA_OP_MAP) {
+			op->map.immediate =
+				flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;
+			op->map.read_only =
+				flags & DRM_XE_VM_BIND_FLAG_READONLY;
 			op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL;
 			op->map.dumpable = flags & DRM_XE_VM_BIND_FLAG_DUMPABLE;
 			op->map.pat_index = pat_index;
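
One subtlety in the two restored assignments above: the masks are bits 0 and 1, while op->map.immediate and op->map.read_only are bool, so the code relies on C's _Bool conversion collapsing any nonzero value to 1. A standalone illustration, with the macro value copied from the uapi hunk further down:

#include <assert.h>
#include <stdbool.h>

#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)

int main(void)
{
	unsigned int flags = DRM_XE_VM_BIND_FLAG_IMMEDIATE;
	/* The masked value is 0x2, not 1, yet conversion to bool
	 * yields true: bit tests assigned to bool are safe. */
	bool immediate = flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE;

	assert(immediate == true);
	return 0;
}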
@@ -2407,6 +2411,8 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q,
 		switch (op->base.op) {
 		case DRM_GPUVA_OP_MAP:
 		{
+			flags |= op->map.read_only ?
+				VMA_CREATE_FLAG_READ_ONLY : 0;
 			flags |= op->map.is_null ?
 				VMA_CREATE_FLAG_IS_NULL : 0;
 			flags |= op->map.dumpable ?
@@ -2551,7 +2557,7 @@ static int op_execute(struct drm_exec *exec, struct xe_vm *vm,
 	case DRM_GPUVA_OP_MAP:
 		err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma),
 				 op->syncs, op->num_syncs,
-				 !xe_vm_in_fault_mode(vm),
+				 op->map.immediate || !xe_vm_in_fault_mode(vm),
 				 op->flags & XE_VMA_OP_FIRST,
 				 op->flags & XE_VMA_OP_LAST);
 		break;
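
The only functional change in op_execute() is the immediate argument to xe_vm_bind(): the MAP is now performed at bind time either when userspace asked for it or when the VM is not in fault mode. A restatement of that predicate outside the kernel, purely for clarity:

#include <stdbool.h>

/* Restatement of the predicate passed to xe_vm_bind() above; not
 * kernel code. On a non-faulting VM the bind is always immediate,
 * because there is no fault handler to defer to. */
static bool bind_now(bool immediate_flag, bool vm_in_fault_mode)
{
	return immediate_flag || !vm_in_fault_mode;
}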
@@ -2826,7 +2832,10 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
 	return 0;
 }
 
-#define SUPPORTED_FLAGS	(DRM_XE_VM_BIND_FLAG_NULL | \
+#define SUPPORTED_FLAGS	\
+	(DRM_XE_VM_BIND_FLAG_READONLY | \
+	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
+	 DRM_XE_VM_BIND_FLAG_NULL | \
 	 DRM_XE_VM_BIND_FLAG_DUMPABLE)
 #define XE_64K_PAGE_MASK 0xffffull
 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
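
Widening SUPPORTED_FLAGS is what makes the ioctl accept the restored bits again, since the bind path rejects anything outside this mask. The exact call site is not part of this diff; a sketch of that style of check:

#include <errno.h>

/* Sketch only, mirroring the style of check used by the bind ioctl:
 * reject any flag bit outside the supported mask, so future bits
 * cannot silently change behavior for old userspace. */
static int validate_bind_flags(unsigned int flags, unsigned int supported)
{
	if (flags & ~supported)
		return -EINVAL;	/* unknown bits: fail, don't ignore */
	return 0;
}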
drivers/gpu/drm/xe/xe_vm_types.h

@@ -276,6 +276,10 @@ struct xe_vm {
 struct xe_vma_op_map {
 	/** @vma: VMA to map */
 	struct xe_vma *vma;
+	/** @immediate: Immediate bind */
+	bool immediate;
+	/** @read_only: Read only */
+	bool read_only;
 	/** @is_null: is NULL binding */
 	bool is_null;
 	/** @dumpable: whether BO is dumped on GPU hang */
include/uapi/drm/xe_drm.h

@@ -871,6 +871,12 @@ struct drm_xe_vm_destroy {
  *  - %DRM_XE_VM_BIND_OP_PREFETCH
  *
  * and the @flags can be:
+ *  - %DRM_XE_VM_BIND_FLAG_READONLY - Setup the page tables as read-only
+ *    to ensure write protection
+ *  - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - On a faulting VM, do the
+ *    MAP operation immediately rather than deferring the MAP to the page
+ *    fault handler. This is implied on a non-faulting VM as there is no
+ *    fault handler to defer to.
  *  - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
  *    tables are setup with a special bit which indicates writes are
  *    dropped and all reads return zero. In the future, the NULL flags
@@ -963,6 +969,8 @@ struct drm_xe_vm_bind_op {
 	/** @op: Bind operation to perform */
 	__u32 op;
 
+#define DRM_XE_VM_BIND_FLAG_READONLY	(1 << 0)
+#define DRM_XE_VM_BIND_FLAG_IMMEDIATE	(1 << 1)
 #define DRM_XE_VM_BIND_FLAG_NULL	(1 << 2)
 #define DRM_XE_VM_BIND_FLAG_DUMPABLE	(1 << 3)
 	/** @flags: Bind flags */
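
Since kernels without this commit reject these two bits (they fall outside the old SUPPORTED_FLAGS), userspace that must run on both can probe once and fall back. A hypothetical probe reusing the bind_isa_readonly() helper sketched near the top:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical probe: on kernels predating this commit, an otherwise
 * valid bind with the new flags fails with EINVAL because the bits
 * fall outside SUPPORTED_FLAGS; 0 means they were accepted. */
static bool kernel_supports_restored_flags(int fd, uint32_t vm_id,
					   uint32_t bo, uint64_t addr,
					   uint64_t size)
{
	return bind_isa_readonly(fd, vm_id, bo, addr, size) == 0;
}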