Commit 3534b18c authored by Matthew Brost, committed by Rodrigo Vivi

drm/xe: s/XE_PTE_READ_ONLY/XE_PTE_FLAG_READ_ONLY

This define is for internal PTE flags rather than fields in the hardware
PTEs, so rename it as such. This will help avoid further confusion in an
upcoming patch.
Reviewed-by: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 5e3220de
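
To illustrate the distinction the commit message draws, here is a minimal sketch (not taken from the patch; the standalone helper name pte_encode_sketch is hypothetical) of how a software-only XE_PTE_FLAG_* value gets translated into the real hardware XE_PAGE_* bits. It uses only identifiers that appear in the diff below plus the kernel's BIT()/BIT_ULL() macros.

#include <linux/bits.h>

/* Hardware PTE bit fields: actual bits written into the page table entry. */
#define XE_PAGE_PRESENT		BIT_ULL(0)
#define XE_PAGE_RW		BIT_ULL(1)

/* Internal, software-only flag; not a field in the hardware PTE. */
#define XE_PTE_FLAG_READ_ONLY	BIT(0)

/*
 * Hypothetical helper mirroring the flag handling in __gen8_pte_encode()
 * below: the internal flag is never written to the PTE itself, it only
 * decides whether the hardware RW bit gets cleared.
 */
static u64 pte_encode_sketch(u64 pte, u32 flags)
{
	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
	if (flags & XE_PTE_FLAG_READ_ONLY)
		pte &= ~XE_PAGE_RW;
	return pte;
}
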
@@ -65,7 +65,7 @@
 #define XE_PAGE_PRESENT	BIT_ULL(0)
 #define XE_PAGE_RW	BIT_ULL(1)
 
-#define XE_PTE_READ_ONLY	BIT(0)
+#define XE_PTE_FLAG_READ_ONLY	BIT(0)
 
 #define XE_PL_SYSTEM	TTM_PL_SYSTEM
 #define XE_PL_TT	TTM_PL_TT
...
@@ -102,7 +102,7 @@ static u64 __gen8_pte_encode(u64 pte, enum xe_cache_level cache, u32 flags,
 {
 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
 
-	if (unlikely(flags & XE_PTE_READ_ONLY))
+	if (unlikely(flags & XE_PTE_FLAG_READ_ONLY))
 		pte &= ~XE_PAGE_RW;
 
 	/* FIXME: I don't think the PPAT handling is correct for MTL */
...
@@ -61,7 +61,7 @@ int xe_vma_userptr_pin_pages(struct xe_vma *vma)
 	bool in_kthread = !current->mm;
 	unsigned long notifier_seq;
 	int pinned, ret, i;
-	bool read_only = vma->pte_flags & XE_PTE_READ_ONLY;
+	bool read_only = vma->pte_flags & XE_PTE_FLAG_READ_ONLY;
 
 	lockdep_assert_held(&vm->lock);
 	XE_BUG_ON(!xe_vma_is_userptr(vma));
@@ -869,7 +869,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 	vma->start = start;
 	vma->end = end;
 	if (read_only)
-		vma->pte_flags = XE_PTE_READ_ONLY;
+		vma->pte_flags = XE_PTE_FLAG_READ_ONLY;
 
 	if (tile_mask) {
 		vma->tile_mask = tile_mask;
@@ -923,7 +923,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 {
 	struct xe_vm *vm = vma->vm;
 	struct xe_device *xe = vm->xe;
-	bool read_only = vma->pte_flags & XE_PTE_READ_ONLY;
+	bool read_only = vma->pte_flags & XE_PTE_FLAG_READ_ONLY;
 
 	if (xe_vma_is_userptr(vma)) {
 		if (vma->userptr.sg) {
@@ -2643,7 +2643,8 @@ static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
 					   first->userptr.ptr,
 					   first->start,
 					   lookup->start - 1,
-					   (first->pte_flags & XE_PTE_READ_ONLY),
+					   (first->pte_flags &
+					    XE_PTE_FLAG_READ_ONLY),
 					   first->tile_mask);
 		if (first->bo)
 			xe_bo_unlock(first->bo, &ww);
@@ -2674,7 +2675,8 @@ static struct xe_vma *vm_unbind_lookup_vmas(struct xe_vm *vm,
 					  last->userptr.ptr + chunk,
 					  last->start + chunk,
 					  last->end,
-					  (last->pte_flags & XE_PTE_READ_ONLY),
+					  (last->pte_flags &
+					   XE_PTE_FLAG_READ_ONLY),
 					  last->tile_mask);
 		if (last->bo)
 			xe_bo_unlock(last->bo, &ww);
...