Commit fcd75139 authored by Lucas De Marchi, committed by Rodrigo Vivi

drm/xe: Use pat_index to encode pde/pte

Change the xelp_pte_encode_*() and xelp_pde_encode_bo() functions to use
the platform-dependent pat_index.  The same functions can be used for all
platforms, since they only need to place the pat_index bits into the same
pte/pde layout.  Platforms that lack the most significant PAT bit should
still be fine, as long as they never return a bogus index.

v2: Use the same logic to encode the pde: it is compatible with the
    previous logic, more future proof, and it also fixes the cache
    setting for PVC (Matt Roper)
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://lore.kernel.org/r/20230927193902.2849159-10-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 0d68247e
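
[Editor's note] The encoding described in the commit message boils down to scattering the low bits of a pat_index into fixed PTE bit positions. Below is a minimal sketch of that mapping, not part of the patch; the helper name pat_index_to_pte_bits() is invented for illustration and relies on the XE_PPGTT_PTE_PAT* / XELPG_PPGTT_PTE_PAT3 macros added in the first hunk.

/*
 * Illustration only (hypothetical helper, not in the patch): scatter a
 * pat_index into the PTE PAT bits defined in the first hunk below.
 * Index bit 0 -> PTE bit 3, bit 1 -> bit 4, bit 2 -> bit 7,
 * bit 3 -> bit 62 (Xe_LPG and newer only).
 */
static u64 pat_index_to_pte_bits(u32 pat_index)
{
	u64 pte = 0;

	if (pat_index & BIT(0))
		pte |= XE_PPGTT_PTE_PAT0;
	if (pat_index & BIT(1))
		pte |= XE_PPGTT_PTE_PAT1;
	if (pat_index & BIT(2))
		pte |= XE_PPGTT_PTE_PAT2;
	if (pat_index & BIT(3))
		pte |= XELPG_PPGTT_PTE_PAT3;

	return pte;
}

For example, a pat_index of 3 sets PAT0 and PAT1. The new pde_encode_cache() in the xe_vm.c hunk consumes only the two low index bits, since only PAT0/PAT1 exist at the PDE level.
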
@@ -48,10 +48,10 @@
 #define XE_BO_INTERNAL_TEST		BIT(30)
 #define XE_BO_INTERNAL_64K		BIT(31)
 
-#define PPAT_UNCACHED			GENMASK_ULL(4, 3)
-#define PPAT_CACHED_PDE			0
-#define PPAT_CACHED			BIT_ULL(7)
-#define PPAT_DISPLAY_ELLC		BIT_ULL(4)
+#define XELPG_PPGTT_PTE_PAT3		BIT_ULL(62)
+#define XE_PPGTT_PTE_PAT2		BIT_ULL(7)
+#define XE_PPGTT_PTE_PAT1		BIT_ULL(4)
+#define XE_PPGTT_PTE_PAT0		BIT_ULL(3)
 
 #define XE_PTE_SHIFT			12
 #define XE_PAGE_SIZE			(1 << XE_PTE_SHIFT)
@@ -261,7 +261,8 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 
 	level = 2;
 	ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
-	flags = vm->pt_ops->pte_encode_addr(0, XE_CACHE_WB, level, true, 0);
+	flags = vm->pt_ops->pte_encode_addr(xe, 0, XE_CACHE_WB, level,
+					    true, 0);
 
 	/*
 	 * Use 1GB pages, it shouldn't matter the physical amount of
@@ -498,7 +499,8 @@ static void emit_pte(struct xe_migrate *m,
 				devmem = true;
 			}
 
-			addr = m->q->vm->pt_ops->pte_encode_addr(addr, XE_CACHE_WB,
+			addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
+								 addr, XE_CACHE_WB,
 								 0, devmem, flags);
 			bb->cs[bb->len++] = lower_32_bits(addr);
 			bb->cs[bb->len++] = upper_32_bits(addr);
@@ -11,6 +11,7 @@
 #include "xe_pt_walk.h"
 
 struct xe_bo;
+struct xe_device;
 struct xe_vma;
 
 enum xe_cache_level {
@@ -40,7 +41,8 @@ struct xe_pt_ops {
 			     enum xe_cache_level cache, u32 pt_level);
 	u64 (*pte_encode_vma)(u64 pte, struct xe_vma *vma,
 			      enum xe_cache_level cache, u32 pt_level);
-	u64 (*pte_encode_addr)(u64 addr, enum xe_cache_level cache,
+	u64 (*pte_encode_addr)(struct xe_device *xe, u64 addr,
+			       enum xe_cache_level cache,
 			       u32 pt_level, bool devmem, u64 flags);
 	u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset,
 			     const enum xe_cache_level cache);
@@ -1210,27 +1210,38 @@ static struct drm_gpuvm_ops gpuvm_ops = {
 	.vm_free = xe_vm_free,
 };
 
-static u64 pde_encode_cache(enum xe_cache_level cache)
+static u64 pde_encode_cache(struct xe_device *xe, enum xe_cache_level cache)
 {
-	/* FIXME: I don't think the PPAT handling is correct for MTL */
+	u32 pat_index = xe->pat.idx[cache];
+	u64 pte = 0;
 
-	if (cache != XE_CACHE_NONE)
-		return PPAT_CACHED_PDE;
+	if (pat_index & BIT(0))
+		pte |= XE_PPGTT_PTE_PAT0;
 
-	return PPAT_UNCACHED;
+	if (pat_index & BIT(1))
+		pte |= XE_PPGTT_PTE_PAT1;
+
+	return pte;
 }
 
-static u64 pte_encode_cache(enum xe_cache_level cache)
+static u64 pte_encode_cache(struct xe_device *xe, enum xe_cache_level cache)
 {
-	/* FIXME: I don't think the PPAT handling is correct for MTL */
-	switch (cache) {
-	case XE_CACHE_NONE:
-		return PPAT_UNCACHED;
-	case XE_CACHE_WT:
-		return PPAT_DISPLAY_ELLC;
-	default:
-		return PPAT_CACHED;
-	}
+	u32 pat_index = xe->pat.idx[cache];
+	u64 pte = 0;
+
+	if (pat_index & BIT(0))
+		pte |= XE_PPGTT_PTE_PAT0;
+
+	if (pat_index & BIT(1))
+		pte |= XE_PPGTT_PTE_PAT1;
+
+	if (pat_index & BIT(2))
+		pte |= XE_PPGTT_PTE_PAT2;
+
+	if (pat_index & BIT(3))
+		pte |= XELPG_PPGTT_PTE_PAT3;
+
+	return pte;
 }
 
 static u64 pte_encode_ps(u32 pt_level)
@@ -1248,11 +1259,12 @@ static u64 pte_encode_ps(u32 pt_level)
 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
 			      const enum xe_cache_level cache)
 {
+	struct xe_device *xe = xe_bo_device(bo);
 	u64 pde;
 
 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
-	pde |= pde_encode_cache(cache);
+	pde |= pde_encode_cache(xe, cache);
 
 	return pde;
 }
@@ -1260,11 +1272,12 @@ static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
 			      enum xe_cache_level cache, u32 pt_level)
 {
+	struct xe_device *xe = xe_bo_device(bo);
 	u64 pte;
 
 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
-	pte |= pte_encode_cache(cache);
+	pte |= pte_encode_cache(xe, cache);
 	pte |= pte_encode_ps(pt_level);
 
 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
@@ -1276,12 +1289,14 @@ static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
 			       enum xe_cache_level cache, u32 pt_level)
 {
+	struct xe_device *xe = xe_vma_vm(vma)->xe;
+
 	pte |= XE_PAGE_PRESENT;
 
 	if (likely(!xe_vma_read_only(vma)))
 		pte |= XE_PAGE_RW;
 
-	pte |= pte_encode_cache(cache);
+	pte |= pte_encode_cache(xe, cache);
 	pte |= pte_encode_ps(pt_level);
 
 	if (unlikely(xe_vma_is_null(vma)))
@@ -1290,7 +1305,8 @@ static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
 	return pte;
 }
 
-static u64 xelp_pte_encode_addr(u64 addr, enum xe_cache_level cache,
+static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
+				enum xe_cache_level cache,
 				u32 pt_level, bool devmem, u64 flags)
 {
 	u64 pte;
@@ -1300,7 +1316,7 @@ static u64 xelp_pte_encode_addr(u64 addr, enum xe_cache_level cache,
 
 	pte = addr;
 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
-	pte |= pte_encode_cache(cache);
+	pte |= pte_encode_cache(xe, cache);
 	pte |= pte_encode_ps(pt_level);
 
 	if (devmem)