Commit 65ef8dba authored by Himal Prasad Ghimiray, committed by Rodrigo Vivi

drm/xe/xe2: Update emit_pte to use compression enabled PAT index

For indirectly accessed buffers, use the compression-enabled PAT index.

v2:
 - Fix parameter name.

v3:
 - Use a relevant define instead of a fixed number.

Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 09427526
@@ -330,7 +330,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
         else
                 xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it);
 
-        emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt),
+        emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false,
                  &src_it, XE_PAGE_SIZE, pt);
 
         run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test);
@@ -425,15 +425,24 @@ static u32 pte_update_size(struct xe_migrate *m,
 
 static void emit_pte(struct xe_migrate *m,
                      struct xe_bb *bb, u32 at_pt,
-                     bool is_vram,
+                     bool is_vram, bool is_comp_pte,
                      struct xe_res_cursor *cur,
                      u32 size, struct xe_bo *bo)
 {
-        u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
+        struct xe_device *xe = tile_to_xe(m->tile);
+
+        u16 pat_index;
         u32 ptes;
         u64 ofs = at_pt * XE_PAGE_SIZE;
         u64 cur_ofs;
 
+        /* Indirect access needs compression enabled uncached PAT index */
+        if (GRAPHICS_VERx100(xe) >= 2000)
+                pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
+                                          xe->pat.idx[XE_CACHE_NONE];
+        else
+                pat_index = xe->pat.idx[XE_CACHE_WB];
+
         /*
          * FIXME: Emitting VRAM PTEs to L0 PTs is forbidden. Currently
          * we're only emitting VRAM PTEs during sanity tests, so when
@@ -720,19 +729,19 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
         }
 
         if (!src_is_vram)
-                emit_pte(m, bb, src_L0_pt, src_is_vram, &src_it, src_L0,
+                emit_pte(m, bb, src_L0_pt, src_is_vram, true, &src_it, src_L0,
                          src_bo);
         else
                 xe_res_next(&src_it, src_L0);
 
         if (!dst_is_vram)
-                emit_pte(m, bb, dst_L0_pt, dst_is_vram, &dst_it, src_L0,
+                emit_pte(m, bb, dst_L0_pt, dst_is_vram, true, &dst_it, src_L0,
                          dst_bo);
         else
                 xe_res_next(&dst_it, src_L0);
 
         if (copy_system_ccs)
-                emit_pte(m, bb, ccs_pt, false, &ccs_it, ccs_size, src_bo);
+                emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src_bo);
 
         bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
         update_idx = bb->len;
@@ -965,7 +974,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 
         /* Preemption is enabled again by the ring ops. */
         if (!clear_vram) {
-                emit_pte(m, bb, clear_L0_pt, clear_vram, &src_it, clear_L0,
+                emit_pte(m, bb, clear_L0_pt, clear_vram, true, &src_it, clear_L0,
                          bo);
         } else {
                 xe_res_next(&src_it, clear_L0);
@@ -387,6 +387,7 @@ void xe_pat_init_early(struct xe_device *xe)
                 xe->pat.idx[XE_CACHE_NONE] = 3;
                 xe->pat.idx[XE_CACHE_WT] = 15;
                 xe->pat.idx[XE_CACHE_WB] = 2;
+                xe->pat.idx[XE_CACHE_NONE_COMPRESSION] = 12; /*Applicable on xe2 and beyond */
         } else if (xe->info.platform == XE_METEORLAKE) {
                 xe->pat.ops = &xelpg_pat_ops;
                 xe->pat.table = xelpg_pat_table;
@@ -18,6 +18,7 @@ enum xe_cache_level {
         XE_CACHE_NONE,
         XE_CACHE_WT,
         XE_CACHE_WB,
+        XE_CACHE_NONE_COMPRESSION, /*UC + COH_NONE + COMPRESSION */
         __XE_CACHE_LEVEL_COUNT,
 };
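For readers tracking the PAT plumbing, here is a stand-alone sketch of the selection logic this patch introduces in emit_pte(). It is illustrative only: struct fake_device, pick_pat_index() and the plain graphics_verx100 field are made-up stand-ins for the driver's struct xe_device and GRAPHICS_VERx100() macro; the numeric PAT indices are the ones set in the xe_pat_init_early() hunk above.

#include <stdint.h>
#include <stdio.h>

/* Mirrors enum xe_cache_level after this patch. */
enum cache_level {
        CACHE_NONE,
        CACHE_WT,
        CACHE_WB,
        CACHE_NONE_COMPRESSION,   /* UC + COH_NONE + COMPRESSION */
        CACHE_LEVEL_COUNT,
};

/* Hypothetical stand-in for the bits of struct xe_device used here. */
struct fake_device {
        unsigned int graphics_verx100;
        uint16_t pat_idx[CACHE_LEVEL_COUNT];
};

/*
 * Same decision as the new code in emit_pte(): on Xe2 and later
 * (GRAPHICS_VERx100 >= 2000), indirectly accessed (compression) buffers get
 * the compression-enabled uncached index, other buffers the plain uncached
 * one; older platforms keep using the WB index.
 */
static uint16_t pick_pat_index(const struct fake_device *xe, int is_comp_pte)
{
        if (xe->graphics_verx100 >= 2000)
                return is_comp_pte ? xe->pat_idx[CACHE_NONE_COMPRESSION] :
                                     xe->pat_idx[CACHE_NONE];
        return xe->pat_idx[CACHE_WB];
}

int main(void)
{
        /* PAT index values taken from the xe_pat_init_early() hunk above. */
        struct fake_device xe2 = {
                .graphics_verx100 = 2000,
                .pat_idx = {
                        [CACHE_NONE] = 3,
                        [CACHE_WT] = 15,
                        [CACHE_WB] = 2,
                        [CACHE_NONE_COMPRESSION] = 12,
                },
        };

        printf("comp PTE  -> PAT index %u\n", (unsigned)pick_pat_index(&xe2, 1)); /* 12 */
        printf("plain PTE -> PAT index %u\n", (unsigned)pick_pat_index(&xe2, 0)); /* 3 */
        return 0;
}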