Commit 03fe4b87 authored by Andrzej Hajda

drm/i915: Add WABB blit for Wa_16018031267 / Wa_16018063123

Apply WABB blit for Wa_16018031267 / Wa_16018063123.

v3: drop unused enum definition
v4: move selftest to separate patch, use wa only on BCS0.
v5: fixed selftest caller to use context_wabb
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
Signed-off-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Signed-off-by: Andrzej Hajda <andrzej.hajda@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231026-wabb-v6-2-4aa7d55d0a8a@intel.com
parent 9bb66c17
@@ -118,6 +118,9 @@
#define CCID_EXTENDED_STATE_RESTORE BIT(2)
#define CCID_EXTENDED_STATE_SAVE BIT(3)
#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */
+#define PER_CTX_BB_FORCE BIT(2)
+#define PER_CTX_BB_VALID BIT(0)
#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */
#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
#define ECOSKPD(base) _MMIO((base) + 0x1d0)
@@ -82,6 +82,10 @@ struct drm_printer;
##__VA_ARGS__); \
} while (0)
+#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
+IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 55), IP_VER(12, 71)) && \
+engine->class == COPY_ENGINE_CLASS && engine->instance == 0)
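/*
 * Editorial note, not part of the patch: the gate above limits the
 * workaround batch to copy engine instance 0 (BCS0) on GT IP versions
 * 12.55 through 12.71. The new call site added later in this commit
 * consumes it as:
 *
 *	if (NEEDS_FASTCOLOR_BLT_WABB(ce->engine))
 *		cs = xehp_emit_fastcolor_blt_wabb(ce, cs);
 */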
static inline bool gt_is_root(struct intel_gt *gt)
{
return !gt->info.id;
@@ -828,6 +828,18 @@ lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
return 0;
}
+static void
+lrc_setup_bb_per_ctx(u32 *regs,
+const struct intel_engine_cs *engine,
+u32 ctx_bb_ggtt_addr)
+{
+GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
+regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
+ctx_bb_ggtt_addr |
+PER_CTX_BB_FORCE |
+PER_CTX_BB_VALID;
+}
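/*
 * Editorial illustration, not part of the patch: with a hypothetical
 * per-context batch buffer at GGTT offset 0x102000, the helper above
 * writes
 *
 *	0x102000 | PER_CTX_BB_FORCE | PER_CTX_BB_VALID == 0x102005
 *
 * into the RING_BB_PER_CTX_PTR slot of the context image, marking the
 * address as valid and forcing the per-context BB to be executed.
 */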
static void
lrc_setup_indirect_ctx(u32 *regs,
const struct intel_engine_cs *engine,
@@ -1020,7 +1032,13 @@ static u32 *context_wa_bb_offset(const struct intel_context *ce)
return PAGE_SIZE * ce->wa_bb_page;
}
-static u32 *context_indirect_bb(const struct intel_context *ce)
+/*
+ * per_ctx below determines which WABB section is used.
+ * When true, the function returns the location of the
+ * PER_CTX_BB. When false, the function returns the
+ * location of the INDIRECT_CTX.
+ */
+static u32 *context_wabb(const struct intel_context *ce, bool per_ctx)
{
void *ptr;
@@ -1029,6 +1047,7 @@ static u32 *context_indirect_bb(const struct intel_context *ce)
ptr = ce->lrc_reg_state;
ptr -= LRC_STATE_OFFSET; /* back to start of context image */
ptr += context_wa_bb_offset(ce);
+ptr += per_ctx ? PAGE_SIZE : 0;
return ptr;
}
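/*
 * Editorial sketch, not part of the patch: layout of the trailing
 * workaround pages within the context image, as addressed above:
 *
 *	context_wa_bb_offset(ce) + 0         -> INDIRECT_CTX batch, context_wabb(ce, false)
 *	context_wa_bb_offset(ce) + PAGE_SIZE -> PER_CTX_BB batch,   context_wabb(ce, true)
 *
 * __lrc_alloc_state() below grows the context by two pages so that each
 * batch gets its own page.
 */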
@@ -1105,7 +1124,8 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
if (GRAPHICS_VER(engine->i915) >= 12) {
ce->wa_bb_page = context_size / PAGE_SIZE;
-context_size += PAGE_SIZE;
+/* INDIRECT_CTX and PER_CTX_BB need separate pages. */
+context_size += PAGE_SIZE * 2;
}
if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) {
@@ -1407,12 +1427,85 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
return gen12_emit_aux_table_inv(ce->engine, cs);
}
+static u32 *xehp_emit_fastcolor_blt_wabb(const struct intel_context *ce, u32 *cs)
+{
+struct intel_gt *gt = ce->engine->gt;
+int mocs = gt->mocs.uc_index << 1;
+/*
+ * Wa_16018031267 / Wa_16018063123 require that SW force the
+ * main copy engine arbitration into round robin mode. We
+ * additionally need to submit the following WABB blt command
+ * to produce 4 subblits, with each subblit generating 0-byte
+ * write requests, as the WABB:
+ *
+ * XY_FASTCOLOR_BLT
+ * BG0 -> 5100000E
+ * BG1 -> 0000003F (Dest pitch)
+ * BG2 -> 00000000 (X1, Y1) = (0, 0)
+ * BG3 -> 00040001 (X2, Y2) = (1, 4)
+ * BG4 -> scratch
+ * BG5 -> scratch
+ * BG6-12 -> 00000000
+ * BG13 -> 20004004 (Surf. Width = 2, Surf. Height = 5)
+ * BG14 -> 00000010 (Qpitch = 4)
+ * BG15 -> 00000000
+ */
+*cs++ = XY_FAST_COLOR_BLT_CMD | (16 - 2);
+*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) | 0x3f;
+*cs++ = 0;
+*cs++ = 4 << 16 | 1;
+*cs++ = lower_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
+*cs++ = upper_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
+*cs++ = 0;
+*cs++ = 0;
+*cs++ = 0;
+*cs++ = 0;
+*cs++ = 0;
+*cs++ = 0;
+*cs++ = 0;
+*cs++ = 0x20004004;
+*cs++ = 0x10;
+*cs++ = 0;
+return cs;
+}
+static u32 *
+xehp_emit_per_ctx_bb(const struct intel_context *ce, u32 *cs)
+{
+/* Wa_16018031267, Wa_16018063123 */
+if (NEEDS_FASTCOLOR_BLT_WABB(ce->engine))
+cs = xehp_emit_fastcolor_blt_wabb(ce, cs);
+return cs;
+}
+static void
+setup_per_ctx_bb(const struct intel_context *ce,
+const struct intel_engine_cs *engine,
+u32 *(*emit)(const struct intel_context *, u32 *))
+{
+/* Place PER_CTX_BB on the next page after INDIRECT_CTX */
+u32 * const start = context_wabb(ce, true);
+u32 *cs;
+cs = emit(ce, start);
+/* The PER_CTX_BB must be terminated manually */
+*cs++ = MI_BATCH_BUFFER_END;
+GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
+lrc_setup_bb_per_ctx(ce->lrc_reg_state, engine,
+lrc_indirect_bb(ce) + PAGE_SIZE);
+}
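/*
 * Editorial sketch, not part of the patch: the GGTT address programmed
 * above, lrc_indirect_bb(ce) + PAGE_SIZE, is the GGTT view of the same
 * page that context_wabb(ce, true) returns a CPU pointer to. A
 * hypothetical helper mirroring the CPU-side split could look like:
 */
static u32 context_wabb_ggtt(const struct intel_context *ce, bool per_ctx)
{
	return lrc_indirect_bb(ce) + (per_ctx ? PAGE_SIZE : 0);
}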
static void
setup_indirect_ctx_bb(const struct intel_context *ce,
const struct intel_engine_cs *engine,
u32 *(*emit)(const struct intel_context *, u32 *))
{
{
-u32 * const start = context_indirect_bb(ce);
+u32 * const start = context_wabb(ce, false);
u32 *cs;
cs = emit(ce, start);
@@ -1511,6 +1604,7 @@ u32 lrc_update_regs(const struct intel_context *ce,
/* Mutually exclusive wrt to global indirect bb */
GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
setup_indirect_ctx_bb(ce, engine, fn);
+setup_per_ctx_bb(ce, engine, xehp_emit_per_ctx_bb);
}
return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
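/*
 * Editorial note, not part of the patch: lrc_update_regs() now sets up
 * the PER_CTX_BB right after the INDIRECT_CTX batch. On engines where
 * NEEDS_FASTCOLOR_BLT_WABB() is false, xehp_emit_per_ctx_bb() leaves the
 * buffer untouched and the PER_CTX_BB degenerates to a single
 * terminating command:
 *
 *	start[0] == MI_BATCH_BUFFER_END
 */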
@@ -1596,7 +1596,7 @@ emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
static void
indirect_ctx_bb_setup(struct intel_context *ce)
{
-u32 *cs = context_indirect_bb(ce);
+u32 *cs = context_wabb(ce, false);
cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;