Commit 48ba4a6d authored by Lucas De Marchi

drm/i915: Update IP_VER(12, 50)

With no platform using graphics/media IP_VER(12, 50), replace the
checks throughout the code with IP_VER(12, 55) so the code makes sense
by itself with no additional explanation of previous baggage.
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Acked-by: Tvrtko Ursulin <tursulin@ursulin.net>
Link: https://patchwork.freedesktop.org/patch/msgid/20240320060543.4034215-5-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
parent b183bdf2
...@@ -713,7 +713,7 @@ static int igt_ppgtt_huge_fill(void *arg) ...@@ -713,7 +713,7 @@ static int igt_ppgtt_huge_fill(void *arg)
{ {
struct drm_i915_private *i915 = arg; struct drm_i915_private *i915 = arg;
unsigned int supported = RUNTIME_INFO(i915)->page_sizes; unsigned int supported = RUNTIME_INFO(i915)->page_sizes;
bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50); bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
struct i915_address_space *vm; struct i915_address_space *vm;
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
unsigned long max_pages; unsigned long max_pages;
...@@ -857,7 +857,7 @@ static int igt_ppgtt_huge_fill(void *arg) ...@@ -857,7 +857,7 @@ static int igt_ppgtt_huge_fill(void *arg)
static int igt_ppgtt_64K(void *arg) static int igt_ppgtt_64K(void *arg)
{ {
struct drm_i915_private *i915 = arg; struct drm_i915_private *i915 = arg;
bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50); bool has_pte64 = GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55);
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_address_space *vm; struct i915_address_space *vm;
struct i915_gem_context *ctx; struct i915_gem_context *ctx;
......
...@@ -117,7 +117,7 @@ static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915) ...@@ -117,7 +117,7 @@ static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915)
if (gen < 12) if (gen < 12)
return true; return true;
if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
return false; return false;
return HAS_DISPLAY(i915); return HAS_DISPLAY(i915);
...@@ -166,7 +166,7 @@ static int prepare_blit(const struct tiled_blits *t, ...@@ -166,7 +166,7 @@ static int prepare_blit(const struct tiled_blits *t,
src_pitch = t->width; /* in dwords */ src_pitch = t->width; /* in dwords */
if (src->tiling == CLIENT_TILING_Y) { if (src->tiling == CLIENT_TILING_Y) {
src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR); src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(YMAJOR);
if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4; src_4t = XY_FAST_COPY_BLT_D1_SRC_TILE4;
} else if (src->tiling == CLIENT_TILING_X) { } else if (src->tiling == CLIENT_TILING_X) {
src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X); src_tiles = XY_FAST_COPY_BLT_D0_SRC_TILE_MODE(TILE_X);
...@@ -177,7 +177,7 @@ static int prepare_blit(const struct tiled_blits *t, ...@@ -177,7 +177,7 @@ static int prepare_blit(const struct tiled_blits *t,
dst_pitch = t->width; /* in dwords */ dst_pitch = t->width; /* in dwords */
if (dst->tiling == CLIENT_TILING_Y) { if (dst->tiling == CLIENT_TILING_Y) {
dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR); dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(YMAJOR);
if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(to_i915(batch->base.dev)) >= IP_VER(12, 55))
dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4; dst_4t = XY_FAST_COPY_BLT_D1_DST_TILE4;
} else if (dst->tiling == CLIENT_TILING_X) { } else if (dst->tiling == CLIENT_TILING_X) {
dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X); dst_tiles = XY_FAST_COPY_BLT_D0_DST_TILE_MODE(TILE_X);
...@@ -365,7 +365,7 @@ static u64 tiled_offset(const struct intel_gt *gt, ...@@ -365,7 +365,7 @@ static u64 tiled_offset(const struct intel_gt *gt,
v += x; v += x;
swizzle = gt->ggtt->bit_6_swizzle_x; swizzle = gt->ggtt->bit_6_swizzle_x;
} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) { } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) {
/* Y-major tiling layout is Tile4 for Xe_HP and beyond */ /* Y-major tiling layout is Tile4 for Xe_HP and beyond */
v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32); v = linear_x_y_to_ftiled_pos(x_pos, y_pos, stride, 32);
......
...@@ -827,7 +827,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) ...@@ -827,7 +827,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
cs = gen12_emit_pipe_control(cs, 0, cs = gen12_emit_pipe_control(cs, 0,
PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0); PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);
if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
/* Wa_1409600907 */ /* Wa_1409600907 */
flags |= PIPE_CONTROL_DEPTH_STALL; flags |= PIPE_CONTROL_DEPTH_STALL;
......
...@@ -765,14 +765,14 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt) ...@@ -765,14 +765,14 @@ static void engine_mask_apply_media_fuses(struct intel_gt *gt)
* and bits have disable semantices. * and bits have disable semantices.
*/ */
media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE); media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
if (MEDIA_VER_FULL(i915) < IP_VER(12, 50)) if (MEDIA_VER_FULL(i915) < IP_VER(12, 55))
media_fuse = ~media_fuse; media_fuse = ~media_fuse;
vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK; vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >> vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
GEN11_GT_VEBOX_DISABLE_SHIFT; GEN11_GT_VEBOX_DISABLE_SHIFT;
if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) { if (MEDIA_VER_FULL(i915) >= IP_VER(12, 55)) {
fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1); fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1); gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
} else { } else {
...@@ -1193,7 +1193,6 @@ static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine) ...@@ -1193,7 +1193,6 @@ static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 74) || if (GRAPHICS_VER_FULL(i915) == IP_VER(12, 74) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) || GRAPHICS_VER_FULL(i915) == IP_VER(12, 71) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) || GRAPHICS_VER_FULL(i915) == IP_VER(12, 70) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 50) ||
GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) { GRAPHICS_VER_FULL(i915) == IP_VER(12, 55)) {
regs = xehp_regs; regs = xehp_regs;
num = ARRAY_SIZE(xehp_regs); num = ARRAY_SIZE(xehp_regs);
......
...@@ -493,7 +493,7 @@ __execlists_schedule_in(struct i915_request *rq) ...@@ -493,7 +493,7 @@ __execlists_schedule_in(struct i915_request *rq)
/* Use a fixed tag for OA and friends */ /* Use a fixed tag for OA and friends */
GEM_BUG_ON(ce->tag <= BITS_PER_LONG); GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
ce->lrc.ccid = ce->tag; ce->lrc.ccid = ce->tag;
} else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) { } else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
/* We don't need a strict matching tag, just different values */ /* We don't need a strict matching tag, just different values */
unsigned int tag = ffs(READ_ONCE(engine->context_tag)); unsigned int tag = ffs(READ_ONCE(engine->context_tag));
...@@ -613,7 +613,7 @@ static void __execlists_schedule_out(struct i915_request * const rq, ...@@ -613,7 +613,7 @@ static void __execlists_schedule_out(struct i915_request * const rq,
intel_engine_add_retire(engine, ce->timeline); intel_engine_add_retire(engine, ce->timeline);
ccid = ce->lrc.ccid; ccid = ce->lrc.ccid;
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) { if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
ccid >>= XEHP_SW_CTX_ID_SHIFT - 32; ccid >>= XEHP_SW_CTX_ID_SHIFT - 32;
ccid &= XEHP_MAX_CONTEXT_HW_ID; ccid &= XEHP_MAX_CONTEXT_HW_ID;
} else { } else {
...@@ -1907,7 +1907,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive) ...@@ -1907,7 +1907,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n", ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
head, upper_32_bits(csb), lower_32_bits(csb)); head, upper_32_bits(csb), lower_32_bits(csb));
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
promote = xehp_csb_parse(csb); promote = xehp_csb_parse(csb);
else if (GRAPHICS_VER(engine->i915) >= 12) else if (GRAPHICS_VER(engine->i915) >= 12)
promote = gen12_csb_parse(csb); promote = gen12_csb_parse(csb);
...@@ -3479,7 +3479,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) ...@@ -3479,7 +3479,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
} }
} }
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) { if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) {
if (intel_engine_has_preemption(engine)) if (intel_engine_has_preemption(engine))
engine->emit_bb_start = xehp_emit_bb_start; engine->emit_bb_start = xehp_emit_bb_start;
else else
...@@ -3582,7 +3582,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) ...@@ -3582,7 +3582,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0); engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
if (GRAPHICS_VER(engine->i915) >= 11 && if (GRAPHICS_VER(engine->i915) >= 11 &&
GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 50)) { GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 55)) {
execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32); execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32); execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
} }
......
...@@ -278,7 +278,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt, ...@@ -278,7 +278,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
intel_uncore_posting_read(uncore, intel_uncore_posting_read(uncore,
XELPMP_RING_FAULT_REG); XELPMP_RING_FAULT_REG);
} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG, intel_gt_mcr_multicast_rmw(gt, XEHP_RING_FAULT_REG,
RING_FAULT_VALID, 0); RING_FAULT_VALID, 0);
intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG); intel_gt_mcr_read_any(gt, XEHP_RING_FAULT_REG);
...@@ -403,7 +403,7 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt) ...@@ -403,7 +403,7 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915; struct drm_i915_private *i915 = gt->i915;
/* From GEN8 onwards we only have one 'All Engine Fault Register' */ /* From GEN8 onwards we only have one 'All Engine Fault Register' */
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
xehp_check_faults(gt); xehp_check_faults(gt);
else if (GRAPHICS_VER(i915) >= 8) else if (GRAPHICS_VER(i915) >= 8)
gen8_check_faults(gt); gen8_check_faults(gt);
......
...@@ -184,7 +184,7 @@ void intel_gt_mcr_init(struct intel_gt *gt) ...@@ -184,7 +184,7 @@ void intel_gt_mcr_init(struct intel_gt *gt)
* steering. * steering.
*/ */
} else if (GRAPHICS_VER(i915) >= 11 && } else if (GRAPHICS_VER(i915) >= 11 &&
GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) { GRAPHICS_VER_FULL(i915) < IP_VER(12, 55)) {
gt->steering_table[L3BANK] = icl_l3bank_steering_table; gt->steering_table[L3BANK] = icl_l3bank_steering_table;
gt->info.l3bank_mask = gt->info.l3bank_mask =
~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) & ~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
...@@ -829,7 +829,7 @@ void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss, ...@@ -829,7 +829,7 @@ void intel_gt_mcr_get_ss_steering(struct intel_gt *gt, unsigned int dss,
if (IS_PONTEVECCHIO(gt->i915)) { if (IS_PONTEVECCHIO(gt->i915)) {
*group = dss / GEN_DSS_PER_CSLICE; *group = dss / GEN_DSS_PER_CSLICE;
*instance = dss % GEN_DSS_PER_CSLICE; *instance = dss % GEN_DSS_PER_CSLICE;
} else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) { } else if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55)) {
*group = dss / GEN_DSS_PER_GSLICE; *group = dss / GEN_DSS_PER_GSLICE;
*instance = dss % GEN_DSS_PER_GSLICE; *instance = dss % GEN_DSS_PER_GSLICE;
} else { } else {
......
...@@ -54,7 +54,7 @@ int intel_gt_mcr_wait_for_reg(struct intel_gt *gt, ...@@ -54,7 +54,7 @@ int intel_gt_mcr_wait_for_reg(struct intel_gt *gt,
* the topology, so we lookup the DSS ID directly in "slice 0." * the topology, so we lookup the DSS ID directly in "slice 0."
*/ */
#define _HAS_SS(ss_, gt_, group_, instance_) ( \ #define _HAS_SS(ss_, gt_, group_, instance_) ( \
GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 50) ? \ GRAPHICS_VER_FULL(gt_->i915) >= IP_VER(12, 55) ? \
intel_sseu_has_subslice(&(gt_)->info.sseu, 0, ss_) : \ intel_sseu_has_subslice(&(gt_)->info.sseu, 0, ss_) : \
intel_sseu_has_subslice(&(gt_)->info.sseu, group_, instance_)) intel_sseu_has_subslice(&(gt_)->info.sseu, group_, instance_))
......
...@@ -680,7 +680,7 @@ void setup_private_pat(struct intel_gt *gt) ...@@ -680,7 +680,7 @@ void setup_private_pat(struct intel_gt *gt)
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
xelpg_setup_private_ppat(gt); xelpg_setup_private_ppat(gt);
else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
xehp_setup_private_ppat(gt); xehp_setup_private_ppat(gt);
else if (GRAPHICS_VER(i915) >= 12) else if (GRAPHICS_VER(i915) >= 12)
tgl_setup_private_ppat(uncore); tgl_setup_private_ppat(uncore);
......
...@@ -676,7 +676,7 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine) ...@@ -676,7 +676,7 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
{ {
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return 0x70; return 0x70;
else if (GRAPHICS_VER(engine->i915) >= 12) else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x60; return 0x60;
...@@ -690,7 +690,7 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) ...@@ -690,7 +690,7 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
static int lrc_ring_bb_offset(const struct intel_engine_cs *engine) static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
{ {
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return 0x80; return 0x80;
else if (GRAPHICS_VER(engine->i915) >= 12) else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x70; return 0x70;
...@@ -705,7 +705,7 @@ static int lrc_ring_bb_offset(const struct intel_engine_cs *engine) ...@@ -705,7 +705,7 @@ static int lrc_ring_bb_offset(const struct intel_engine_cs *engine)
static int lrc_ring_gpr0(const struct intel_engine_cs *engine) static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
{ {
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
return 0x84; return 0x84;
else if (GRAPHICS_VER(engine->i915) >= 12) else if (GRAPHICS_VER(engine->i915) >= 12)
return 0x74; return 0x74;
...@@ -752,7 +752,7 @@ static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine) ...@@ -752,7 +752,7 @@ static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine) static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
{ {
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
/* /*
* Note that the CSFE context has a dummy slot for CMD_BUF_CCTL * Note that the CSFE context has a dummy slot for CMD_BUF_CCTL
* simply to match the RCS context image layout. * simply to match the RCS context image layout.
......
...@@ -925,7 +925,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size, ...@@ -925,7 +925,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size,
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX); GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
ring_sz = XY_FAST_COLOR_BLT_DW; ring_sz = XY_FAST_COLOR_BLT_DW;
else if (ver >= 8) else if (ver >= 8)
ring_sz = 8; ring_sz = 8;
...@@ -936,7 +936,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size, ...@@ -936,7 +936,7 @@ static int emit_clear(struct i915_request *rq, u32 offset, int size,
if (IS_ERR(cs)) if (IS_ERR(cs))
return PTR_ERR(cs); return PTR_ERR(cs);
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 | *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
(XY_FAST_COLOR_BLT_DW - 2); (XY_FAST_COLOR_BLT_DW - 2);
*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) | *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |
......
...@@ -639,7 +639,7 @@ static void init_l3cc_table(struct intel_gt *gt, ...@@ -639,7 +639,7 @@ static void init_l3cc_table(struct intel_gt *gt,
intel_gt_mcr_lock(gt, &flags); intel_gt_mcr_lock(gt, &flags);
for_each_l3cc(l3cc, table, i) for_each_l3cc(l3cc, table, i)
if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55))
intel_gt_mcr_multicast_write_fw(gt, XEHP_LNCFCMOCS(i), l3cc); intel_gt_mcr_multicast_write_fw(gt, XEHP_LNCFCMOCS(i), l3cc);
else else
intel_uncore_write_fw(gt->uncore, GEN9_LNCFCMOCS(i), l3cc); intel_uncore_write_fw(gt->uncore, GEN9_LNCFCMOCS(i), l3cc);
......
...@@ -642,7 +642,7 @@ void intel_sseu_info_init(struct intel_gt *gt) ...@@ -642,7 +642,7 @@ void intel_sseu_info_init(struct intel_gt *gt)
{ {
struct drm_i915_private *i915 = gt->i915; struct drm_i915_private *i915 = gt->i915;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
xehp_sseu_info_init(gt); xehp_sseu_info_init(gt);
else if (GRAPHICS_VER(i915) >= 12) else if (GRAPHICS_VER(i915) >= 12)
gen12_sseu_info_init(gt); gen12_sseu_info_init(gt);
...@@ -851,7 +851,7 @@ void intel_sseu_print_topology(struct drm_i915_private *i915, ...@@ -851,7 +851,7 @@ void intel_sseu_print_topology(struct drm_i915_private *i915,
{ {
if (sseu->max_slices == 0) if (sseu->max_slices == 0)
drm_printf(p, "Unavailable\n"); drm_printf(p, "Unavailable\n");
else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
sseu_print_xehp_topology(sseu, p); sseu_print_xehp_topology(sseu, p);
else else
sseu_print_hsw_topology(sseu, p); sseu_print_hsw_topology(sseu, p);
......
...@@ -2770,7 +2770,7 @@ add_render_compute_tuning_settings(struct intel_gt *gt, ...@@ -2770,7 +2770,7 @@ add_render_compute_tuning_settings(struct intel_gt *gt,
wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE, wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
THREAD_EX_ARB_MODE_RR_AFTER_DEP); THREAD_EX_ARB_MODE_RR_AFTER_DEP);
if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC); wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
} }
...@@ -2973,7 +2973,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset) ...@@ -2973,7 +2973,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset)
const struct i915_range *mcr_ranges; const struct i915_range *mcr_ranges;
int i; int i;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
mcr_ranges = mcr_ranges_xehp; mcr_ranges = mcr_ranges_xehp;
else if (GRAPHICS_VER(i915) >= 12) else if (GRAPHICS_VER(i915) >= 12)
mcr_ranges = mcr_ranges_gen12; mcr_ranges = mcr_ranges_gen12;
......
...@@ -286,7 +286,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc) ...@@ -286,7 +286,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
/* Wa_22012773006:gen11,gen12 < XeHP */ /* Wa_22012773006:gen11,gen12 < XeHP */
if (GRAPHICS_VER(gt->i915) >= 11 && if (GRAPHICS_VER(gt->i915) >= 11 &&
GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 50)) GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 55))
flags |= GUC_WA_POLLCS; flags |= GUC_WA_POLLCS;
/* Wa_14014475959 */ /* Wa_14014475959 */
......
...@@ -393,7 +393,7 @@ static int guc_mmio_regset_init(struct temp_regset *regset, ...@@ -393,7 +393,7 @@ static int guc_mmio_regset_init(struct temp_regset *regset,
/* add in local MOCS registers */ /* add in local MOCS registers */
for (i = 0; i < LNCFCMOCS_REG_COUNT; i++) for (i = 0; i < LNCFCMOCS_REG_COUNT; i++)
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
ret |= GUC_MCR_REG_ADD(gt, regset, XEHP_LNCFCMOCS(i), false); ret |= GUC_MCR_REG_ADD(gt, regset, XEHP_LNCFCMOCS(i), false);
else else
ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false); ret |= GUC_MMIO_REG_ADD(gt, regset, GEN9_LNCFCMOCS(i), false);
...@@ -503,7 +503,7 @@ static void fill_engine_enable_masks(struct intel_gt *gt, ...@@ -503,7 +503,7 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32)) #define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
#define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32)) #define XEHP_LR_HW_CONTEXT_SIZE (96 * sizeof(u32))
#define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50) ? \ #define LR_HW_CONTEXT_SZ(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55) ? \
XEHP_LR_HW_CONTEXT_SIZE : \ XEHP_LR_HW_CONTEXT_SIZE : \
LR_HW_CONTEXT_SIZE) LR_HW_CONTEXT_SIZE)
#define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915)) #define LRC_SKIP_SIZE(i915) (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SZ(i915))
......
...@@ -26,7 +26,7 @@ static void guc_prepare_xfer(struct intel_gt *gt) ...@@ -26,7 +26,7 @@ static void guc_prepare_xfer(struct intel_gt *gt)
GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA | GUC_ENABLE_READ_CACHE_FOR_WOPCM_DATA |
GUC_ENABLE_MIA_CLOCK_GATING; GUC_ENABLE_MIA_CLOCK_GATING;
if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 50)) if (GRAPHICS_VER_FULL(uncore->i915) < IP_VER(12, 55))
shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES | shim_flags |= GUC_DISABLE_SRAM_INIT_TO_ZEROES |
GUC_ENABLE_MIA_CACHING; GUC_ENABLE_MIA_CACHING;
......
...@@ -4507,7 +4507,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine) ...@@ -4507,7 +4507,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
*/ */
engine->emit_bb_start = gen8_emit_bb_start; engine->emit_bb_start = gen8_emit_bb_start;
if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
engine->emit_bb_start = xehp_emit_bb_start; engine->emit_bb_start = xehp_emit_bb_start;
} }
......
...@@ -160,7 +160,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, ...@@ -160,7 +160,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
break; break;
case I915_PARAM_SLICE_MASK: case I915_PARAM_SLICE_MASK:
/* Not supported from Xe_HP onward; use topology queries */ /* Not supported from Xe_HP onward; use topology queries */
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
return -EINVAL; return -EINVAL;
value = sseu->slice_mask; value = sseu->slice_mask;
...@@ -169,7 +169,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, ...@@ -169,7 +169,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
break; break;
case I915_PARAM_SUBSLICE_MASK: case I915_PARAM_SUBSLICE_MASK:
/* Not supported from Xe_HP onward; use topology queries */ /* Not supported from Xe_HP onward; use topology queries */
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
return -EINVAL; return -EINVAL;
/* Only copy bits from the first slice */ /* Only copy bits from the first slice */
......
...@@ -1245,8 +1245,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee) ...@@ -1245,8 +1245,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
if (MEDIA_VER(i915) >= 13 && engine->gt->type == GT_MEDIA) if (MEDIA_VER(i915) >= 13 && engine->gt->type == GT_MEDIA)
ee->fault_reg = intel_uncore_read(engine->uncore, ee->fault_reg = intel_uncore_read(engine->uncore,
XELPMP_RING_FAULT_REG); XELPMP_RING_FAULT_REG);
else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
ee->fault_reg = intel_gt_mcr_read_any(engine->gt, ee->fault_reg = intel_gt_mcr_read_any(engine->gt,
XEHP_RING_FAULT_REG); XEHP_RING_FAULT_REG);
else if (GRAPHICS_VER(i915) >= 12) else if (GRAPHICS_VER(i915) >= 12)
...@@ -1852,7 +1851,7 @@ static void gt_record_global_regs(struct intel_gt_coredump *gt) ...@@ -1852,7 +1851,7 @@ static void gt_record_global_regs(struct intel_gt_coredump *gt)
if (GRAPHICS_VER(i915) == 7) if (GRAPHICS_VER(i915) == 7)
gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT); gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) { if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt, gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
XEHP_FAULT_TLB_DATA0); XEHP_FAULT_TLB_DATA0);
gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt, gt->fault_data1 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
......
...@@ -292,7 +292,7 @@ static u32 i915_perf_stream_paranoid = true; ...@@ -292,7 +292,7 @@ static u32 i915_perf_stream_paranoid = true;
#define OAREPORT_REASON_CTX_SWITCH (1<<3) #define OAREPORT_REASON_CTX_SWITCH (1<<3)
#define OAREPORT_REASON_CLK_RATIO (1<<5) #define OAREPORT_REASON_CLK_RATIO (1<<5)
#define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) #define HAS_MI_SET_PREDICATE(i915) (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
/* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate /* For sysctl proc_dointvec_minmax of i915_oa_max_sample_rate
* *
...@@ -817,7 +817,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, ...@@ -817,7 +817,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
*/ */
if (oa_report_ctx_invalid(stream, report) && if (oa_report_ctx_invalid(stream, report) &&
GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 50)) { GRAPHICS_VER_FULL(stream->engine->i915) < IP_VER(12, 55)) {
ctx_id = INVALID_CTX_ID; ctx_id = INVALID_CTX_ID;
oa_context_id_squash(stream, report32); oa_context_id_squash(stream, report32);
} }
...@@ -1419,7 +1419,7 @@ static int gen12_get_render_context_id(struct i915_perf_stream *stream) ...@@ -1419,7 +1419,7 @@ static int gen12_get_render_context_id(struct i915_perf_stream *stream)
mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) << mask = ((1U << GEN12_GUC_SW_CTX_ID_WIDTH) - 1) <<
(GEN12_GUC_SW_CTX_ID_SHIFT - 32); (GEN12_GUC_SW_CTX_ID_SHIFT - 32);
} else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 50)) { } else if (GRAPHICS_VER_FULL(stream->engine->i915) >= IP_VER(12, 55)) {
ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) << ctx_id = (XEHP_MAX_CONTEXT_HW_ID - 1) <<
(XEHP_SW_CTX_ID_SHIFT - 32); (XEHP_SW_CTX_ID_SHIFT - 32);
...@@ -4122,7 +4122,7 @@ static int read_properties_unlocked(struct i915_perf *perf, ...@@ -4122,7 +4122,7 @@ static int read_properties_unlocked(struct i915_perf *perf,
props->hold_preemption = !!value; props->hold_preemption = !!value;
break; break;
case DRM_I915_PERF_PROP_GLOBAL_SSEU: { case DRM_I915_PERF_PROP_GLOBAL_SSEU: {
if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 50)) { if (GRAPHICS_VER_FULL(perf->i915) >= IP_VER(12, 55)) {
drm_dbg(&perf->i915->drm, drm_dbg(&perf->i915->drm,
"SSEU config not supported on gfx %x\n", "SSEU config not supported on gfx %x\n",
GRAPHICS_VER_FULL(perf->i915)); GRAPHICS_VER_FULL(perf->i915));
......
...@@ -105,7 +105,7 @@ static int query_geometry_subslices(struct drm_i915_private *i915, ...@@ -105,7 +105,7 @@ static int query_geometry_subslices(struct drm_i915_private *i915,
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
struct i915_engine_class_instance classinstance; struct i915_engine_class_instance classinstance;
if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
return -ENODEV; return -ENODEV;
classinstance = *((struct i915_engine_class_instance *)&query_item->flags); classinstance = *((struct i915_engine_class_instance *)&query_item->flags);
......
...@@ -2714,7 +2714,7 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, ...@@ -2714,7 +2714,7 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
* the forcewake domain if any of the other engines * the forcewake domain if any of the other engines
* in the same media slice are present. * in the same media slice are present.
*/ */
if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) { if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 55) && i % 2 == 0) {
if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1))) if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
continue; continue;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment