Commit 88ed07cb authored by Daniele Ceraolo Spurio, committed by Matt Roper

drm/i915/xehp: handle fused off CCS engines

HW resources are divided across the active CCS engines at the compute
slice level, with each CCS having priority on one of the cslices.
If a compute slice has no enabled DSS, its paired compute engine is not
usable in full parallel execution because the remaining engines already
fully saturate the HW, so consider it fused off.
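
As an illustration of that rule, the following self-contained userspace sketch
derives a usable-CCS mask from a DSS fuse mask. The slice layout constants and
the helper name dss_mask_to_ccs_mask() are invented for the example; they are
not part of the i915 code.

#include <stdio.h>
#include <stdint.h>

#define NUM_CSLICES     4  /* illustrative Xe_HP-style layout: 4 compute slices */
#define DSS_PER_CSLICE  4  /* illustrative: DSS per compute slice */

/*
 * Hypothetical helper: a CCS engine is only considered usable if its
 * paired compute slice still has at least one DSS enabled.
 */
static uint32_t dss_mask_to_ccs_mask(uint32_t dss_mask)
{
	uint32_t ccs_mask = 0;

	for (int c = 0; c < NUM_CSLICES; c++) {
		uint32_t cslice_dss = (dss_mask >> (c * DSS_PER_CSLICE)) &
				      ((1u << DSS_PER_CSLICE) - 1);

		if (cslice_dss)		/* at least one DSS alive in this cslice */
			ccs_mask |= 1u << c;
	}

	return ccs_mask;
}

int main(void)
{
	/* Example fuse value: compute slice 2 has no enabled DSS. */
	uint32_t dss_mask = 0xf0ff;

	/* Prints "usable CCS mask: 0xb" -> ccs2 is treated as fused off. */
	printf("usable CCS mask: 0x%x\n", dss_mask_to_ccs_mask(dss_mask));
	return 0;
}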

v2 (José):
 - moved it to its own function
 - fixed definition of ccs_mask

v3 (Matt):
 - Replace fls() condition with a simple IP version test

v4 (Matt):
 - Don't try to calculate a ccs_mask using
   intel_slicemask_from_dssmask() until we've determined that we're
   running on an Xe_HP platform where the logic makes sense (and won't
   overflow).

Cc: Stuart Summers <stuart.summers@intel.com>
Cc: Vinay Belgaumkar <vinay.belgaumkar@intel.com>
Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Stuart Summers <stuart.summers@intel.com>
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220302052008.1884985-1-matthew.d.roper@intel.com
parent e393e2aa
@@ -592,6 +592,29 @@ bool gen11_vdbox_has_sfc(struct intel_gt *gt,
 	return false;
 }
 
+static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
+{
+	struct drm_i915_private *i915 = gt->i915;
+	struct intel_gt_info *info = &gt->info;
+	int ss_per_ccs = info->sseu.max_subslices / I915_MAX_CCS;
+	unsigned long ccs_mask;
+	unsigned int i;
+
+	if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+		return;
+
+	ccs_mask = intel_slicemask_from_dssmask(intel_sseu_get_compute_subslices(&info->sseu),
+						ss_per_ccs);
+	/*
+	 * If all DSS in a quadrant are fused off, the corresponding CCS
+	 * engine is not available for use.
+	 */
+	for_each_clear_bit(i, &ccs_mask, I915_MAX_CCS) {
+		info->engine_mask &= ~BIT(_CCS(i));
+		drm_dbg(&i915->drm, "ccs%u fused off\n", i);
+	}
+}
+
 /*
  * Determine which engines are fused off in our particular hardware.
  * Note that we have a catch-22 situation where we need to be able to access
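
For reference, here is a self-contained userspace approximation of the
for_each_clear_bit() loop added above. The bit position of CCS0 in the engine
mask and the function name apply_compute_fuses() are assumptions made for the
example only; the real driver clears BIT(_CCS(i)) in info->engine_mask.

#include <stdio.h>
#include <stdint.h>

#define MAX_CCS          4   /* stand-in for I915_MAX_CCS */
#define CCS0_ENGINE_BIT  8   /* hypothetical bit position of CCS0 in the engine mask */

/* Userspace stand-in for the kernel's for_each_clear_bit() loop above. */
static uint32_t apply_compute_fuses(uint32_t engine_mask, uint32_t ccs_mask)
{
	for (unsigned int i = 0; i < MAX_CCS; i++) {
		if (ccs_mask & (1u << i))
			continue;	/* this cslice has DSS enabled, keep its CCS */

		engine_mask &= ~(1u << (CCS0_ENGINE_BIT + i));
		printf("ccs%u fused off\n", i);
	}

	return engine_mask;
}

int main(void)
{
	uint32_t engine_mask = 0xf00;	/* all four CCS engines initially present */
	uint32_t ccs_mask = 0xb;	/* cslice 2 fused off, as in the earlier example */

	/* Prints "ccs2 fused off" and "engine mask: 0xf00 -> 0xb00". */
	printf("engine mask: 0x%x -> 0x%x\n",
	       engine_mask, apply_compute_fuses(engine_mask, ccs_mask));
	return 0;
}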
@@ -673,6 +696,8 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
 		vebox_mask, VEBOX_MASK(gt));
 	GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
 
+	engine_mask_apply_compute_fuses(gt);
+
 	return info->engine_mask;
 }
@@ -32,7 +32,9 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu)
 	return total;
 }
 
-u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice)
+static u32
+_intel_sseu_get_subslices(const struct sseu_dev_info *sseu,
+			  const u8 *subslice_mask, u8 slice)
 {
 	int i, offset = slice * sseu->ss_stride;
 	u32 mask = 0;
@@ -40,12 +42,21 @@ u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice)
 	GEM_BUG_ON(slice >= sseu->max_slices);
 
 	for (i = 0; i < sseu->ss_stride; i++)
-		mask |= (u32)sseu->subslice_mask[offset + i] <<
-			i * BITS_PER_BYTE;
+		mask |= (u32)subslice_mask[offset + i] << i * BITS_PER_BYTE;
 
 	return mask;
 }
 
+u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice)
+{
+	return _intel_sseu_get_subslices(sseu, sseu->subslice_mask, slice);
+}
+
+u32 intel_sseu_get_compute_subslices(const struct sseu_dev_info *sseu)
+{
+	return _intel_sseu_get_subslices(sseu, sseu->compute_subslice_mask, 0);
+}
+
 void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
 			      u8 *subslice_mask, u32 ss_mask)
 {
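
As a standalone illustration of the byte packing that _intel_sseu_get_subslices()
performs, here is a minimal userspace sketch; the two-slice, two-byte-stride mask
contents are invented for the example and are not real fuse values.

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

/*
 * Userspace sketch of the byte packing done by _intel_sseu_get_subslices():
 * the subslice mask is stored as ss_stride bytes per slice, and the helper
 * assembles the bytes for one slice into a single 32-bit mask.
 */
static uint32_t get_subslices(const uint8_t *subslice_mask, int ss_stride, int slice)
{
	int offset = slice * ss_stride;
	uint32_t mask = 0;

	for (int i = 0; i < ss_stride; i++)
		mask |= (uint32_t)subslice_mask[offset + i] << (i * BITS_PER_BYTE);

	return mask;
}

int main(void)
{
	/* Invented example: two slices, two bytes of subslice mask per slice. */
	const uint8_t subslice_mask[] = {
		0xff, 0x0f,	/* slice 0: subslices 0-11 enabled */
		0x3c, 0x00,	/* slice 1: subslices 2-5 enabled  */
	};

	printf("slice 0: 0x%x\n", get_subslices(subslice_mask, 2, 0));	/* 0xfff */
	printf("slice 1: 0x%x\n", get_subslices(subslice_mask, 2, 1));	/* 0x3c  */
	return 0;
}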
@@ -103,7 +103,9 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu);
 unsigned int
 intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice);
 
 u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice);
+
+u32 intel_sseu_get_compute_subslices(const struct sseu_dev_info *sseu);
 
 void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
 			      u8 *subslice_mask, u32 ss_mask);