Commit 96c5a15f authored by Matt Roper, committed by Rodrigo Vivi

drm/i915/kbl: Fix revision ID checks

We usually assume that increasing PCI device revision IDs correspond to
newer steppings; macros like IS_KBL_REVID() rely on this behavior.
Unfortunately, this turns out not to be true on KBL: the newer device 2
revision IDs sometimes go backward to older steppings.  The situation is
further complicated by the different GT and display steppings associated
with each revision ID.

Let's work around this by providing a table that maps each revision ID
to its specific GT and display steppings, and then performing our
comparisons on the mapped values.
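
As a purely illustrative sketch (not part of the patch), the idea boils
down to using the PCI revision ID only as an index into a table of
monotonically increasing stepping values and doing the range comparison
on the looked-up stepping.  The names below (rev_steppings,
kbl_rev_table, kbl_gt_stepping_in_range) are hypothetical stand-ins for
the real definitions added further down:

#include <stdbool.h>
#include <stdint.h>

/* Steppings in increasing order; the raw revision IDs do not follow this order. */
enum kbl_stepping {
	STEP_A0, STEP_B0, STEP_B1, STEP_C0, STEP_D0,
	STEP_D1, STEP_E0, STEP_F0, STEP_G0,
};

struct rev_steppings {
	uint8_t gt_stepping;
	uint8_t disp_stepping;
};

/* PCI revision ID -> steppings; same data as the kbl_revids[] table below. */
static const struct rev_steppings kbl_rev_table[] = {
	[0] = { STEP_A0, STEP_A0 },
	[1] = { STEP_B0, STEP_B0 },
	[2] = { STEP_C0, STEP_B0 },
	[3] = { STEP_D0, STEP_B0 },
	[4] = { STEP_F0, STEP_C0 },
	[5] = { STEP_C0, STEP_B1 },
	[6] = { STEP_D1, STEP_B1 },
	[7] = { STEP_G0, STEP_C0 },
};

/* Range-check the mapped GT stepping rather than the raw PCI revision ID.
 * Assumes every shipping revision ID has a table entry, as the real code does.
 */
static bool kbl_gt_stepping_in_range(uint8_t pci_revid,
				     enum kbl_stepping since,
				     enum kbl_stepping until)
{
	enum kbl_stepping step = kbl_rev_table[pci_revid].gt_stepping;

	return step >= since && step <= until;
}

For example, PCI revision 4 maps to GT stepping F0 but display stepping
C0, so a GT workaround bounded at E0 would no longer apply there while a
display check bounded at C0 still would; comparing the raw revision ID
could not express that distinction.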

v2:
 - Move the kbl_revids[] array to intel_workarounds.c to avoid compiler
   warnings about an unused variable in files that don't call the
   macros (kernel test robot).

Bspec: 18329
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200811032105.2819370-1-matthew.d.roper@intel.com
Reviewed-by: Swathi Dhanavanthri <swathi.dhanavanthri@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 04dfb1ac
@@ -4555,7 +4555,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
 			vf_flush_wa = true;
 
 		/* WaForGAMHang:kbl */
-		if (IS_KBL_REVID(request->engine->i915, 0, KBL_REVID_B0))
+		if (IS_KBL_GT_REVID(request->engine->i915, 0, KBL_REVID_B0))
 			dc_flush_wa = true;
 	}
@@ -52,6 +52,24 @@
  * - Public functions to init or apply the given workaround type.
  */
 
+/*
+ * KBL revision ID ordering is bizarre; higher revision ID's map to lower
+ * steppings in some cases.  So rather than test against the revision ID
+ * directly, let's map that into our own range of increasing ID's that we
+ * can test against in a regular manner.
+ */
+const struct i915_rev_steppings kbl_revids[] = {
+	[0] = { .gt_stepping = KBL_REVID_A0, .disp_stepping = KBL_REVID_A0 },
+	[1] = { .gt_stepping = KBL_REVID_B0, .disp_stepping = KBL_REVID_B0 },
+	[2] = { .gt_stepping = KBL_REVID_C0, .disp_stepping = KBL_REVID_B0 },
+	[3] = { .gt_stepping = KBL_REVID_D0, .disp_stepping = KBL_REVID_B0 },
+	[4] = { .gt_stepping = KBL_REVID_F0, .disp_stepping = KBL_REVID_C0 },
+	[5] = { .gt_stepping = KBL_REVID_C0, .disp_stepping = KBL_REVID_B1 },
+	[6] = { .gt_stepping = KBL_REVID_D1, .disp_stepping = KBL_REVID_B1 },
+	[7] = { .gt_stepping = KBL_REVID_G0, .disp_stepping = KBL_REVID_C0 },
+};
+
 static void wa_init_start(struct i915_wa_list *wal, const char *name, const char *engine_name)
 {
 	wal->name = name;
@@ -470,7 +488,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
 	gen9_ctx_workarounds_init(engine, wal);
 
 	/* WaToEnableHwFixForPushConstHWBug:kbl */
-	if (IS_KBL_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
+	if (IS_KBL_GT_REVID(i915, KBL_REVID_C0, REVID_FOREVER))
 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
@@ -1008,7 +1026,7 @@ kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 	gen9_gt_workarounds_init(i915, wal);
 
 	/* WaDisableDynamicCreditSharing:kbl */
-	if (IS_KBL_REVID(i915, 0, KBL_REVID_B0))
+	if (IS_KBL_GT_REVID(i915, 0, KBL_REVID_B0))
 		wa_write_or(wal,
 			    GAMT_CHKN_BIT_REG,
 			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
@@ -1923,7 +1941,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 	struct drm_i915_private *i915 = engine->i915;
 
 	/* WaKBLVECSSemaphoreWaitPoll:kbl */
-	if (IS_KBL_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
+	if (IS_KBL_GT_REVID(i915, KBL_REVID_A0, KBL_REVID_E0)) {
 		wa_write(wal,
 			 RING_SEMA_WAIT_POLL(engine->mmio_base),
 			 1);
@@ -392,7 +392,7 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
 	pre |= IS_HSW_EARLY_SDV(dev_priv);
 	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
 	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
-	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
+	pre |= IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_A0);
 	pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);
 
 	if (pre) {
@@ -1523,14 +1523,34 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define IS_BXT_REVID(dev_priv, since, until) \
 	(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
 
-#define KBL_REVID_A0		0x0
-#define KBL_REVID_B0		0x1
-#define KBL_REVID_C0		0x2
-#define KBL_REVID_D0		0x3
-#define KBL_REVID_E0		0x4
-
-#define IS_KBL_REVID(dev_priv, since, until) \
-	(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
+enum {
+	KBL_REVID_A0,
+	KBL_REVID_B0,
+	KBL_REVID_B1,
+	KBL_REVID_C0,
+	KBL_REVID_D0,
+	KBL_REVID_D1,
+	KBL_REVID_E0,
+	KBL_REVID_F0,
+	KBL_REVID_G0,
+};
+
+struct i915_rev_steppings {
+	u8 gt_stepping;
+	u8 disp_stepping;
+};
+
+/* Defined in intel_workarounds.c */
+extern const struct i915_rev_steppings kbl_revids[];
+
+#define IS_KBL_GT_REVID(dev_priv, since, until) \
+	(IS_KABYLAKE(dev_priv) && \
+	 kbl_revids[INTEL_REVID(dev_priv)].gt_stepping >= since && \
+	 kbl_revids[INTEL_REVID(dev_priv)].gt_stepping <= until)
+
+#define IS_KBL_DISP_REVID(dev_priv, since, until) \
+	(IS_KABYLAKE(dev_priv) && \
+	 kbl_revids[INTEL_REVID(dev_priv)].disp_stepping >= since && \
+	 kbl_revids[INTEL_REVID(dev_priv)].disp_stepping <= until)
 
 #define GLK_REVID_A0		0x0
 #define GLK_REVID_A1		0x1
@@ -7217,12 +7217,12 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
 	gen9_init_clock_gating(dev_priv);
 
 	/* WaDisableSDEUnitClockGating:kbl */
-	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+	if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
 		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
 			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
 	/* WaDisableGamClockGating:kbl */
-	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+	if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0))
 		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
 			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);