Commit 78306438 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2022-02-10' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Build fix for non-x86 platforms after remap_io_mapping changes. (Lucas De Marchi)
- Correctly propagate errors during object migration blits. (Thomas Hellström)
- Disable DRRS support on HSW/IVB where it is not implemented yet. (Ville Syrjälä)
- Correct pipe dbuf BIOS configuration during readout. (Ville Syrjälä)
- Properly sanitise BIOS dbuf configuration on ADL-P+ for !join_mbus cases. (Ville Syrjälä)
- Fix oops due to missing stack depot. (Ville Syrjälä)
- Workaround broken BIOS DBUF configuration on TGL/RKL. (Ville Syrjälä)
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YgTuYAtpaV3XAGmx@tursulin-mobl2
parents df2bb4dc 4e6f5512
@@ -10673,6 +10673,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
 		vlv_wm_sanitize(dev_priv);
 	} else if (DISPLAY_VER(dev_priv) >= 9) {
 		skl_wm_get_hw_state(dev_priv);
+		skl_wm_sanitize(dev_priv);
 	} else if (HAS_PCH_SPLIT(dev_priv)) {
 		ilk_wm_get_hw_state(dev_priv);
 	}
......
@@ -405,6 +405,7 @@ intel_drrs_init(struct intel_connector *connector,
 			struct drm_display_mode *fixed_mode)
 {
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+	struct intel_encoder *encoder = connector->encoder;
 	struct drm_display_mode *downclock_mode = NULL;

 	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work);

@@ -416,6 +417,13 @@ intel_drrs_init(struct intel_connector *connector,
 		return NULL;
 	}

+	if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) &&
+	    encoder->port != PORT_A) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "DRRS only supported on eDP port A\n");
+		return NULL;
+	}
+
 	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
 		drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
 		return NULL;
......
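A side note on the DRRS hunk above: the new gate only matters on IVB/HSW (pre-BDW, non-GMCH) hardware, where DRRS is only wired up on eDP port A. The stand-alone C sketch below is not part of the commit; it just restates the predicate with DISPLAY_VER()/HAS_GMCH() replaced by plain parameters (the helper name drrs_port_ok and the reduced port enum are made up for illustration).

#include <stdbool.h>
#include <stdio.h>

/* Made-up stand-ins for the i915 port values used by the check. */
enum port { PORT_A, PORT_B, PORT_C };

/*
 * Same predicate as the new code above: on pre-BDW non-GMCH platforms
 * (IVB/HSW) DRRS is only wired up on eDP port A, every other port is
 * rejected; everything else is left alone.
 */
static bool drrs_port_ok(int display_ver, bool has_gmch, enum port port)
{
	if ((display_ver < 8 && !has_gmch) && port != PORT_A)
		return false;
	return true;
}

int main(void)
{
	printf("HSW port A: %d\n", drrs_port_ok(7, false, PORT_A));      /* 1 */
	printf("HSW port C: %d\n", drrs_port_ok(7, false, PORT_C));      /* 0 */
	printf("VLV (GMCH) port B: %d\n", drrs_port_ok(7, true, PORT_B)); /* 1 */
	return 0;
}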
@@ -427,11 +427,17 @@ __i915_ttm_move(struct ttm_buffer_object *bo,
 		if (!IS_ERR(fence))
 			goto out;
-	} else if (move_deps) {
-		int err = i915_deps_sync(move_deps, ctx);
+	} else {
+		int err = PTR_ERR(fence);

-		if (err)
-			return ERR_PTR(err);
+		if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
+			return fence;
+
+		if (move_deps) {
+			err = i915_deps_sync(move_deps, ctx);
+			if (err)
+				return ERR_PTR(err);
+		}
 	}

 	/* Error intercept failed or no accelerated migration to start with */
......
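The point of the TTM hunk above is which errors get propagated: interruptible/restartable errors from the accelerated move (-EINTR, -ERESTARTSYS, -EAGAIN) are now returned to the caller instead of silently falling through to the CPU copy. Below is a rough userspace sketch of that control flow, with minimal stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() helpers; handle_move_error and the have_deps flag are invented for illustration and are not the driver's API.

#include <errno.h>
#include <stdio.h>

#ifndef ERESTARTSYS
#define ERESTARTSYS 512	/* kernel-internal errno, not in userspace errno.h */
#endif

/* Minimal userspace stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
#define MAX_ERRNO 4095
static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/*
 * Sketch of the new control flow: restartable errors from the accelerated
 * move are handed straight back to the caller (as the error "fence"), while
 * anything else syncs the dependencies and falls through (NULL here) to the
 * memcpy fallback.
 */
static void *handle_move_error(void *fence, int have_deps)
{
	long err = PTR_ERR(fence);

	if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
		return fence;		/* caller restarts the operation */

	if (have_deps) {
		err = 0;		/* i915_deps_sync() stand-in: success */
		if (err)
			return ERR_PTR(err);
	}
	return NULL;			/* fall back to the CPU copy */
}

int main(void)
{
	void *r = handle_move_error(ERR_PTR(-ERESTARTSYS), 1);
	printf("restartable error -> propagated %ld\n", IS_ERR(r) ? PTR_ERR(r) : 0L);
	r = handle_move_error(ERR_PTR(-EIO), 1);
	printf("other error       -> fallback, fence=%p\n", r);
	return 0;
}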
@@ -6,6 +6,7 @@
 #ifndef __I915_MM_H__
 #define __I915_MM_H__

+#include <linux/bug.h>
 #include <linux/types.h>

 struct vm_area_struct;
......
@@ -4717,6 +4717,10 @@ static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
 };

 static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
+	/*
+	 * Keep the join_mbus cases first so check_mbus_joined()
+	 * will prefer them over the !join_mbus cases.
+	 */
 	{
 		.active_pipes = BIT(PIPE_A),
 		.dbuf_mask = {
@@ -4731,6 +4735,20 @@ static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
 		},
 		.join_mbus = true,
 	},
+	{
+		.active_pipes = BIT(PIPE_A),
+		.dbuf_mask = {
+			[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+		},
+		.join_mbus = false,
+	},
+	{
+		.active_pipes = BIT(PIPE_B),
+		.dbuf_mask = {
+			[PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+		},
+		.join_mbus = false,
+	},
 	{
 		.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
 		.dbuf_mask = {
@@ -4847,13 +4865,14 @@ static bool adlp_check_mbus_joined(u8 active_pipes)
 	return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
 }

-static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
 			      const struct dbuf_slice_conf_entry *dbuf_slices)
 {
 	int i;

 	for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
-		if (dbuf_slices[i].active_pipes == active_pipes)
+		if (dbuf_slices[i].active_pipes == active_pipes &&
+		    dbuf_slices[i].join_mbus == join_mbus)
 			return dbuf_slices[i].dbuf_mask[pipe];
 	}

 	return 0;
@@ -4864,7 +4883,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
  * returns correspondent DBuf slice mask as stated in BSpec for particular
  * platform.
  */
-static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
 {
 	/*
 	 * FIXME: For ICL this is still a bit unclear as prev BSpec revision
@@ -4878,37 +4897,41 @@ static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
 	 * still here - we will need it once those additional constraints
 	 * pop up.
 	 */
-	return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
+	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+				   icl_allowed_dbufs);
 }

-static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
 {
-	return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
+	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+				   tgl_allowed_dbufs);
 }

-static u32 adlp_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
 {
-	return compute_dbuf_slices(pipe, active_pipes, adlp_allowed_dbufs);
+	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+				   adlp_allowed_dbufs);
 }

-static u32 dg2_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
 {
-	return compute_dbuf_slices(pipe, active_pipes, dg2_allowed_dbufs);
+	return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+				   dg2_allowed_dbufs);
 }

-static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	enum pipe pipe = crtc->pipe;

 	if (IS_DG2(dev_priv))
-		return dg2_compute_dbuf_slices(pipe, active_pipes);
+		return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
 	else if (IS_ALDERLAKE_P(dev_priv))
-		return adlp_compute_dbuf_slices(pipe, active_pipes);
+		return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
 	else if (DISPLAY_VER(dev_priv) == 12)
-		return tgl_compute_dbuf_slices(pipe, active_pipes);
+		return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
 	else if (DISPLAY_VER(dev_priv) == 11)
-		return icl_compute_dbuf_slices(pipe, active_pipes);
+		return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
 	/*
 	 * For anything else just return one slice yet.
 	 * Should be extended for other platforms.
@@ -6127,11 +6150,16 @@ skl_compute_ddb(struct intel_atomic_state *state)
 			return ret;
 	}

+	if (IS_ALDERLAKE_P(dev_priv))
+		new_dbuf_state->joined_mbus =
+			adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+
 	for_each_intel_crtc(&dev_priv->drm, crtc) {
 		enum pipe pipe = crtc->pipe;

 		new_dbuf_state->slices[pipe] =
-			skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes);
+			skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
+						new_dbuf_state->joined_mbus);

 		if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
 			continue;
@@ -6143,9 +6171,6 @@ skl_compute_ddb(struct intel_atomic_state *state)

 	new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);

-	if (IS_ALDERLAKE_P(dev_priv))
-		new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes);
-
 	if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
 	    old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
 		ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
@@ -6626,6 +6651,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
 		enum pipe pipe = crtc->pipe;
 		unsigned int mbus_offset;
 		enum plane_id plane_id;
+		u8 slices;

 		skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
 		crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
@@ -6645,19 +6671,22 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
 			skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
 		}

-		dbuf_state->slices[pipe] =
-			skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes);
-
 		dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);

 		/*
 		 * Used for checking overlaps, so we need absolute
 		 * offsets instead of MBUS relative offsets.
 		 */
-		mbus_offset = mbus_ddb_offset(dev_priv, dbuf_state->slices[pipe]);
+		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+						 dbuf_state->joined_mbus);
+		mbus_offset = mbus_ddb_offset(dev_priv, slices);
 		crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
 		crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;

+		/* The slices actually used by the planes on the pipe */
+		dbuf_state->slices[pipe] =
+			skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb);
+
 		drm_dbg_kms(&dev_priv->drm,
 			    "[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
 			    crtc->base.base.id, crtc->base.name,
@@ -6669,6 +6698,74 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
 	dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
 }

+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+	const struct intel_dbuf_state *dbuf_state =
+		to_intel_dbuf_state(i915->dbuf.obj.state);
+	struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+	struct intel_crtc *crtc;
+
+	for_each_intel_crtc(&i915->drm, crtc) {
+		const struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
+
+		entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+	}
+
+	for_each_intel_crtc(&i915->drm, crtc) {
+		const struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
+		u8 slices;
+
+		slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+						 dbuf_state->joined_mbus);
+		if (dbuf_state->slices[crtc->pipe] & ~slices)
+			return true;
+
+		if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+						I915_MAX_PIPES, crtc->pipe))
+			return true;
+	}
+
+	return false;
+}
+
+void skl_wm_sanitize(struct drm_i915_private *i915)
+{
+	struct intel_crtc *crtc;
+
+	/*
+	 * On TGL/RKL (at least) the BIOS likes to assign the planes
+	 * to the wrong DBUF slices. This will cause an infinite loop
+	 * in skl_commit_modeset_enables() as it can't find a way to
+	 * transition between the old bogus DBUF layout to the new
+	 * proper DBUF layout without DBUF allocation overlaps between
+	 * the planes (which cannot be allowed or else the hardware
+	 * may hang). If we detect a bogus DBUF layout just turn off
+	 * all the planes so that skl_commit_modeset_enables() can
+	 * simply ignore them.
+	 */
+	if (!skl_dbuf_is_misconfigured(i915))
+		return;
+
+	drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+	for_each_intel_crtc(&i915->drm, crtc) {
+		struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+		const struct intel_plane_state *plane_state =
+			to_intel_plane_state(plane->base.state);
+		struct intel_crtc_state *crtc_state =
+			to_intel_crtc_state(crtc->base.state);
+
+		if (plane_state->uapi.visible)
+			intel_plane_disable_noatomic(crtc, plane);
+
+		drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+		memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+	}
+}
+
 static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
 {
 	struct drm_device *dev = crtc->base.dev;
......
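For readers skimming the intel_pm.c changes: skl_dbuf_is_misconfigured() flags the BIOS state as bogus when a pipe sits on dbuf slices the driver's policy table would never assign it, or when two pipes' DDB allocations overlap. The following is a minimal, self-contained model of those two checks, not the kernel code; dbuf_is_misconfigured, ddb_entry and the sample masks are illustrative stand-ins.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model of a skl_ddb_entry-style range: [start, end). */
struct ddb_entry { uint16_t start, end; };

static bool ranges_overlap(const struct ddb_entry *a, const struct ddb_entry *b)
{
	/* empty allocations never overlap */
	if (a->start == a->end || b->start == b->end)
		return false;
	return a->start < b->end && b->start < a->end;
}

static bool dbuf_is_misconfigured(const uint8_t *used_slices,    /* per pipe, from HW readout */
				  const uint8_t *allowed_slices, /* per pipe, from the policy table */
				  const struct ddb_entry *ddb,   /* per pipe, absolute offsets */
				  int num_pipes)
{
	for (int i = 0; i < num_pipes; i++) {
		/* a pipe touches slices the policy would never hand it */
		if (used_slices[i] & ~allowed_slices[i])
			return true;
		/* two pipes' DDB allocations overlap */
		for (int j = i + 1; j < num_pipes; j++)
			if (ranges_overlap(&ddb[i], &ddb[j]))
				return true;
	}
	return false;
}

int main(void)
{
	uint8_t used[]    = { 0x1, 0x2 };
	uint8_t allowed[] = { 0x1, 0x2 };
	struct ddb_entry ddb_ok[]  = { { 0, 512 }, { 512, 1024 } };
	struct ddb_entry ddb_bad[] = { { 0, 600 }, { 512, 1024 } };

	printf("clean BIOS config: %d\n", dbuf_is_misconfigured(used, allowed, ddb_ok, 2));  /* 0 */
	printf("overlapping DDB:   %d\n", dbuf_is_misconfigured(used, allowed, ddb_bad, 2)); /* 1 */
	return 0;
}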
@@ -47,6 +47,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
 			      struct skl_pipe_wm *out);
 void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
 void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
+void skl_wm_sanitize(struct drm_i915_private *dev_priv);
 bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
 			   const struct intel_bw_state *bw_state);
 void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
......
@@ -68,9 +68,7 @@ static noinline depot_stack_handle_t __save_depot_stack(void)
 static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
 {
 	spin_lock_init(&rpm->debug.lock);
-
-	if (rpm->available)
-		stack_depot_init();
+	stack_depot_init();
 }

 static noinline depot_stack_handle_t
......
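The runtime-pm hunk is easiest to read as an initialisation-ordering bug: wakeref tracking saves a stack on every acquire whether or not runtime PM is available on the platform, so gating stack_depot_init() on rpm->available left the depot uninitialised on some machines and the first stack save oopsed. Below is a toy model of that failure mode; depot_init/depot_save/track_wakeref are made-up stand-ins, not the kernel API.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy "depot" that must be initialised before it can be used. */
static bool depot_ready;

static void depot_init(void) { depot_ready = true; }
static int depot_save(void)  { assert(depot_ready); return 42; /* fake handle */ }

/*
 * Wakeref tracking saves a stack on every acquire, regardless of whether
 * runtime PM is available - so the init must not depend on 'available'.
 */
static int track_wakeref(void) { return depot_save(); }

static void init_wakeref_tracking(bool rpm_available)
{
	/*
	 * Old code: if (rpm_available) depot_init();  -> depot_save() trips
	 * the assert on platforms without runtime PM.
	 * New code initialises unconditionally.
	 */
	(void)rpm_available;
	depot_init();
}

int main(void)
{
	init_wakeref_tracking(false);	/* e.g. a platform without runtime PM */
	printf("handle = %d\n", track_wakeref());
	return 0;
}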