Commit ff43bc37 authored by Ville Syrjälä

drm/i915: Move ddb/wm programming into plane update/disable hooks on skl+

On SKL+ the plane WM/BUF_CFG registers are a proper part of each
plane's register set. That means accessing them will cancel any
pending plane update, and we would need a PLANE_SURF register write
to arm the wm/ddb change as well.

To avoid all the problems with that, let's just move the wm/ddb
programming into the plane update/disable hooks. Now all plane
registers get written in one (hopefully atomic) operation.

To make that feasible, we'll move the plane ddb tracking into
the crtc state. Watermarks were already tracked there.
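
Purely for illustration (not part of the patch): a minimal userspace sketch
of the flow this change aims for — the ddb entries are carried in the crtc
state and written from the plane update hook, so the trailing PLANE_SURF
write arms watermarks, ddb and the other plane registers together.
write_reg(), the struct layout, MAX_PLANES and MAX_WM_LEVELS are made-up
stand-ins rather than the driver's real API; only the register names and
the BUF_CFG encoding mirror the code in the diff below.

#include <stdio.h>
#include <stdint.h>

#define MAX_PLANES 4     /* hypothetical stand-in for I915_MAX_PLANES */
#define MAX_WM_LEVELS 8  /* hypothetical; the driver uses ilk_wm_max_level() */

struct ddb_entry {
	uint16_t start, end;
};

/* ddb entries now live in the per-crtc state, next to the watermarks */
struct crtc_state {
	struct ddb_entry plane_ddb_y[MAX_PLANES];
	struct ddb_entry plane_ddb_uv[MAX_PLANES];
	uint32_t wm[MAX_PLANES][MAX_WM_LEVELS]; /* simplified wm levels */
	uint32_t surf[MAX_PLANES];
};

/* stand-in for an MMIO register write */
static void write_reg(const char *reg, int plane, uint32_t val)
{
	printf("%s[%d] <= 0x%08x\n", reg, plane, val);
}

/*
 * One plane update: the wm/ddb values are programmed together with the
 * rest of the plane registers, and the final PLANE_SURF write arms the
 * whole lot as a single update.
 */
static void plane_update(const struct crtc_state *cs, int plane)
{
	const struct ddb_entry *ddb = &cs->plane_ddb_y[plane];
	int level;

	for (level = 0; level < MAX_WM_LEVELS; level++)
		write_reg("PLANE_WM", plane, cs->wm[plane][level]);

	/* same (end - 1) << 16 | start encoding as skl_ddb_entry_write() */
	write_reg("PLANE_BUF_CFG", plane,
		  ddb->end ? (uint32_t)(ddb->end - 1) << 16 | ddb->start : 0);

	/* ... PLANE_CTL and friends would go here ... */

	write_reg("PLANE_SURF", plane, cs->surf[plane]); /* arms the update */
}

int main(void)
{
	struct crtc_state cs = {
		.plane_ddb_y[0] = { .start = 0, .end = 160 },
		.surf[0] = 0x10000000,
	};

	plane_update(&cs, 0);
	return 0;
}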

v2: Rebase due to input CSC
v3: Split out a bunch of junk (Matt)
v4: Add skl_wm_add_affected_planes() to deal with
    cursor special case and non-zero wm register reset value
v5: Drop the unrelated for_each_intel_plane_mask() fix (Matt)
    Remove the redundant ddb memset() (Matt)

Cc: Matt Roper <matthew.d.roper@intel.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> #v3
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181127165900.31298-1-ville.syrjala@linux.intel.com
parent 51de9c6d
......@@ -3441,31 +3441,32 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct skl_ddb_allocation *ddb;
struct skl_ddb_entry *entry;
enum pipe pipe;
int plane;
struct intel_crtc *crtc;
if (INTEL_GEN(dev_priv) < 9)
return -ENODEV;
drm_modeset_lock_all(dev);
ddb = &dev_priv->wm.skl_hw.ddb;
seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
for_each_pipe(dev_priv, pipe) {
for_each_intel_crtc(&dev_priv->drm, crtc) {
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum pipe pipe = crtc->pipe;
enum plane_id plane_id;
seq_printf(m, "Pipe %c\n", pipe_name(pipe));
for_each_universal_plane(dev_priv, pipe, plane) {
entry = &ddb->plane[pipe][plane];
seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
for_each_plane_id_on_crtc(crtc, plane_id) {
entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
entry->start, entry->end,
skl_ddb_entry_size(entry));
}
entry = &ddb->plane[pipe][PLANE_CURSOR];
entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
entry->end, skl_ddb_entry_size(entry));
}
......
......@@ -1095,9 +1095,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
}
struct skl_ddb_allocation {
/* packed/y */
struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
struct skl_ddb_entry uv_plane[I915_MAX_PIPES][I915_MAX_PLANES];
u8 enabled_slices; /* GEN11 has configurable 2 slices */
};
......
......@@ -10114,6 +10114,10 @@ static void i9xx_update_cursor(struct intel_plane *plane,
* except when the plane is getting enabled at which time
* the CURCNTR write arms the update.
*/
if (INTEL_GEN(dev_priv) >= 9)
skl_write_cursor_wm(plane, crtc_state);
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
plane->cursor.cntl != cntl) {
......@@ -11903,6 +11907,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct skl_pipe_wm hw_wm, *sw_wm;
struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
......@@ -11913,6 +11919,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
skl_pipe_wm_get_hw_state(crtc, &hw_wm);
sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
skl_ddb_get_hw_state(dev_priv, &hw_ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
......@@ -11955,8 +11963,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
hw_ddb_entry = &hw_ddb.plane[pipe][plane];
sw_ddb_entry = &sw_ddb->plane[pipe][plane];
hw_ddb_entry = &hw_ddb_y[plane];
sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
......@@ -12005,8 +12013,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
......
......@@ -431,6 +431,15 @@ struct intel_link_m_n {
(__i)++) \
for_each_if(plane)
#define for_each_oldnew_intel_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->base.dev->mode_config.num_crtc && \
((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
(old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \
(new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
(__i)++) \
for_each_if(crtc)
void intel_link_compute_m_n(int bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
......
......@@ -706,6 +706,8 @@ struct intel_crtc_wm_state {
/* gen9+ only needs 1-step wm programming */
struct skl_pipe_wm optimal;
struct skl_ddb_entry ddb;
struct skl_ddb_entry plane_ddb_y[I915_MAX_PLANES];
struct skl_ddb_entry plane_ddb_uv[I915_MAX_PLANES];
} skl;
struct {
......@@ -2185,6 +2187,9 @@ void g4x_wm_get_hw_state(struct drm_device *dev);
void vlv_wm_get_hw_state(struct drm_device *dev);
void ilk_wm_get_hw_state(struct drm_device *dev);
void skl_wm_get_hw_state(struct drm_device *dev);
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
......@@ -2199,6 +2204,10 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1,
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
const struct skl_ddb_entry entries[],
int num_entries, int ignore_idx);
void skl_write_plane_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
void skl_write_cursor_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state);
bool ilk_disable_lp_wm(struct drm_device *dev);
int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
struct intel_crtc_state *cstate);
......
......@@ -3951,68 +3951,68 @@ static void
skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
const enum pipe pipe,
const enum plane_id plane_id,
struct skl_ddb_allocation *ddb /* out */)
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv)
{
u32 val, val2 = 0;
int fourcc, pixel_format;
u32 val, val2;
u32 fourcc = 0;
/* Cursor doesn't support NV12/planar, so no extra calculation needed */
if (plane_id == PLANE_CURSOR) {
val = I915_READ(CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(dev_priv,
&ddb->plane[pipe][plane_id], val);
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
return;
}
val = I915_READ(PLANE_CTL(pipe, plane_id));
/* No DDB allocated for disabled planes */
if (!(val & PLANE_CTL_ENABLE))
return;
pixel_format = val & PLANE_CTL_FORMAT_MASK;
fourcc = skl_format_to_fourcc(pixel_format,
val & PLANE_CTL_ORDER_RGBX,
val & PLANE_CTL_ALPHA_MASK);
if (val & PLANE_CTL_ENABLE)
fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK,
val & PLANE_CTL_ORDER_RGBX,
val & PLANE_CTL_ALPHA_MASK);
val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
if (fourcc == DRM_FORMAT_NV12 && INTEL_GEN(dev_priv) < 11) {
if (INTEL_GEN(dev_priv) >= 11) {
val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
} else {
val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
skl_ddb_entry_init_from_hw(dev_priv,
&ddb->plane[pipe][plane_id], val2);
skl_ddb_entry_init_from_hw(dev_priv,
&ddb->uv_plane[pipe][plane_id], val);
} else {
skl_ddb_entry_init_from_hw(dev_priv,
&ddb->plane[pipe][plane_id], val);
if (fourcc == DRM_FORMAT_NV12)
swap(val, val2);
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
skl_ddb_entry_init_from_hw(dev_priv, ddb_uv, val2);
}
}
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */)
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv)
{
struct intel_crtc *crtc;
memset(ddb, 0, sizeof(*ddb));
ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
enum pipe pipe = crtc->pipe;
enum plane_id plane_id;
for_each_intel_crtc(&dev_priv->drm, crtc) {
enum intel_display_power_domain power_domain;
enum plane_id plane_id;
enum pipe pipe = crtc->pipe;
power_domain = POWER_DOMAIN_PIPE(pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return;
power_domain = POWER_DOMAIN_PIPE(pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
for_each_plane_id_on_crtc(crtc, plane_id)
skl_ddb_get_hw_plane_state(dev_priv, pipe,
plane_id,
&ddb_y[plane_id],
&ddb_uv[plane_id]);
for_each_plane_id_on_crtc(crtc, plane_id)
skl_ddb_get_hw_plane_state(dev_priv, pipe,
plane_id, ddb);
intel_display_power_put(dev_priv, power_domain);
}
intel_display_power_put(dev_priv, power_domain);
}
void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */)
{
ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
}
/*
......@@ -4410,7 +4410,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_crtc *crtc = cstate->base.crtc;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
uint16_t alloc_size, start;
uint16_t minimum[I915_MAX_PLANES] = {};
......@@ -4423,8 +4422,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
uint16_t total_min_blocks = 0;
/* Clear the partitioning for disabled planes. */
memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
memset(ddb->uv_plane[pipe], 0, sizeof(ddb->uv_plane[pipe]));
memset(cstate->wm.skl.plane_ddb_y, 0, sizeof(cstate->wm.skl.plane_ddb_y));
memset(cstate->wm.skl.plane_ddb_uv, 0, sizeof(cstate->wm.skl.plane_ddb_uv));
if (WARN_ON(!state))
return 0;
......@@ -4471,8 +4470,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
}
alloc_size -= total_min_blocks;
ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start = alloc->end - minimum[PLANE_CURSOR];
cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].end = alloc->end;
/*
* 2. Distribute the remaining space in proportion to the amount of
......@@ -4503,8 +4502,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
/* Leave disabled planes at (0,0) */
if (data_rate) {
ddb->plane[pipe][plane_id].start = start;
ddb->plane[pipe][plane_id].end = start + plane_blocks;
cstate->wm.skl.plane_ddb_y[plane_id].start = start;
cstate->wm.skl.plane_ddb_y[plane_id].end = start + plane_blocks;
}
start += plane_blocks;
......@@ -4519,8 +4518,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_plane_blocks);
if (uv_data_rate) {
ddb->uv_plane[pipe][plane_id].start = start;
ddb->uv_plane[pipe][plane_id].end =
cstate->wm.skl.plane_ddb_uv[plane_id].start = start;
cstate->wm.skl.plane_ddb_uv[plane_id].end =
start + uv_plane_blocks;
}
......@@ -4978,16 +4977,13 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *cstate,
}
}
static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
struct intel_crtc_state *crtc_state,
static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
enum plane_id plane_id, int color_plane)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_y[plane_id]);
struct skl_wm_params wm_params;
enum pipe pipe = plane->pipe;
uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][plane_id]);
int ret;
ret = skl_compute_plane_wm_params(crtc_state, plane_state,
......@@ -5005,16 +5001,13 @@ static int skl_build_plane_wm_single(struct skl_ddb_allocation *ddb,
return 0;
}
static int skl_build_plane_wm_uv(struct skl_ddb_allocation *ddb,
struct intel_crtc_state *crtc_state,
static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
enum plane_id plane_id)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
struct skl_plane_wm *wm = &crtc_state->wm.skl.optimal.planes[plane_id];
u16 ddb_blocks = skl_ddb_entry_size(&crtc_state->wm.skl.plane_ddb_uv[plane_id]);
struct skl_wm_params wm_params;
enum pipe pipe = plane->pipe;
uint16_t ddb_blocks = skl_ddb_entry_size(&ddb->uv_plane[pipe][plane_id]);
int ret;
wm->is_planar = true;
......@@ -5033,8 +5026,7 @@ static int skl_build_plane_wm_uv(struct skl_ddb_allocation *ddb,
return 0;
}
static int skl_build_plane_wm(struct skl_ddb_allocation *ddb,
struct skl_pipe_wm *pipe_wm,
static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
......@@ -5046,13 +5038,13 @@ static int skl_build_plane_wm(struct skl_ddb_allocation *ddb,
if (!intel_wm_plane_visible(crtc_state, plane_state))
return 0;
ret = skl_build_plane_wm_single(ddb, crtc_state, plane_state,
ret = skl_build_plane_wm_single(crtc_state, plane_state,
plane_id, 0);
if (ret)
return ret;
if (fb->format->is_yuv && fb->format->num_planes > 1) {
ret = skl_build_plane_wm_uv(ddb, crtc_state, plane_state,
ret = skl_build_plane_wm_uv(crtc_state, plane_state,
plane_id);
if (ret)
return ret;
......@@ -5061,8 +5053,7 @@ static int skl_build_plane_wm(struct skl_ddb_allocation *ddb,
return 0;
}
static int icl_build_plane_wm(struct skl_ddb_allocation *ddb,
struct skl_pipe_wm *pipe_wm,
static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
......@@ -5081,17 +5072,17 @@ static int icl_build_plane_wm(struct skl_ddb_allocation *ddb,
WARN_ON(!fb->format->is_yuv ||
fb->format->num_planes == 1);
ret = skl_build_plane_wm_single(ddb, crtc_state, plane_state,
ret = skl_build_plane_wm_single(crtc_state, plane_state,
y_plane_id, 0);
if (ret)
return ret;
ret = skl_build_plane_wm_single(ddb, crtc_state, plane_state,
ret = skl_build_plane_wm_single(crtc_state, plane_state,
plane_id, 1);
if (ret)
return ret;
} else if (intel_wm_plane_visible(crtc_state, plane_state)) {
ret = skl_build_plane_wm_single(ddb, crtc_state, plane_state,
ret = skl_build_plane_wm_single(crtc_state, plane_state,
plane_id, 0);
if (ret)
return ret;
......@@ -5101,7 +5092,6 @@ static int icl_build_plane_wm(struct skl_ddb_allocation *ddb,
}
static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
struct skl_ddb_allocation *ddb,
struct skl_pipe_wm *pipe_wm)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
......@@ -5121,10 +5111,10 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
to_intel_plane_state(pstate);
if (INTEL_GEN(dev_priv) >= 11)
ret = icl_build_plane_wm(ddb, pipe_wm,
ret = icl_build_plane_wm(pipe_wm,
cstate, intel_pstate);
else
ret = skl_build_plane_wm(ddb, pipe_wm,
ret = skl_build_plane_wm(pipe_wm,
cstate, intel_pstate);
if (ret)
return ret;
......@@ -5140,9 +5130,9 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
if (entry->end)
I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
else
I915_WRITE(reg, 0);
I915_WRITE_FW(reg, 0);
}
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
......@@ -5157,19 +5147,22 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
}
I915_WRITE(reg, val);
I915_WRITE_FW(reg, val);
}
static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
const struct skl_plane_wm *wm,
const struct skl_ddb_allocation *ddb,
enum plane_id plane_id)
void skl_write_plane_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int level, max_level = ilk_wm_max_level(dev_priv);
enum pipe pipe = intel_crtc->pipe;
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
const struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
const struct skl_ddb_entry *ddb_uv =
&crtc_state->wm.skl.plane_ddb_uv[plane_id];
for (level = 0; level <= max_level; level++) {
skl_write_wm_level(dev_priv, PLANE_WM(pipe, plane_id, level),
......@@ -5178,29 +5171,32 @@ static void skl_write_plane_wm(struct intel_crtc *intel_crtc,
skl_write_wm_level(dev_priv, PLANE_WM_TRANS(pipe, plane_id),
&wm->trans_wm);
if (wm->is_planar && INTEL_GEN(dev_priv) < 11) {
skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
&ddb->uv_plane[pipe][plane_id]);
if (INTEL_GEN(dev_priv) >= 11) {
skl_ddb_entry_write(dev_priv,
PLANE_NV12_BUF_CFG(pipe, plane_id),
&ddb->plane[pipe][plane_id]);
} else {
skl_ddb_entry_write(dev_priv, PLANE_BUF_CFG(pipe, plane_id),
&ddb->plane[pipe][plane_id]);
if (INTEL_GEN(dev_priv) < 11)
I915_WRITE(PLANE_NV12_BUF_CFG(pipe, plane_id), 0x0);
PLANE_BUF_CFG(pipe, plane_id), ddb_y);
return;
}
if (wm->is_planar)
swap(ddb_y, ddb_uv);
skl_ddb_entry_write(dev_priv,
PLANE_BUF_CFG(pipe, plane_id), ddb_y);
skl_ddb_entry_write(dev_priv,
PLANE_NV12_BUF_CFG(pipe, plane_id), ddb_uv);
}
static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
const struct skl_plane_wm *wm,
const struct skl_ddb_allocation *ddb)
void skl_write_cursor_wm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
int level, max_level = ilk_wm_max_level(dev_priv);
enum pipe pipe = intel_crtc->pipe;
enum plane_id plane_id = plane->id;
enum pipe pipe = plane->pipe;
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
const struct skl_ddb_entry *ddb =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
for (level = 0; level <= max_level; level++) {
skl_write_wm_level(dev_priv, CUR_WM(pipe, level),
......@@ -5208,22 +5204,30 @@ static void skl_write_cursor_wm(struct intel_crtc *intel_crtc,
}
skl_write_wm_level(dev_priv, CUR_WM_TRANS(pipe), &wm->trans_wm);
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
&ddb->plane[pipe][PLANE_CURSOR]);
skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), ddb);
}
bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2)
{
if (l1->plane_en != l2->plane_en)
return false;
return l1->plane_en == l2->plane_en &&
l1->plane_res_l == l2->plane_res_l &&
l1->plane_res_b == l2->plane_res_b;
}
/* If both planes aren't enabled, the rest shouldn't matter */
if (!l1->plane_en)
return true;
static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
const struct skl_plane_wm *wm1,
const struct skl_plane_wm *wm2)
{
int level, max_level = ilk_wm_max_level(dev_priv);
return (l1->plane_res_l == l2->plane_res_l &&
l1->plane_res_b == l2->plane_res_b);
for (level = 0; level <= max_level; level++) {
if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
!skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
return false;
}
return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
......@@ -5250,13 +5254,12 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
const struct skl_pipe_wm *old_pipe_wm,
struct skl_pipe_wm *pipe_wm, /* out */
struct skl_ddb_allocation *ddb, /* out */
bool *changed /* out */)
{
struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
int ret;
ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
ret = skl_build_pipe_wm(intel_cstate, pipe_wm);
if (ret)
return ret;
......@@ -5282,42 +5285,29 @@ pipes_modified(struct drm_atomic_state *state)
}
static int
skl_ddb_add_affected_planes(struct intel_crtc_state *cstate)
skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
struct drm_atomic_state *state = cstate->base.state;
struct drm_device *dev = state->dev;
struct drm_crtc *crtc = cstate->base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
struct drm_plane *plane;
enum pipe pipe = intel_crtc->pipe;
struct intel_atomic_state *state = to_intel_atomic_state(new_crtc_state->base.state);
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_plane *plane;
drm_for_each_plane_mask(plane, dev, cstate->base.plane_mask) {
struct drm_plane_state *plane_state;
struct intel_plane *linked;
enum plane_id plane_id = to_intel_plane(plane)->id;
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
struct intel_plane_state *plane_state;
enum plane_id plane_id = plane->id;
if (skl_ddb_entry_equal(&cur_ddb->plane[pipe][plane_id],
&new_ddb->plane[pipe][plane_id]) &&
skl_ddb_entry_equal(&cur_ddb->uv_plane[pipe][plane_id],
&new_ddb->uv_plane[pipe][plane_id]))
if (skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_y[plane_id],
&new_crtc_state->wm.skl.plane_ddb_y[plane_id]) &&
skl_ddb_entry_equal(&old_crtc_state->wm.skl.plane_ddb_uv[plane_id],
&new_crtc_state->wm.skl.plane_ddb_uv[plane_id]))
continue;
plane_state = drm_atomic_get_plane_state(state, plane);
plane_state = intel_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
/* Make sure linked plane is updated too */
linked = to_intel_plane_state(plane_state)->linked_plane;
if (!linked)
continue;
plane_state = drm_atomic_get_plane_state(state, &linked->base);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane_id);
}
return 0;
......@@ -5329,18 +5319,21 @@ skl_compute_ddb(struct drm_atomic_state *state)
const struct drm_i915_private *dev_priv = to_i915(state->dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
struct intel_crtc_state *cstate;
int ret, i;
memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
for_each_new_intel_crtc_in_state(intel_state, crtc, cstate, i) {
ret = skl_allocate_pipe_ddb(cstate, ddb);
for_each_oldnew_intel_crtc_in_state(intel_state, crtc, old_crtc_state,
new_crtc_state, i) {
ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
if (ret)
return ret;
ret = skl_ddb_add_affected_planes(cstate);
ret = skl_ddb_add_affected_planes(old_crtc_state,
new_crtc_state);
if (ret)
return ret;
}
......@@ -5349,36 +5342,29 @@ skl_compute_ddb(struct drm_atomic_state *state)
}
static void
skl_print_wm_changes(const struct drm_atomic_state *state)
skl_print_wm_changes(struct intel_atomic_state *state)
{
const struct drm_device *dev = state->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
const struct intel_atomic_state *intel_state =
to_intel_atomic_state(state);
const struct drm_crtc *crtc;
const struct drm_crtc_state *cstate;
const struct intel_plane *intel_plane;
const struct skl_ddb_allocation *old_ddb = &dev_priv->wm.skl_hw.ddb;
const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state;
const struct intel_crtc_state *new_crtc_state;
struct intel_plane *plane;
struct intel_crtc *crtc;
int i;
for_each_new_crtc_in_state(state, crtc, cstate, i) {
const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
enum plane_id plane_id = intel_plane->id;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
old = &old_ddb->plane[pipe][plane_id];
new = &new_ddb->plane[pipe][plane_id];
old = &old_crtc_state->wm.skl.plane_ddb_y[plane_id];
new = &new_crtc_state->wm.skl.plane_ddb_y[plane_id];
if (skl_ddb_entry_equal(old, new))
continue;
DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
intel_plane->base.base.id,
intel_plane->base.name,
plane->base.base.id, plane->base.name,
old->start, old->end,
new->start, new->end);
}
......@@ -5475,6 +5461,66 @@ skl_ddb_add_affected_pipes(struct drm_atomic_state *state, bool *changed)
return 0;
}
/*
* To make sure the cursor watermark registers are always consistent
* with our computed state the following scenario needs special
* treatment:
*
* 1. enable cursor
* 2. move cursor entirely offscreen
* 3. disable cursor
*
* Step 2. does call .disable_plane() but does not zero the watermarks
* (since we consider an offscreen cursor still active for the purposes
* of watermarks). Step 3. would not normally call .disable_plane()
* because the actual plane visibility isn't changing, and we don't
* deallocate the cursor ddb until the pipe gets disabled. So we must
* force step 3. to call .disable_plane() to update the watermark
* registers properly.
*
 * Other planes do not suffer from this issue as their watermarks are
* calculated based on the actual plane visibility. The only time this
* can trigger for the other planes is during the initial readout as the
* default value of the watermarks registers is not zero.
*/
static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane *plane;
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
struct intel_plane_state *plane_state;
enum plane_id plane_id = plane->id;
/*
* Force a full wm update for every plane on modeset.
* Required because the reset value of the wm registers
* is non-zero, whereas we want all disabled planes to
* have zero watermarks. So if we turn off the relevant
* power well the hardware state will go out of sync
* with the software state.
*/
if (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) &&
skl_plane_wm_equals(dev_priv,
&old_crtc_state->wm.skl.optimal.planes[plane_id],
&new_crtc_state->wm.skl.optimal.planes[plane_id]))
continue;
plane_state = intel_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
new_crtc_state->update_planes |= BIT(plane_id);
}
return 0;
}
static int
skl_compute_wm(struct drm_atomic_state *state)
{
......@@ -5514,8 +5560,12 @@ skl_compute_wm(struct drm_atomic_state *state)
&to_intel_crtc_state(crtc->state)->wm.skl.optimal;
pipe_wm = &intel_cstate->wm.skl.optimal;
ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm,
&results->ddb, &changed);
ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
if (ret)
return ret;
ret = skl_wm_add_affected_planes(intel_state,
to_intel_crtc(crtc));
if (ret)
return ret;
......@@ -5529,7 +5579,7 @@ skl_compute_wm(struct drm_atomic_state *state)
intel_cstate->update_wm_pre = true;
}
skl_print_wm_changes(state);
skl_print_wm_changes(intel_state);
return 0;
}
......@@ -5540,23 +5590,12 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
const struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
enum pipe pipe = crtc->pipe;
enum plane_id plane_id;
if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base)))
return;
I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
for_each_plane_id_on_crtc(crtc, plane_id) {
if (plane_id != PLANE_CURSOR)
skl_write_plane_wm(crtc, &pipe_wm->planes[plane_id],
ddb, plane_id);
else
skl_write_cursor_wm(crtc, &pipe_wm->planes[plane_id],
ddb);
}
}
static void skl_initial_wm(struct intel_atomic_state *state,
......@@ -5566,8 +5605,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct skl_ddb_values *results = &state->wm_results;
struct skl_ddb_values *hw_vals = &dev_priv->wm.skl_hw;
enum pipe pipe = intel_crtc->pipe;
if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0)
return;
......@@ -5577,11 +5614,6 @@ static void skl_initial_wm(struct intel_atomic_state *state,
if (cstate->base.active_changed)
skl_atomic_update_crtc_wm(state, cstate);
memcpy(hw_vals->ddb.uv_plane[pipe], results->ddb.uv_plane[pipe],
sizeof(hw_vals->ddb.uv_plane[pipe]));
memcpy(hw_vals->ddb.plane[pipe], results->ddb.plane[pipe],
sizeof(hw_vals->ddb.plane[pipe]));
mutex_unlock(&dev_priv->wm.wm_mutex);
}
......@@ -5732,13 +5764,6 @@ void skl_wm_get_hw_state(struct drm_device *dev)
if (dev_priv->active_crtcs) {
/* Fully recompute DDB on first atomic commit */
dev_priv->wm.distrust_bios_wm = true;
} else {
/*
* Easy/common case; just sanitize DDB now if everything off
* Keep dbuf slice info intact
*/
memset(ddb->plane, 0, sizeof(ddb->plane));
memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane));
}
}
......
......@@ -542,6 +542,8 @@ skl_program_plane(struct intel_plane *plane,
if (fb->format->is_yuv && icl_is_hdr_plane(plane))
icl_program_input_csc_coeff(crtc_state, plane_state);
skl_write_plane_wm(plane, crtc_state);
I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
......@@ -604,6 +606,8 @@ skl_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
skl_write_plane_wm(plane, crtc_state);
I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
......