Commit 463cc940 authored by Vinod Govindapillai, committed by Radhakrishna Sripada

drm/i915: extract intel_bw_check_qgv_points()

Extract intel_bw_check_qgv_points() from intel_bw_atomic_check() to make
it easier for future platforms to handle SAGV configurations in their
own way.
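
For reference, the extracted wrapper as added by this patch is quoted
below; the comment marking where a future platform variation would hook
in is editorial and not part of the change itself:

static int intel_bw_check_qgv_points(struct drm_i915_private *i915,
				     const struct intel_bw_state *old_bw_state,
				     struct intel_bw_state *new_bw_state)
{
	unsigned int data_rate = intel_bw_data_rate(i915, new_bw_state);
	unsigned int num_active_planes =
		intel_bw_num_active_planes(i915, new_bw_state);

	data_rate = DIV_ROUND_UP(data_rate, 1000);

	/*
	 * A platform-specific variation of the QGV/PSF point selection
	 * would be dispatched from here; with this patch all
	 * DISPLAY_VER >= 11 platforms still take the ICL path.
	 */
	return icl_find_qgv_points(i915, data_rate, num_active_planes,
				   old_bw_state, new_bw_state);
}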
Signed-off-by: Vinod Govindapillai <vinod.govindapillai@intel.com>
Reviewed-by: Stanislav Lisovskiy <stanislav.lisovskiy@intel.com>
Signed-off-by: Radhakrishna Sripada <radhakrishna.sripada@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230606093509.221709-5-vinod.govindapillai@intel.com
parent 88d0ecbd
@@ -803,6 +803,128 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state)
	return to_intel_bw_state(bw_state);
}

static int icl_find_qgv_points(struct drm_i915_private *i915,
			       unsigned int data_rate,
			       unsigned int num_active_planes,
			       const struct intel_bw_state *old_bw_state,
			       struct intel_bw_state *new_bw_state)
{
	unsigned int max_bw_point = 0;
	unsigned int max_bw = 0;
	unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points;
	unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points;
	u16 psf_points = 0;
	u16 qgv_points = 0;
	int i;
	int ret;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate;

		if (DISPLAY_VER(i915) > 11)
			max_data_rate = tgl_max_bw(i915, num_active_planes, i);
		else
			max_data_rate = icl_max_bw(i915, num_active_planes, i);
		/*
		 * We need to know which qgv point gives us
		 * maximum bandwidth in order to disable SAGV
		 * if we find that we exceed SAGV block time
		 * with watermarks. By that moment we already
		 * have those, as it is calculated earlier in
		 * intel_atomic_check,
		 */
		if (max_data_rate > max_bw) {
			max_bw_point = i;
			max_bw = max_data_rate;
		}
		if (max_data_rate >= data_rate)
			qgv_points |= BIT(i);

		drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d\n",
			    i, max_data_rate, data_rate);
	}

	for (i = 0; i < num_psf_gv_points; i++) {
		unsigned int max_data_rate = adl_psf_bw(i915, i);

		if (max_data_rate >= data_rate)
			psf_points |= BIT(i);

		drm_dbg_kms(&i915->drm, "PSF GV point %d: max bw %d"
			    " required %d\n",
			    i, max_data_rate, data_rate);
	}

	/*
	 * BSpec states that we always should have at least one allowed point
	 * left, so if we couldn't - simply reject the configuration for obvious
	 * reasons.
	 */
	if (qgv_points == 0) {
		drm_dbg_kms(&i915->drm, "No QGV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	if (num_psf_gv_points > 0 && psf_points == 0) {
		drm_dbg_kms(&i915->drm, "No PSF GV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/*
	 * Leave only single point with highest bandwidth, if
	 * we can't enable SAGV due to the increased memory latency it may
	 * cause.
	 */
	if (!intel_can_enable_sagv(i915, new_bw_state)) {
		qgv_points = BIT(max_bw_point);
		drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point %d\n",
			    max_bw_point);
	}

	/*
	 * We store the ones which need to be masked as that is what PCode
	 * actually accepts as a parameter.
	 */
	new_bw_state->qgv_points_mask =
		~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
		  ADLS_PCODE_REQ_PSF_PT(psf_points)) &
		icl_qgv_points_mask(i915);

	/*
	 * If the actual mask had changed we need to make sure that
	 * the commits are serialized(in case this is a nomodeset, nonblocking)
	 */
	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}

static int intel_bw_check_qgv_points(struct drm_i915_private *i915,
				     const struct intel_bw_state *old_bw_state,
				     struct intel_bw_state *new_bw_state)
{
	unsigned int data_rate = intel_bw_data_rate(i915, new_bw_state);
	unsigned int num_active_planes =
		intel_bw_num_active_planes(i915, new_bw_state);

	data_rate = DIV_ROUND_UP(data_rate, 1000);

	return icl_find_qgv_points(i915, data_rate, num_active_planes,
				   old_bw_state, new_bw_state);
}

static bool intel_bw_state_changed(struct drm_i915_private *i915,
				   const struct intel_bw_state *old_bw_state,
				   const struct intel_bw_state *new_bw_state)
@@ -1049,20 +1171,14 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan
int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_bw_state *old_bw_state;
	struct intel_bw_state *new_bw_state;
	unsigned int data_rate;
	unsigned int num_active_planes;
	int i, ret;
	u16 qgv_points = 0, psf_points = 0;
	unsigned int max_bw_point = 0, max_bw = 0;
	unsigned int num_qgv_points = dev_priv->display.bw.max[0].num_qgv_points;
	unsigned int num_psf_gv_points = dev_priv->display.bw.max[0].num_psf_gv_points;
	bool changed = false;
	struct drm_i915_private *i915 = to_i915(state->base.dev);
	struct intel_bw_state *new_bw_state;
	const struct intel_bw_state *old_bw_state;
	int ret;

	/* FIXME earlier gens need some checks too */
	if (DISPLAY_VER(dev_priv) < 11)
	if (DISPLAY_VER(i915) < 11)
		return 0;

	ret = intel_bw_check_data_rate(state, &changed);
@@ -1073,8 +1189,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
	new_bw_state = intel_atomic_get_new_bw_state(state);

	if (new_bw_state &&
	    intel_can_enable_sagv(dev_priv, old_bw_state) !=
	    intel_can_enable_sagv(dev_priv, new_bw_state))
	    intel_can_enable_sagv(i915, old_bw_state) !=
	    intel_can_enable_sagv(i915, new_bw_state))
		changed = true;

	/*
@@ -1084,101 +1200,10 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
	if (!changed)
		return 0;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	ret = intel_bw_check_qgv_points(i915, old_bw_state, new_bw_state);
	if (ret)
		return ret;

	data_rate = intel_bw_data_rate(dev_priv, new_bw_state);
	data_rate = DIV_ROUND_UP(data_rate, 1000);

	num_active_planes = intel_bw_num_active_planes(dev_priv, new_bw_state);

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate;

		if (DISPLAY_VER(dev_priv) > 11)
			max_data_rate = tgl_max_bw(dev_priv, num_active_planes, i);
		else
			max_data_rate = icl_max_bw(dev_priv, num_active_planes, i);
		/*
		 * We need to know which qgv point gives us
		 * maximum bandwidth in order to disable SAGV
		 * if we find that we exceed SAGV block time
		 * with watermarks. By that moment we already
		 * have those, as it is calculated earlier in
		 * intel_atomic_check,
		 */
		if (max_data_rate > max_bw) {
			max_bw_point = i;
			max_bw = max_data_rate;
		}
		if (max_data_rate >= data_rate)
			qgv_points |= BIT(i);

		drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
			    i, max_data_rate, data_rate);
	}

	for (i = 0; i < num_psf_gv_points; i++) {
		unsigned int max_data_rate = adl_psf_bw(dev_priv, i);

		if (max_data_rate >= data_rate)
			psf_points |= BIT(i);

		drm_dbg_kms(&dev_priv->drm, "PSF GV point %d: max bw %d"
			    " required %d\n",
			    i, max_data_rate, data_rate);
	}

	/*
	 * BSpec states that we always should have at least one allowed point
	 * left, so if we couldn't - simply reject the configuration for obvious
	 * reasons.
	 */
	if (qgv_points == 0) {
		drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	if (num_psf_gv_points > 0 && psf_points == 0) {
		drm_dbg_kms(&dev_priv->drm, "No PSF GV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/*
	 * Leave only single point with highest bandwidth, if
	 * we can't enable SAGV due to the increased memory latency it may
	 * cause.
	 */
	if (!intel_can_enable_sagv(dev_priv, new_bw_state)) {
		qgv_points = BIT(max_bw_point);
		drm_dbg_kms(&dev_priv->drm, "No SAGV, using single QGV point %d\n",
			    max_bw_point);
	}

	/*
	 * We store the ones which need to be masked as that is what PCode
	 * actually accepts as a parameter.
	 */
	new_bw_state->qgv_points_mask =
		~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
		  ADLS_PCODE_REQ_PSF_PT(psf_points)) &
		icl_qgv_points_mask(dev_priv);

	/*
	 * If the actual mask had changed we need to make sure that
	 * the commits are serialized(in case this is a nomodeset, nonblocking)
	 */
	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}