Commit ffe02b40 authored by Ville Syrjälä, committed by Daniel Vetter

drm/i915: Introduce intel_set_rps()

Replace the valleyview_set_rps() and gen6_set_rps() calls with
intel_set_rps() which itself does the IS_VALLEYVIEW() check. The
code becomes simpler since the callers don't have to do this check
themselves.

Most of the change was performed with the following semantic patch:
@@
expression E1, E2, E3;
@@
- if (IS_VALLEYVIEW(E1)) {
-  valleyview_set_rps(E2, E3);
- } else {
-  gen6_set_rps(E2, E3);
- }
+ intel_set_rps(E2, E3);
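
For reference, a rule like this is normally applied with Coccinelle's
spatch tool; assuming the rule above is saved as intel_set_rps.cocci
(a file name chosen here purely for illustration), an invocation along
the lines of

  spatch --sp-file intel_set_rps.cocci --in-place --dir drivers/gpu/drm/i915

should reproduce the bulk of the change.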

Adding intel_set_rps() and making valleyview_set_rps() and gen6_set_rps()
static was done manually. Also valleyview_set_rps() had to be moved a
bit to avoid a forward declaration.

v2: Use a less greedy semantic patch

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent ab8d6675
@@ -4214,10 +4214,7 @@ i915_max_freq_set(void *data, u64 val)
 	dev_priv->rps.max_freq_softlimit = val;
 
-	if (IS_VALLEYVIEW(dev))
-		valleyview_set_rps(dev, val);
-	else
-		gen6_set_rps(dev, val);
+	intel_set_rps(dev, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4292,10 +4289,7 @@ i915_min_freq_set(void *data, u64 val)
 	dev_priv->rps.min_freq_softlimit = val;
 
-	if (IS_VALLEYVIEW(dev))
-		valleyview_set_rps(dev, val);
-	else
-		gen6_set_rps(dev, val);
+	intel_set_rps(dev, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3183,8 +3183,7 @@ extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
 extern void intel_init_pch_refclk(struct drm_device *dev);
-extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void valleyview_set_rps(struct drm_device *dev, u8 val);
+extern void intel_set_rps(struct drm_device *dev, u8 val);
 extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
 				  bool enable);
 extern void intel_detect_pch(struct drm_device *dev);
@@ -1243,10 +1243,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
 
-	if (IS_VALLEYVIEW(dev_priv->dev))
-		valleyview_set_rps(dev_priv->dev, new_delay);
-	else
-		gen6_set_rps(dev_priv->dev, new_delay);
+	intel_set_rps(dev_priv->dev, new_delay);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
@@ -402,10 +402,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	/* We still need *_set_rps to process the new max_delay and
 	 * update the interrupt limits and PMINTRMSK even though
 	 * frequency request may be unchanged. */
-	if (IS_VALLEYVIEW(dev))
-		valleyview_set_rps(dev, val);
-	else
-		gen6_set_rps(dev, val);
+	intel_set_rps(dev, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -464,10 +461,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	/* We still need *_set_rps to process the new min_delay and
 	 * update the interrupt limits and PMINTRMSK even though
 	 * frequency request may be unchanged. */
-	if (IS_VALLEYVIEW(dev))
-		valleyview_set_rps(dev, val);
-	else
-		gen6_set_rps(dev, val);
+	intel_set_rps(dev, val);
 
 	mutex_unlock(&dev_priv->rps.hw_lock);
@@ -3750,7 +3750,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
 /* gen6_set_rps is called to update the frequency request, but should also be
  * called when the range (min_delay and max_delay) is modified so that we can
  * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-void gen6_set_rps(struct drm_device *dev, u8 val)
+static void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3786,6 +3786,27 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	trace_intel_gpu_freq_change(val * 50);
 }
 
+static void valleyview_set_rps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
+	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
+
+	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
+		      "Odd GPU freq value\n"))
+		val &= ~1;
+
+	if (val != dev_priv->rps.cur_freq)
+		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+
+	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
+
+	dev_priv->rps.cur_freq = val;
+	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+}
+
 /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
  *
  * * If Gfx is Idle, then
@@ -3850,38 +3871,20 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
 void gen6_rps_boost(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
-
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (dev_priv->rps.enabled) {
-		if (IS_VALLEYVIEW(dev))
-			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
-		else
-			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
+		intel_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
 		dev_priv->rps.last_adj = 0;
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-void valleyview_set_rps(struct drm_device *dev, u8 val)
+void intel_set_rps(struct drm_device *dev, u8 val)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
-	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
-	WARN_ON(val < dev_priv->rps.min_freq_softlimit);
-
-	if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
-		      "Odd GPU freq value\n"))
-		val &= ~1;
-
-	if (val != dev_priv->rps.cur_freq)
-		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
-
-	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
-
-	dev_priv->rps.cur_freq = val;
-	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+	if (IS_VALLEYVIEW(dev))
+		valleyview_set_rps(dev, val);
+	else
+		gen6_set_rps(dev, val);
 }
 
 static void gen9_disable_rps(struct drm_device *dev)