Commit 4a8ab5ea authored by Chris Wilson

drm/i915: Mark up Ironlake ips with rpm wakerefs

Currently Ironlake operates under the assumption that rpm is awake (and
its error checking is disabled). As such, we have missed a few places
where we access registers without taking the rpm wakeref and thus
trigger warnings, intel_ips being one culprit.

As this involved adding a potentially sleeping rpm_get, we have to
rearrange the spinlocks slightly and so switch to acquiring a device
reference under the spinlock rather than holding the spinlock for the
whole operation. To be consistent, we apply the same pattern throughout
the intel_ips interface, even though this adds a few more atomic
operations than strictly necessary in a few cases.

v2: Sagar noted that the memory barriers around setting mch_dev were
overkill, as we only need ordering there, and that i915_emon_status was
still taking struct_mutex for no reason while lacking the rpm wakeref.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jani Nikula <jani.nikula@intel.com>
Reviewed-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190114142129.24398-21-chris@chris-wilson.co.uk
parent 8d761e77
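
For context, here is a minimal, self-contained sketch (not part of this commit;
the example_dev type and function names are invented for illustration) of the
general pattern the diff below introduces: publish a pointer with
rcu_assign_pointer() and have readers pin the object with kref_get_unless_zero()
under rcu_read_lock() before use, instead of guarding every lookup with a
spinlock. The new mchdev_get()/drm_dev_put() flow follows this shape, with the
runtime-pm wakeref taken only once a reference is held.

/*
 * Illustrative sketch only -- not taken from the i915 tree. Names such as
 * example_dev, example_dev_publish() and example_dev_get() are made up.
 */
#include <linux/kref.h>
#include <linux/rcupdate.h>

struct example_dev {
        struct kref ref;
};

/* Writer side publishes (or clears) the pointer, cf. intel_gpu_ips_init(). */
static struct example_dev __rcu *example_dev_ptr;

static void example_dev_publish(struct example_dev *dev)
{
        rcu_assign_pointer(example_dev_ptr, dev);
}

/*
 * Reader side: take a reference before leaving the RCU read-side critical
 * section, cf. the new mchdev_get().
 */
static struct example_dev *example_dev_get(void)
{
        struct example_dev *dev;

        rcu_read_lock();
        dev = rcu_dereference(example_dev_ptr);
        /* Only succeed if the refcount has not already dropped to zero. */
        if (dev && !kref_get_unless_zero(&dev->ref))
                dev = NULL;
        rcu_read_unlock();

        return dev; /* caller must drop the reference when done */
}

Compared with the old code, readers no longer need mchdev_lock just to check
whether the device exists; the spinlock is only taken around the actual state
access once a reference (and, where needed, an rpm wakeref) is held.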
@@ -1741,32 +1741,24 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 
 static int i915_emon_status(struct seq_file *m, void *unused)
 {
-        struct drm_i915_private *dev_priv = node_to_i915(m->private);
-        struct drm_device *dev = &dev_priv->drm;
-        unsigned long temp, chipset, gfx;
+        struct drm_i915_private *i915 = node_to_i915(m->private);
         intel_wakeref_t wakeref;
-        int ret;
 
-        if (!IS_GEN(dev_priv, 5))
+        if (!IS_GEN(i915, 5))
                 return -ENODEV;
 
-        ret = mutex_lock_interruptible(&dev->struct_mutex);
-        if (ret)
-                return ret;
+        with_intel_runtime_pm(i915, wakeref) {
+                unsigned long temp, chipset, gfx;
 
-        wakeref = intel_runtime_pm_get(dev_priv);
+                temp = i915_mch_val(i915);
+                chipset = i915_chipset_val(i915);
+                gfx = i915_gfx_val(i915);
 
-        temp = i915_mch_val(dev_priv);
-        chipset = i915_chipset_val(dev_priv);
-        gfx = i915_gfx_val(dev_priv);
-        mutex_unlock(&dev->struct_mutex);
-        intel_runtime_pm_put(dev_priv, wakeref);
-
-        seq_printf(m, "GMCH temp: %ld\n", temp);
-        seq_printf(m, "Chipset power: %ld\n", chipset);
-        seq_printf(m, "GFX power: %ld\n", gfx);
-        seq_printf(m, "Total power: %ld\n", chipset + gfx);
+                seq_printf(m, "GMCH temp: %ld\n", temp);
+                seq_printf(m, "Chipset power: %ld\n", chipset);
+                seq_printf(m, "GFX power: %ld\n", gfx);
+                seq_printf(m, "Total power: %ld\n", chipset + gfx);
+        }
 
         return 0;
 }

@@ -1780,6 +1780,9 @@ void i915_driver_unload(struct drm_device *dev)
 
         i915_driver_unregister(dev_priv);
 
+        /* Flush any external code that still may be under the RCU lock */
+        synchronize_rcu();
+
         if (i915_gem_suspend(dev_priv))
                 DRM_ERROR("failed to idle hardware; continuing to unload!\n");
 

@@ -6203,10 +6203,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv)
  */
 DEFINE_SPINLOCK(mchdev_lock);
 
-/* Global for IPS driver to get at the current i915 device. Protected by
- * mchdev_lock. */
-static struct drm_i915_private *i915_mch_dev;
-
 bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
 {
         u16 rgvswctl;

@@ -7849,16 +7845,17 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
 
 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 {
-        unsigned long val;
+        intel_wakeref_t wakeref;
+        unsigned long val = 0;
 
         if (!IS_GEN(dev_priv, 5))
                 return 0;
 
-        spin_lock_irq(&mchdev_lock);
-
-        val = __i915_chipset_val(dev_priv);
-
-        spin_unlock_irq(&mchdev_lock);
+        with_intel_runtime_pm(dev_priv, wakeref) {
+                spin_lock_irq(&mchdev_lock);
+                val = __i915_chipset_val(dev_priv);
+                spin_unlock_irq(&mchdev_lock);
+        }
 
         return val;
 }

@@ -7935,14 +7932,16 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
 
 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
 {
+        intel_wakeref_t wakeref;
+
         if (!IS_GEN(dev_priv, 5))
                 return;
 
-        spin_lock_irq(&mchdev_lock);
-
-        __i915_update_gfx_val(dev_priv);
-
-        spin_unlock_irq(&mchdev_lock);
+        with_intel_runtime_pm(dev_priv, wakeref) {
+                spin_lock_irq(&mchdev_lock);
+                __i915_update_gfx_val(dev_priv);
+                spin_unlock_irq(&mchdev_lock);
+        }
 }
 
 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)

@@ -7984,18 +7983,34 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
 
 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
 {
-        unsigned long val;
+        intel_wakeref_t wakeref;
+        unsigned long val = 0;
 
         if (!IS_GEN(dev_priv, 5))
                 return 0;
 
-        spin_lock_irq(&mchdev_lock);
-
-        val = __i915_gfx_val(dev_priv);
-
-        spin_unlock_irq(&mchdev_lock);
+        with_intel_runtime_pm(dev_priv, wakeref) {
+                spin_lock_irq(&mchdev_lock);
+                val = __i915_gfx_val(dev_priv);
+                spin_unlock_irq(&mchdev_lock);
+        }
 
         return val;
 }
 
+static struct drm_i915_private *i915_mch_dev;
+
+static struct drm_i915_private *mchdev_get(void)
+{
+        struct drm_i915_private *i915;
+
+        rcu_read_lock();
+        i915 = i915_mch_dev;
+        if (!kref_get_unless_zero(&i915->drm.ref))
+                i915 = NULL;
+        rcu_read_unlock();
+
+        return i915;
+}
+
 /**

@@ -8006,23 +8021,24 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
  */
 unsigned long i915_read_mch_val(void)
 {
-        struct drm_i915_private *dev_priv;
-        unsigned long chipset_val, graphics_val, ret = 0;
-
-        spin_lock_irq(&mchdev_lock);
-        if (!i915_mch_dev)
-                goto out_unlock;
-        dev_priv = i915_mch_dev;
-
-        chipset_val = __i915_chipset_val(dev_priv);
-        graphics_val = __i915_gfx_val(dev_priv);
-
-        ret = chipset_val + graphics_val;
+        struct drm_i915_private *i915;
+        unsigned long chipset_val = 0;
+        unsigned long graphics_val = 0;
+        intel_wakeref_t wakeref;
 
-out_unlock:
-        spin_unlock_irq(&mchdev_lock);
+        i915 = mchdev_get();
+        if (!i915)
+                return 0;
 
-        return ret;
+        with_intel_runtime_pm(i915, wakeref) {
+                spin_lock_irq(&mchdev_lock);
+                chipset_val = __i915_chipset_val(i915);
+                graphics_val = __i915_gfx_val(i915);
+                spin_unlock_irq(&mchdev_lock);
+        }
+
+        drm_dev_put(&i915->drm);
+        return chipset_val + graphics_val;
 }
 EXPORT_SYMBOL_GPL(i915_read_mch_val);

@@ -8033,23 +8049,19 @@ EXPORT_SYMBOL_GPL(i915_read_mch_val);
  */
 bool i915_gpu_raise(void)
 {
-        struct drm_i915_private *dev_priv;
-        bool ret = true;
-
-        spin_lock_irq(&mchdev_lock);
-        if (!i915_mch_dev) {
-                ret = false;
-                goto out_unlock;
-        }
-        dev_priv = i915_mch_dev;
+        struct drm_i915_private *i915;
 
-        if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
-                dev_priv->ips.max_delay--;
+        i915 = mchdev_get();
+        if (!i915)
+                return false;
 
-out_unlock:
+        spin_lock_irq(&mchdev_lock);
+        if (i915->ips.max_delay > i915->ips.fmax)
+                i915->ips.max_delay--;
         spin_unlock_irq(&mchdev_lock);
 
-        return ret;
+        drm_dev_put(&i915->drm);
+        return true;
 }
 EXPORT_SYMBOL_GPL(i915_gpu_raise);

@@ -8061,23 +8073,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_raise);
  */
 bool i915_gpu_lower(void)
 {
-        struct drm_i915_private *dev_priv;
-        bool ret = true;
-
-        spin_lock_irq(&mchdev_lock);
-        if (!i915_mch_dev) {
-                ret = false;
-                goto out_unlock;
-        }
-        dev_priv = i915_mch_dev;
+        struct drm_i915_private *i915;
 
-        if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
-                dev_priv->ips.max_delay++;
+        i915 = mchdev_get();
+        if (!i915)
+                return false;
 
-out_unlock:
+        spin_lock_irq(&mchdev_lock);
+        if (i915->ips.max_delay < i915->ips.min_delay)
+                i915->ips.max_delay++;
         spin_unlock_irq(&mchdev_lock);
 
-        return ret;
+        drm_dev_put(&i915->drm);
+        return true;
 }
 EXPORT_SYMBOL_GPL(i915_gpu_lower);

@@ -8088,13 +8096,16 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
  */
 bool i915_gpu_busy(void)
 {
-        bool ret = false;
+        struct drm_i915_private *i915;
+        bool ret;
 
-        spin_lock_irq(&mchdev_lock);
-        if (i915_mch_dev)
-                ret = i915_mch_dev->gt.awake;
-        spin_unlock_irq(&mchdev_lock);
+        i915 = mchdev_get();
+        if (!i915)
+                return false;
+
+        ret = i915->gt.awake;
+        drm_dev_put(&i915->drm);
 
         return ret;
 }
 EXPORT_SYMBOL_GPL(i915_gpu_busy);

@@ -8107,24 +8118,19 @@ EXPORT_SYMBOL_GPL(i915_gpu_busy);
  */
 bool i915_gpu_turbo_disable(void)
 {
-        struct drm_i915_private *dev_priv;
-        bool ret = true;
-
-        spin_lock_irq(&mchdev_lock);
-        if (!i915_mch_dev) {
-                ret = false;
-                goto out_unlock;
-        }
-        dev_priv = i915_mch_dev;
-
-        dev_priv->ips.max_delay = dev_priv->ips.fstart;
+        struct drm_i915_private *i915;
+        bool ret;
 
-        if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
-                ret = false;
+        i915 = mchdev_get();
+        if (!i915)
+                return false;
 
-out_unlock:
+        spin_lock_irq(&mchdev_lock);
+        i915->ips.max_delay = i915->ips.fstart;
+        ret = ironlake_set_drps(i915, i915->ips.fstart);
         spin_unlock_irq(&mchdev_lock);
 
+        drm_dev_put(&i915->drm);
         return ret;
 }
 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

@@ -8153,18 +8159,14 @@ void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
 {
         /* We only register the i915 ips part with intel-ips once everything is
          * set up, to avoid intel-ips sneaking in and reading bogus values. */
-        spin_lock_irq(&mchdev_lock);
-        i915_mch_dev = dev_priv;
-        spin_unlock_irq(&mchdev_lock);
+        rcu_assign_pointer(i915_mch_dev, dev_priv);
 
         ips_ping_for_i915_load();
 }
 
 void intel_gpu_ips_teardown(void)
 {
-        spin_lock_irq(&mchdev_lock);
-        i915_mch_dev = NULL;
-        spin_unlock_irq(&mchdev_lock);
+        rcu_assign_pointer(i915_mch_dev, NULL);
 }
 
 static void intel_init_emon(struct drm_i915_private *dev_priv)