Commit 562d9bae authored by Sagar Arun Kamble, committed by Chris Wilson

drm/i915: Name structure in dev_priv that contains RPS/RC6 state as "gt_pm"

Prepare a substructure, rps, for the RPS-related state. autoenable_work
is used for RC6 too, hence it is defined outside the rps structure. As
part of this change, many functions are refactored to access RPS-related
members through a local struct intel_rps *rps pointer, and the
intel_rps_client pointer variables in various functions are renamed to
rps_client.
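
In outline, the resulting layout is as follows (a simplified sketch
showing only a subset of fields; see the struct hunks in the diff below
for the full definitions):

    struct intel_rps {
            /* formerly the body of intel_gen6_power_mgmt */
            bool enabled;
            atomic_t num_waiters;
            atomic_t boosts;
            struct intel_rps_ei ei;
    };

    struct intel_gen6_power_mgmt {
            struct intel_rps rps;
            struct delayed_work autoenable_work; /* shared with RC6 */
    };

    /* in struct drm_i915_private: gen6+ GT PM state */
    struct intel_gen6_power_mgmt gt_pm;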

v2: Rebase.

v3: s/pm/gt_pm (Chris)
Refactored access to the rps structure by declaring a struct intel_rps *
local in many functions.
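
For example, the access pattern in the refactored functions becomes
(illustrative; compare the gen6_rps_irq_handler() hunk below):

    struct intel_rps *rps = &dev_priv->gt_pm.rps;

    if (rps->interrupts_enabled) {
            rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
            schedule_work(&rps->work);
    }
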
Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com> #1
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1507360055-19948-9-git-send-email-sagar.a.kamble@intel.com
Acked-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171010213010.7415-8-chris@chris-wilson.co.uk
parent 9f817501
@@ -2502,7 +2502,7 @@ static int intel_runtime_suspend(struct device *kdev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
-	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
+	if (WARN_ON_ONCE(!(dev_priv->gt_pm.rps.enabled && intel_enable_rc6())))
 		return -ENODEV;
 
 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
......
@@ -609,7 +609,7 @@ struct drm_i915_file_private {
 
 	struct intel_rps_client {
 		atomic_t boosts;
-	} rps;
+	} rps_client;
 
 	unsigned int bsd_engine;
@@ -1317,7 +1317,7 @@ struct intel_rps_ei {
 	u32 media_c0;
 };
 
-struct intel_gen6_power_mgmt {
+struct intel_rps {
 	/*
 	 * work, interrupts_enabled and pm_iir are protected by
 	 * dev_priv->irq_lock
@@ -1358,7 +1358,6 @@ struct intel_gen6_power_mgmt {
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
 	bool enabled;
-	struct delayed_work autoenable_work;
 	atomic_t num_waiters;
 	atomic_t boosts;
@@ -1366,6 +1365,11 @@ struct intel_gen6_power_mgmt {
 	struct intel_rps_ei ei;
 };
 
+struct intel_gen6_power_mgmt {
+	struct intel_rps rps;
+	struct delayed_work autoenable_work;
+};
+
 /* defined intel_pm.c */
 extern spinlock_t mchdev_lock;
@@ -2421,8 +2425,8 @@ struct drm_i915_private {
 	 */
 	struct mutex pcu_lock;
 
-	/* gen6+ rps state */
-	struct intel_gen6_power_mgmt rps;
+	/* gen6+ GT PM state */
+	struct intel_gen6_power_mgmt gt_pm;
 
 	/* ilk-only ips/rps state. Everything in here is protected by the global
 	 * mchdev_lock in intel_pm.c */
......
@@ -358,7 +358,7 @@ static long
 i915_gem_object_wait_fence(struct dma_fence *fence,
 			   unsigned int flags,
 			   long timeout,
-			   struct intel_rps_client *rps)
+			   struct intel_rps_client *rps_client)
 {
 	struct drm_i915_gem_request *rq;
@@ -391,11 +391,11 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
 	 * forcing the clocks too high for the whole system, we only allow
 	 * each client to waitboost once in a busy period.
 	 */
-	if (rps) {
+	if (rps_client) {
 		if (INTEL_GEN(rq->i915) >= 6)
-			gen6_rps_boost(rq, rps);
+			gen6_rps_boost(rq, rps_client);
 		else
-			rps = NULL;
+			rps_client = NULL;
 	}
 
 	timeout = i915_wait_request(rq, flags, timeout);
@@ -411,7 +411,7 @@ static long
 i915_gem_object_wait_reservation(struct reservation_object *resv,
 				 unsigned int flags,
 				 long timeout,
-				 struct intel_rps_client *rps)
+				 struct intel_rps_client *rps_client)
 {
 	unsigned int seq = __read_seqcount_begin(&resv->seq);
 	struct dma_fence *excl;
@@ -430,7 +430,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 		for (i = 0; i < count; i++) {
 			timeout = i915_gem_object_wait_fence(shared[i],
 							     flags, timeout,
-							     rps);
+							     rps_client);
 			if (timeout < 0)
 				break;
@@ -447,7 +447,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
 	}
 
 	if (excl && timeout >= 0) {
-		timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
+						     rps_client);
 		prune_fences = timeout >= 0;
 	}
@@ -543,7 +544,7 @@ int
 i915_gem_object_wait(struct drm_i915_gem_object *obj,
 		     unsigned int flags,
 		     long timeout,
-		     struct intel_rps_client *rps)
+		     struct intel_rps_client *rps_client)
 {
 	might_sleep();
 #if IS_ENABLED(CONFIG_LOCKDEP)
@@ -555,7 +556,7 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
 
 	timeout = i915_gem_object_wait_reservation(obj->resv,
 						   flags, timeout,
-						   rps);
+						   rps_client);
 
 	return timeout < 0 ? timeout : 0;
 }
@@ -563,7 +564,7 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
 {
 	struct drm_i915_file_private *fpriv = file->driver_priv;
 
-	return &fpriv->rps;
+	return &fpriv->rps_client;
 }
 
 static int
......
@@ -416,7 +416,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 
 	spin_lock_irq(&request->lock);
 	if (request->waitboost)
-		atomic_dec(&request->i915->rps.num_waiters);
+		atomic_dec(&request->i915->gt_pm.rps.num_waiters);
 	dma_fence_signal_locked(&request->fence);
 	spin_unlock_irq(&request->lock);
......
@@ -1028,6 +1028,7 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
 
 static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
 {
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int irqs;
@@ -1064,12 +1065,13 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
 	 * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
 	 * result in the register bit being left SET!
 	 */
-	dev_priv->rps.pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
-	dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+	rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
+	rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 }
 
 static void guc_interrupts_release(struct drm_i915_private *dev_priv)
 {
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	int irqs;
@@ -1088,8 +1090,8 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
 	I915_WRITE(GUC_VCS2_VCS1_IER, 0);
 	I915_WRITE(GUC_WD_VECS_IER, 0);
 
-	dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
-	dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+	rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+	rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
 }
 
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
......
@@ -404,19 +404,21 @@ void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
 {
 	spin_lock_irq(&dev_priv->irq_lock);
 	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
-	dev_priv->rps.pm_iir = 0;
+	dev_priv->gt_pm.rps.pm_iir = 0;
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-	if (READ_ONCE(dev_priv->rps.interrupts_enabled))
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+	if (READ_ONCE(rps->interrupts_enabled))
 		return;
 
 	spin_lock_irq(&dev_priv->irq_lock);
-	WARN_ON_ONCE(dev_priv->rps.pm_iir);
+	WARN_ON_ONCE(rps->pm_iir);
 	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
-	dev_priv->rps.interrupts_enabled = true;
+	rps->interrupts_enabled = true;
 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 	spin_unlock_irq(&dev_priv->irq_lock);
@@ -424,11 +426,13 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
 
 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 {
-	if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
+	if (!READ_ONCE(rps->interrupts_enabled))
 		return;
 
 	spin_lock_irq(&dev_priv->irq_lock);
-	dev_priv->rps.interrupts_enabled = false;
+	rps->interrupts_enabled = false;
 
 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
@@ -442,7 +446,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
 	 * we will reset the GPU to minimum frequencies, so the current
 	 * state of the worker can be discarded.
 	 */
-	cancel_work_sync(&dev_priv->rps.work);
+	cancel_work_sync(&rps->work);
 	gen6_reset_rps_interrupts(dev_priv);
 }
@@ -1119,12 +1123,13 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv,
 
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
 {
-	memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
+	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
 }
 
 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
-	const struct intel_rps_ei *prev = &dev_priv->rps.ei;
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	const struct intel_rps_ei *prev = &rps->ei;
 	struct intel_rps_ei now;
 	u32 events = 0;
@@ -1151,28 +1156,29 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 		c0 = max(render, media);
 		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
 
-		if (c0 > time * dev_priv->rps.up_threshold)
+		if (c0 > time * rps->up_threshold)
 			events = GEN6_PM_RP_UP_THRESHOLD;
-		else if (c0 < time * dev_priv->rps.down_threshold)
+		else if (c0 < time * rps->down_threshold)
 			events = GEN6_PM_RP_DOWN_THRESHOLD;
 	}
 
-	dev_priv->rps.ei = now;
+	rps->ei = now;
 	return events;
 }
 
 static void gen6_pm_rps_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
-		container_of(work, struct drm_i915_private, rps.work);
+		container_of(work, struct drm_i915_private, gt_pm.rps.work);
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	bool client_boost = false;
 	int new_delay, adj, min, max;
 	u32 pm_iir = 0;
 
 	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->rps.interrupts_enabled) {
-		pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
-		client_boost = atomic_read(&dev_priv->rps.num_waiters);
+	if (rps->interrupts_enabled) {
+		pm_iir = fetch_and_zero(&rps->pm_iir);
+		client_boost = atomic_read(&rps->num_waiters);
 	}
 	spin_unlock_irq(&dev_priv->irq_lock);
@@ -1185,14 +1191,14 @@ static void gen6_pm_rps_work(struct work_struct *work)
 		pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
 
-	adj = dev_priv->rps.last_adj;
-	new_delay = dev_priv->rps.cur_freq;
-	min = dev_priv->rps.min_freq_softlimit;
-	max = dev_priv->rps.max_freq_softlimit;
+	adj = rps->last_adj;
+	new_delay = rps->cur_freq;
+	min = rps->min_freq_softlimit;
+	max = rps->max_freq_softlimit;
 	if (client_boost)
-		max = dev_priv->rps.max_freq;
-	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
-		new_delay = dev_priv->rps.boost_freq;
+		max = rps->max_freq;
+	if (client_boost && new_delay < rps->boost_freq) {
+		new_delay = rps->boost_freq;
 		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
 		if (adj > 0)
@@ -1200,15 +1206,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
 		else /* CHV needs even encode values */
 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
 
-		if (new_delay >= dev_priv->rps.max_freq_softlimit)
+		if (new_delay >= rps->max_freq_softlimit)
 			adj = 0;
 	} else if (client_boost) {
 		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
-		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
-			new_delay = dev_priv->rps.efficient_freq;
-		else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
-			new_delay = dev_priv->rps.min_freq_softlimit;
+		if (rps->cur_freq > rps->efficient_freq)
+			new_delay = rps->efficient_freq;
+		else if (rps->cur_freq > rps->min_freq_softlimit)
+			new_delay = rps->min_freq_softlimit;
 		adj = 0;
 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
 		if (adj < 0)
@@ -1216,13 +1222,13 @@ static void gen6_pm_rps_work(struct work_struct *work)
 		else /* CHV needs even encode values */
 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
 
-		if (new_delay <= dev_priv->rps.min_freq_softlimit)
+		if (new_delay <= rps->min_freq_softlimit)
 			adj = 0;
 	} else { /* unknown event */
 		adj = 0;
 	}
 
-	dev_priv->rps.last_adj = adj;
+	rps->last_adj = adj;
 
 	/* sysfs frequency interfaces may have snuck in while servicing the
 	 * interrupt
@@ -1232,7 +1238,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 
 	if (intel_set_rps(dev_priv, new_delay)) {
 		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
-		dev_priv->rps.last_adj = 0;
+		rps->last_adj = 0;
 	}
 
 	mutex_unlock(&dev_priv->pcu_lock);
@@ -1240,7 +1246,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 out:
 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
 	spin_lock_irq(&dev_priv->irq_lock);
-	if (dev_priv->rps.interrupts_enabled)
+	if (rps->interrupts_enabled)
 		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -1721,12 +1727,14 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
  * the work queue. */
 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 {
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+
 	if (pm_iir & dev_priv->pm_rps_events) {
 		spin_lock(&dev_priv->irq_lock);
 		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
-		if (dev_priv->rps.interrupts_enabled) {
-			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-			schedule_work(&dev_priv->rps.work);
+		if (rps->interrupts_enabled) {
+			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
+			schedule_work(&rps->work);
 		}
 		spin_unlock(&dev_priv->irq_lock);
 	}
@@ -4007,11 +4015,12 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 void intel_irq_init(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	int i;
 
 	intel_hpd_init_work(dev_priv);
 
-	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
+	INIT_WORK(&rps->work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 	for (i = 0; i < MAX_L3_SLICES; ++i)
@@ -4027,7 +4036,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	else
 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
 
-	dev_priv->rps.pm_intrmsk_mbz = 0;
+	rps->pm_intrmsk_mbz = 0;
 
 	/*
 	 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
@@ -4036,10 +4045,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	 * TODO: verify if this can be reproduced on VLV,CHV.
 	 */
 	if (INTEL_GEN(dev_priv) <= 7)
-		dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
+		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
 
 	if (INTEL_GEN(dev_priv) >= 8)
-		dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
 
 	if (IS_GEN2(dev_priv)) {
 		/* Gen2 doesn't have a hardware frame counter */
......
@@ -275,7 +275,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
-				       dev_priv->rps.cur_freq));
+				       dev_priv->gt_pm.rps.cur_freq));
 }
 
 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -284,7 +284,7 @@ static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribu
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
-				       dev_priv->rps.boost_freq));
+				       dev_priv->gt_pm.rps.boost_freq));
 }
 
 static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -292,6 +292,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 			  const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 val;
 	ssize_t ret;
@@ -301,11 +302,11 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 
 	/* Validate against (static) hardware limits */
 	val = intel_freq_opcode(dev_priv, val);
-	if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq)
+	if (val < rps->min_freq || val > rps->max_freq)
 		return -EINVAL;
 
 	mutex_lock(&dev_priv->pcu_lock);
-	dev_priv->rps.boost_freq = val;
+	rps->boost_freq = val;
 	mutex_unlock(&dev_priv->pcu_lock);
 
 	return count;
@@ -318,7 +319,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
-				       dev_priv->rps.efficient_freq));
+				       dev_priv->gt_pm.rps.efficient_freq));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -327,7 +328,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
-				       dev_priv->rps.max_freq_softlimit));
+				       dev_priv->gt_pm.rps.max_freq_softlimit));
 }
 
 static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -335,6 +336,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 			      const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 val;
 	ssize_t ret;
@@ -348,23 +350,23 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 	val = intel_freq_opcode(dev_priv, val);
-	if (val < dev_priv->rps.min_freq ||
-	    val > dev_priv->rps.max_freq ||
-	    val < dev_priv->rps.min_freq_softlimit) {
+	if (val < rps->min_freq ||
+	    val > rps->max_freq ||
+	    val < rps->min_freq_softlimit) {
 		mutex_unlock(&dev_priv->pcu_lock);
 		intel_runtime_pm_put(dev_priv);
 		return -EINVAL;
 	}
 
-	if (val > dev_priv->rps.rp0_freq)
+	if (val > rps->rp0_freq)
 		DRM_DEBUG("User requested overclocking to %d\n",
 			  intel_gpu_freq(dev_priv, val));
 
-	dev_priv->rps.max_freq_softlimit = val;
+	rps->max_freq_softlimit = val;
 
-	val = clamp_t(int, dev_priv->rps.cur_freq,
-		      dev_priv->rps.min_freq_softlimit,
-		      dev_priv->rps.max_freq_softlimit);
+	val = clamp_t(int, rps->cur_freq,
+		      rps->min_freq_softlimit,
+		      rps->max_freq_softlimit);
 
 	/* We still need *_set_rps to process the new max_delay and
 	 * update the interrupt limits and PMINTRMSK even though
@@ -384,7 +386,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			intel_gpu_freq(dev_priv,
-				       dev_priv->rps.min_freq_softlimit));
+				       dev_priv->gt_pm.rps.min_freq_softlimit));
 }
 
 static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -392,6 +394,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 			      const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 val;
 	ssize_t ret;
@@ -405,19 +408,19 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 
 	val = intel_freq_opcode(dev_priv, val);
-	if (val < dev_priv->rps.min_freq ||
-	    val > dev_priv->rps.max_freq ||
-	    val > dev_priv->rps.max_freq_softlimit) {
+	if (val < rps->min_freq ||
+	    val > rps->max_freq ||
+	    val > rps->max_freq_softlimit) {
 		mutex_unlock(&dev_priv->pcu_lock);
 		intel_runtime_pm_put(dev_priv);
 		return -EINVAL;
 	}
 
-	dev_priv->rps.min_freq_softlimit = val;
+	rps->min_freq_softlimit = val;
 
-	val = clamp_t(int, dev_priv->rps.cur_freq,
-		      dev_priv->rps.min_freq_softlimit,
-		      dev_priv->rps.max_freq_softlimit);
+	val = clamp_t(int, rps->cur_freq,
+		      rps->min_freq_softlimit,
+		      rps->max_freq_softlimit);
 
 	/* We still need *_set_rps to process the new min_delay and
 	 * update the interrupt limits and PMINTRMSK even though
@@ -448,14 +451,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	u32 val;
 
 	if (attr == &dev_attr_gt_RP0_freq_mhz)
-		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
+		val = intel_gpu_freq(dev_priv, rps->rp0_freq);
 	else if (attr == &dev_attr_gt_RP1_freq_mhz)
-		val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
+		val = intel_gpu_freq(dev_priv, rps->rp1_freq);
 	else if (attr == &dev_attr_gt_RPn_freq_mhz)
-		val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
+		val = intel_gpu_freq(dev_priv, rps->min_freq);
 	else
 		BUG();
......
@@ -1243,7 +1243,7 @@ void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
 static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
 					    u32 mask)
 {
-	return mask & ~i915->rps.pm_intrmsk_mbz;
+	return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
 }
 
 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
......