Commit 41e5c17e authored by Vinay Belgaumkar, committed by John Harrison

drm/i915/guc/slpc: Sysfs hooks for SLPC

Update the get/set min/max freq hooks to work for the
SLPC case as well. Consolidate the requested/min/max frequency
get/set helpers in intel_rps, where the proper action can be
taken depending on whether SLPC is enabled.
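
For illustration only, the dispatch pattern shared by the consolidated helpers is
sketched below; it restates the intel_rps_set_max_frequency() added in the diff,
with comments marking the two paths:

	/* Route to GuC SLPC when it manages frequencies, else use the host path. */
	int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val)
	{
		struct intel_guc_slpc *slpc = rps_to_slpc(rps);

		if (rps_uses_slpc(rps))
			return intel_guc_slpc_set_max_freq(slpc, val);	/* GuC SLPC path */
		else
			return set_max_freq(rps, val);			/* legacy host path */
	}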

v2: Add wrappers for getting rp0/1/n frequencies, update
softlimits in set min/max SLPC functions. Also check for
boundary conditions before setting them.

v3: Address review comments (Michal W)

v4: Add helper for host part of intel_rps_set_freq helpers (Michal W)

v5: checkpatch()

Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Acked-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Vinay Belgaumkar <vinay.belgaumkar@intel.com>
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Signed-off-by: Sujaritha Sundaresan <sujaritha.sundaresan@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210730202119.23810-13-vinay.belgaumkar@intel.com
parent 025cb07b
@@ -37,6 +37,13 @@ static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
 	return rps_to_gt(rps)->uncore;
 }
 
+static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps)
+{
+	struct intel_gt *gt = rps_to_gt(rps);
+
+	return &gt->uc.guc.slpc;
+}
+
 static bool rps_uses_slpc(struct intel_rps *rps)
 {
 	struct intel_gt *gt = rps_to_gt(rps);
@@ -1963,6 +1970,176 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
 	return freq;
 }
 
+u32 intel_rps_read_punit_req(struct intel_rps *rps)
+{
+	struct intel_uncore *uncore = rps_to_uncore(rps);
+
+	return intel_uncore_read(uncore, GEN6_RPNSWREQ);
+}
+
+static u32 intel_rps_get_req(u32 pureq)
+{
+	u32 req = pureq >> GEN9_SW_REQ_UNSLICE_RATIO_SHIFT;
+
+	return req;
+}
+
+u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps)
+{
+	u32 freq = intel_rps_get_req(intel_rps_read_punit_req(rps));
+
+	return intel_gpu_freq(rps, freq);
+}
+
+u32 intel_rps_get_requested_frequency(struct intel_rps *rps)
+{
+	if (rps_uses_slpc(rps))
+		return intel_rps_read_punit_req_frequency(rps);
+	else
+		return intel_gpu_freq(rps, rps->cur_freq);
+}
+
+u32 intel_rps_get_max_frequency(struct intel_rps *rps)
+{
+	struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+	if (rps_uses_slpc(rps))
+		return slpc->max_freq_softlimit;
+	else
+		return intel_gpu_freq(rps, rps->max_freq_softlimit);
+}
+
+u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
+{
+	struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+	if (rps_uses_slpc(rps))
+		return slpc->rp0_freq;
+	else
+		return intel_gpu_freq(rps, rps->rp0_freq);
+}
+
+u32 intel_rps_get_rp1_frequency(struct intel_rps *rps)
+{
+	struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+	if (rps_uses_slpc(rps))
+		return slpc->rp1_freq;
+	else
+		return intel_gpu_freq(rps, rps->rp1_freq);
+}
+
+u32 intel_rps_get_rpn_frequency(struct intel_rps *rps)
+{
+	struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+	if (rps_uses_slpc(rps))
+		return slpc->min_freq;
+	else
+		return intel_gpu_freq(rps, rps->min_freq);
+}
+
+static int set_max_freq(struct intel_rps *rps, u32 val)
+{
+	struct drm_i915_private *i915 = rps_to_i915(rps);
+	int ret = 0;
+
+	mutex_lock(&rps->lock);
+
+	val = intel_freq_opcode(rps, val);
+	if (val < rps->min_freq ||
+	    val > rps->max_freq ||
+	    val < rps->min_freq_softlimit) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if (val > rps->rp0_freq)
+		drm_dbg(&i915->drm, "User requested overclocking to %d\n",
+			intel_gpu_freq(rps, val));
+
+	rps->max_freq_softlimit = val;
+
+	val = clamp_t(int, rps->cur_freq,
+		      rps->min_freq_softlimit,
+		      rps->max_freq_softlimit);
+
+	/*
+	 * We still need *_set_rps to process the new max_delay and
+	 * update the interrupt limits and PMINTRMSK even though
+	 * frequency request may be unchanged.
+	 */
+	intel_rps_set(rps, val);
+
+unlock:
+	mutex_unlock(&rps->lock);
+
+	return ret;
+}
+
+int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val)
+{
+	struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+	if (rps_uses_slpc(rps))
+		return intel_guc_slpc_set_max_freq(slpc, val);
+	else
+		return set_max_freq(rps, val);
+}
+
+u32 intel_rps_get_min_frequency(struct intel_rps *rps)
+{
+	struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+	if (rps_uses_slpc(rps))
+		return slpc->min_freq_softlimit;
+	else
+		return intel_gpu_freq(rps, rps->min_freq_softlimit);
+}
+
+static int set_min_freq(struct intel_rps *rps, u32 val)
+{
+	int ret = 0;
+
+	mutex_lock(&rps->lock);
+
+	val = intel_freq_opcode(rps, val);
+	if (val < rps->min_freq ||
+	    val > rps->max_freq ||
+	    val > rps->max_freq_softlimit) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	rps->min_freq_softlimit = val;
+
+	val = clamp_t(int, rps->cur_freq,
+		      rps->min_freq_softlimit,
+		      rps->max_freq_softlimit);
+
+	/*
+	 * We still need *_set_rps to process the new min_delay and
+	 * update the interrupt limits and PMINTRMSK even though
+	 * frequency request may be unchanged.
+	 */
+	intel_rps_set(rps, val);
+
+unlock:
+	mutex_unlock(&rps->lock);
+
+	return ret;
+}
+
+int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
+{
+	struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+	if (rps_uses_slpc(rps))
+		return intel_guc_slpc_set_min_freq(slpc, val);
+	else
+		return set_min_freq(rps, val);
+}
+
 /* External interface for intel_ips.ko */
 static struct drm_i915_private __rcu *ips_mchdev;
......
@@ -31,6 +31,16 @@ int intel_gpu_freq(struct intel_rps *rps, int val);
 int intel_freq_opcode(struct intel_rps *rps, int val);
 u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
 u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
+u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
+u32 intel_rps_get_min_frequency(struct intel_rps *rps);
+int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val);
+u32 intel_rps_get_max_frequency(struct intel_rps *rps);
+int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val);
+u32 intel_rps_get_rp0_frequency(struct intel_rps *rps);
+u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
+u32 intel_rps_get_rpn_frequency(struct intel_rps *rps);
+u32 intel_rps_read_punit_req(struct intel_rps *rps);
+u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps);
 void gen5_rps_irq_handler(struct intel_rps *rps);
 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
......
@@ -407,7 +407,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
 	if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
 		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
-				intel_gpu_freq(rps, rps->cur_freq),
+				intel_rps_get_requested_frequency(rps),
 				period_ns / 1000);
 	}
......
@@ -9229,6 +9229,8 @@ enum {
 #define   GEN9_FREQUENCY(x)			((x) << 23)
 #define   GEN6_OFFSET(x)			((x) << 19)
 #define   GEN6_AGGRESSIVE_TURBO		(0 << 15)
+#define   GEN9_SW_REQ_UNSLICE_RATIO_SHIFT 23
+
 #define GEN6_RC_VIDEO_FREQ			_MMIO(0xA00C)
 #define GEN6_RC_CONTROL			_MMIO(0xA090)
 #define   GEN6_RC_CTL_RC6pp_ENABLE		(1 << 16)
......
@@ -272,7 +272,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
 	struct intel_rps *rps = &i915->gt.rps;
 
-	return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->cur_freq));
+	return sysfs_emit(buf, "%d\n", intel_rps_get_requested_frequency(rps));
 }
 
 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -326,9 +326,10 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt.rps;
+	struct intel_gt *gt = &dev_priv->gt;
+	struct intel_rps *rps = &gt->rps;
 
-	return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->max_freq_softlimit));
+	return sysfs_emit(buf, "%d\n", intel_rps_get_max_frequency(rps));
 }
 
 static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -336,7 +337,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 				     const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt.rps;
+	struct intel_gt *gt = &dev_priv->gt;
+	struct intel_rps *rps = &gt->rps;
 	ssize_t ret;
 	u32 val;
@@ -344,53 +346,26 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 	if (ret)
 		return ret;
 
-	mutex_lock(&rps->lock);
-
-	val = intel_freq_opcode(rps, val);
-	if (val < rps->min_freq ||
-	    val > rps->max_freq ||
-	    val < rps->min_freq_softlimit) {
-		ret = -EINVAL;
-		goto unlock;
-	}
-
-	if (val > rps->rp0_freq)
-		DRM_DEBUG("User requested overclocking to %d\n",
-			  intel_gpu_freq(rps, val));
-
-	rps->max_freq_softlimit = val;
-
-	val = clamp_t(int, rps->cur_freq,
-		      rps->min_freq_softlimit,
-		      rps->max_freq_softlimit);
-
-	/*
-	 * We still need *_set_rps to process the new max_delay and
-	 * update the interrupt limits and PMINTRMSK even though
-	 * frequency request may be unchanged.
-	 */
-	intel_rps_set(rps, val);
-
-unlock:
-	mutex_unlock(&rps->lock);
+	ret = intel_rps_set_max_frequency(rps, val);
 
 	return ret ?: count;
 }
 
 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt.rps;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct intel_gt *gt = &i915->gt;
+	struct intel_rps *rps = &gt->rps;
 
-	return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->min_freq_softlimit));
+	return sysfs_emit(buf, "%d\n", intel_rps_get_min_frequency(rps));
 }
 
 static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 				     struct device_attribute *attr,
 				     const char *buf, size_t count)
 {
-	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt.rps;
+	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &i915->gt.rps;
 	ssize_t ret;
 	u32 val;
@@ -398,31 +373,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 	if (ret)
 		return ret;
 
-	mutex_lock(&rps->lock);
-
-	val = intel_freq_opcode(rps, val);
-	if (val < rps->min_freq ||
-	    val > rps->max_freq ||
-	    val > rps->max_freq_softlimit) {
-		ret = -EINVAL;
-		goto unlock;
-	}
-
-	rps->min_freq_softlimit = val;
-
-	val = clamp_t(int, rps->cur_freq,
-		      rps->min_freq_softlimit,
-		      rps->max_freq_softlimit);
-
-	/*
-	 * We still need *_set_rps to process the new min_delay and
-	 * update the interrupt limits and PMINTRMSK even though
-	 * frequency request may be unchanged.
-	 */
-	intel_rps_set(rps, val);
-
-unlock:
-	mutex_unlock(&rps->lock);
+	ret = intel_rps_set_min_frequency(rps, val);
 
 	return ret ?: count;
 }
@@ -448,11 +399,11 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
 	u32 val;
 
 	if (attr == &dev_attr_gt_RP0_freq_mhz)
-		val = intel_gpu_freq(rps, rps->rp0_freq);
+		val = intel_rps_get_rp0_frequency(rps);
 	else if (attr == &dev_attr_gt_RP1_freq_mhz)
-		val = intel_gpu_freq(rps, rps->rp1_freq);
+		val = intel_rps_get_rp1_frequency(rps);
 	else if (attr == &dev_attr_gt_RPn_freq_mhz)
-		val = intel_gpu_freq(rps, rps->min_freq);
+		val = intel_rps_get_rpn_frequency(rps);
 	else
 		BUG();
......
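
For context, a minimal userspace sketch of exercising these sysfs hooks; the card0
path is an assumption about the local setup and is not part of this patch:

	/* read_gt_max_freq.c - print the value served by gt_max_freq_mhz_show() */
	#include <stdio.h>

	int main(void)
	{
		const char *path = "/sys/class/drm/card0/gt_max_freq_mhz"; /* assumed device node */
		FILE *f = fopen(path, "r");
		unsigned int mhz;

		if (!f)
			return 1;
		if (fscanf(f, "%u", &mhz) == 1)
			printf("max frequency softlimit: %u MHz\n", mhz);
		fclose(f);
		return 0;
	}

With SLPC enabled, this now reports slpc->max_freq_softlimit via
intel_rps_get_max_frequency() rather than the host-managed rps->max_freq_softlimit.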