Commit 292e4fb0 authored by Vinay Belgaumkar, committed by John Harrison

drm/i915/guc/slpc: Define and initialize boost frequency

Define the helpers and struct members required to record boost info.
Boost frequency is initialized to RP0 at SLPC init. Also define num_waiters,
which tracks the number of pending boost requests.

Boosting is done by scheduling a worker thread, which avoids the need to
make H2G calls from interrupt context. Initialize the worker function during
SLPC init as well. intel_guc_slpc_init had to be moved a few lines down to
accommodate this.
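The waiter-side entry point is not part of this patch (only its declaration is
added to intel_guc_slpc.h below), but a minimal, hypothetical sketch shows how
such a helper could hand off to the worker so that no H2G call happens in
interrupt context; the real implementation added in a follow-up patch may differ:

void intel_guc_slpc_boost(struct intel_guc_slpc *slpc)
{
	/*
	 * Illustrative sketch only, not part of this patch: record one more
	 * pending waiter and, if this is the first one, kick the boost
	 * worker. No H2G message is sent here, so this is safe to call from
	 * the request/interrupt path.
	 */
	if (!atomic_fetch_inc(&slpc->num_waiters))
		schedule_work(&slpc->boost_work);
}

The worker (slpc_boost_work() below) then takes slpc->lock and issues the actual
SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ request from process context.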

v2: Add a workqueue to handle waitboost
v3: Code review comments (Ashutosh)

Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
Signed-off-by: Vinay Belgaumkar <vinay.belgaumkar@intel.com>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211102012608.8609-2-vinay.belgaumkar@intel.com
parent c1f110ee
@@ -79,29 +79,6 @@ static void slpc_mem_set_disabled(struct slpc_shared_data *data,
	slpc_mem_set_param(data, enable_id, 0);
}

int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	GEM_BUG_ON(slpc->vma);

	err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
	if (unlikely(err)) {
		drm_err(&i915->drm,
			"Failed to allocate SLPC struct (err=%pe)\n",
			ERR_PTR(err));
		return err;
	}

	slpc->max_freq_softlimit = 0;
	slpc->min_freq_softlimit = 0;

	return err;
}

static u32 slpc_get_state(struct intel_guc_slpc *slpc)
{
	struct slpc_shared_data *data;
@@ -203,6 +180,82 @@ static int slpc_unset_param(struct intel_guc_slpc *slpc,
	return guc_action_slpc_unset_param(guc, id);
}

static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq)
{
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	intel_wakeref_t wakeref;
	int ret = 0;

	lockdep_assert_held(&slpc->lock);

	/*
	 * This function is a little different as compared to
	 * intel_guc_slpc_set_min_freq(). Softlimit will not be updated
	 * here since this is used to temporarily change min freq,
	 * for example, during a waitboost. Caller is responsible for
	 * checking bounds.
	 */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		ret = slpc_set_param(slpc,
				     SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
				     freq);
		if (ret)
			drm_err(&i915->drm, "Unable to force min freq to %u: %d",
				freq, ret);
	}

	return ret;
}

static void slpc_boost_work(struct work_struct *work)
{
	struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work);

	/*
	 * Raise min freq to boost. It's possible that
	 * this is greater than current max. But it will
	 * certainly be limited by RP0. An error setting
	 * the min param is not fatal.
	 */
	mutex_lock(&slpc->lock);
	if (atomic_read(&slpc->num_waiters)) {
		slpc_force_min_freq(slpc, slpc->boost_freq);
		slpc->num_boosts++;
	}
	mutex_unlock(&slpc->lock);
}

int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
{
	struct intel_guc *guc = slpc_to_guc(slpc);
	struct drm_i915_private *i915 = slpc_to_i915(slpc);
	u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
	int err;

	GEM_BUG_ON(slpc->vma);

	err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
	if (unlikely(err)) {
		drm_err(&i915->drm,
			"Failed to allocate SLPC struct (err=%pe)\n",
			ERR_PTR(err));
		return err;
	}

	slpc->max_freq_softlimit = 0;
	slpc->min_freq_softlimit = 0;

	slpc->boost_freq = 0;
	atomic_set(&slpc->num_waiters, 0);
	slpc->num_boosts = 0;

	mutex_init(&slpc->lock);
	INIT_WORK(&slpc->boost_work, slpc_boost_work);

	return err;
}

static const char *slpc_global_state_to_string(enum slpc_global_state state)
{
	switch (state) {
@@ -522,6 +575,9 @@ static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
					GT_FREQUENCY_MULTIPLIER;
	slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) *
					GT_FREQUENCY_MULTIPLIER;

	if (!slpc->boost_freq)
		slpc->boost_freq = slpc->rp0_freq;
}

/*
@@ -38,5 +38,6 @@ int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val);
int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val);
int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p);
void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
void intel_guc_slpc_boost(struct intel_guc_slpc *slpc);

#endif
@@ -6,6 +6,9 @@
#ifndef _INTEL_GUC_SLPC_TYPES_H_
#define _INTEL_GUC_SLPC_TYPES_H_

#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/types.h>

#define SLPC_RESET_TIMEOUT_MS 5

@@ -20,10 +23,20 @@ struct intel_guc_slpc {
	u32 min_freq;
	u32 rp0_freq;
	u32 rp1_freq;
	u32 boost_freq;

	/* frequency softlimits */
	u32 min_freq_softlimit;
	u32 max_freq_softlimit;

	/* Protects set/reset of boost freq
	 * and value of num_waiters
	 */
	struct mutex lock;

	struct work_struct boost_work;
	atomic_t num_waiters;
	u32 num_boosts;
};

#endif