Commit 1e88da4f authored by John Harrison's avatar John Harrison

drm/i915/guc: Enable compute scheduling on DG2

DG2 has issues. To work around one of these the GuC must schedule
apps in an exclusive manner across both RCS and CCS. That is, if a
context from app X is running on RCS then all CCS engines must sit
idle even if there are contexts from apps Y, Z, ... waiting to run. A
certain OS favours RCS to the total starvation of CCS. Linux does not.
Hence the GuC now has a scheduling policy setting to control this
arbitration.
Signed-off-by: default avatarJohn Harrison <John.C.Harrison@Intel.com>
Reviewed-by: default avatarUmesh Nerlige Ramappa <umesh.nerlige.ramappa@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220922201209.1446343-2-John.C.Harrison@Intel.com
parent d24e7855
......@@ -117,6 +117,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
INTEL_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
INTEL_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV = 0x509,
INTEL_GUC_ACTION_SCHED_CONTEXT = 0x1000,
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
......
......@@ -81,10 +81,17 @@
#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_KEY 0x0907
#define GUC_KLV_SELF_CFG_G2H_CTB_SIZE_LEN 1u
/*
 * Global scheduling policy update keys.
 * These are KLV keys carried in an
 * INTEL_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV H2G message.
 */
enum {
	GUC_SCHEDULING_POLICIES_KLV_ID_RENDER_COMPUTE_YIELD = 0x1001,
};
/*
* Per context scheduling policy update keys.
*/
enum {
GUC_CONTEXT_POLICIES_KLV_ID_EXECUTION_QUANTUM = 0x2001,
GUC_CONTEXT_POLICIES_KLV_ID_PREEMPTION_TIMEOUT = 0x2002,
GUC_CONTEXT_POLICIES_KLV_ID_SCHEDULING_PRIORITY = 0x2003,
......
......@@ -290,6 +290,25 @@ struct guc_update_context_policy {
struct guc_klv_generic_dw_t klv[GUC_CONTEXT_POLICIES_KLV_NUM_IDS];
} __packed;
/* Format of the UPDATE_SCHEDULING_POLICIES H2G data packet */
struct guc_update_scheduling_policy_header {
	u32 action;	/* INTEL_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV */
} __packed;

/*
 * Can't dynamically allocate memory for the scheduling policy KLV because
 * it will be sent from within the reset path. Need a fixed size lump on
 * the stack instead :(.
 *
 * Currently, there is only one KLV defined, which has 1 word of KL + 2 words of V.
 */
#define MAX_SCHEDULING_POLICY_SIZE 3

struct guc_update_scheduling_policy {
	struct guc_update_scheduling_policy_header header;
	/* KLV stream: key/len word(s) followed by value word(s) */
	u32 data[MAX_SCHEDULING_POLICY_SIZE];
} __packed;
#define GUC_POWER_UNSPECIFIED 0
#define GUC_POWER_D0 1
#define GUC_POWER_D1 2
......@@ -298,6 +317,9 @@ struct guc_update_context_policy {
/* Scheduling policy settings */
#define GLOBAL_SCHEDULE_POLICY_RC_YIELD_DURATION 100 /* in ms */
#define GLOBAL_SCHEDULE_POLICY_RC_YIELD_RATIO 50 /* in percent */
#define GLOBAL_POLICY_MAX_NUM_WI 15
/* Don't reset an engine upon preemption failure */
......
......@@ -4178,6 +4178,98 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
return 0;
}
/*
 * Builder state for assembling an UPDATE_SCHEDULING_POLICIES_KLV H2G
 * message on the stack before sending it to the GuC.
 */
struct scheduling_policy {
	/* internal data */
	u32 max_words, num_words;	/* capacity of / words used in h2g.data */
	u32 count;			/* number of KLVs appended so far */

	/* API data */
	struct guc_update_scheduling_policy h2g;	/* message as sent to the GuC */
};
/* Size, in dwords, of the H2G message accumulated so far (header + KLVs). */
static u32 __guc_scheduling_policy_action_size(struct scheduling_policy *policy)
{
	u32 *base = (void *)&policy->h2g;
	u32 *tail = policy->h2g.data + policy->num_words;

	return tail - base;
}
/*
 * Reset @policy to an empty UPDATE_SCHEDULING_POLICIES_KLV message.
 * Returns @policy so calls can be chained.
 */
static struct scheduling_policy *__guc_scheduling_policy_start_klv(struct scheduling_policy *policy)
{
	policy->count = 0;
	policy->num_words = 0;
	policy->max_words = ARRAY_SIZE(policy->h2g.data);
	policy->h2g.header.action = INTEL_GUC_ACTION_UPDATE_SCHEDULING_POLICIES_KLV;

	return policy;
}
/*
 * Append one KLV to the message being built in @policy: a single
 * key/length word followed by @len words of value data.
 */
static void __guc_scheduling_policy_add_klv(struct scheduling_policy *policy,
					    u32 action, u32 *data, u32 len)
{
	u32 *dst = &policy->h2g.data[policy->num_words];

	/* The on-stack buffer is fixed size - must not overflow it */
	GEM_BUG_ON((policy->num_words + 1 + len) > policy->max_words);

	*dst++ = FIELD_PREP(GUC_KLV_0_KEY, action) |
		 FIELD_PREP(GUC_KLV_0_LEN, len);
	memcpy(dst, data, sizeof(u32) * len);

	policy->num_words += 1 + len;
	policy->count++;
}
/*
 * Send the accumulated scheduling policy KLVs to the GuC.
 *
 * The GuC's reply is the number of KLVs it processed. A partial count is
 * only warned about; a count larger than what was sent indicates a broken
 * protocol and is returned as -EPROTO.
 *
 * Returns 0 on success, a negative error code on H2G failure or protocol
 * mismatch.
 */
static int __guc_action_set_scheduling_policies(struct intel_guc *guc,
						struct scheduling_policy *policy)
{
	int ret;

	ret = intel_guc_send(guc, (u32 *)&policy->h2g,
			     __guc_scheduling_policy_action_size(policy));
	if (ret < 0)
		return ret;

	if (ret != policy->count) {
		/* printk-style messages must be newline terminated */
		drm_warn(&guc_to_gt(guc)->i915->drm, "GuC global scheduler policy processed %d of %d KLVs!\n",
			 ret, policy->count);
		if (ret > policy->count)
			return -EPROTO;
	}

	return 0;
}
/*
 * Configure the GuC's global scheduling policies. Currently this is only
 * the render/compute yield policy: instruct the GuC to alternate between
 * RCS and CCS contexts (using the duration/ratio constants below) so that
 * neither engine class is starved by the other.
 *
 * Returns 0 on success or if the firmware is too old to support the
 * policy H2G, negative error code otherwise.
 */
static int guc_init_global_schedule_policy(struct intel_guc *guc)
{
	struct scheduling_policy policy;
	struct intel_gt *gt = guc_to_gt(guc);
	intel_wakeref_t wakeref;
	int ret = 0;

	/* The UPDATE_SCHEDULING_POLICIES_KLV H2G only exists from GuC v70.3.0 */
	if (GET_UC_VER(guc) < MAKE_UC_VER(70, 3, 0))
		return 0;

	__guc_scheduling_policy_start_klv(&policy);

	/* Sending an H2G requires the device to be awake */
	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) {
		u32 yield[] = {
			GLOBAL_SCHEDULE_POLICY_RC_YIELD_DURATION,
			GLOBAL_SCHEDULE_POLICY_RC_YIELD_RATIO,
		};

		__guc_scheduling_policy_add_klv(&policy,
						GUC_SCHEDULING_POLICIES_KLV_ID_RENDER_COMPUTE_YIELD,
						yield, ARRAY_SIZE(yield));

		ret = __guc_action_set_scheduling_policies(guc, &policy);
		if (ret)
			i915_probe_error(gt->i915,
					 "Failed to configure global scheduling policies: %pe!\n",
					 ERR_PTR(ret));
	}

	return ret;
}
void intel_guc_submission_enable(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
......@@ -4190,6 +4282,7 @@ void intel_guc_submission_enable(struct intel_guc *guc)
guc_init_lrc_mapping(guc);
guc_init_engine_stats(guc);
guc_init_global_schedule_policy(guc);
}
void intel_guc_submission_disable(struct intel_guc *guc)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment