Commit 733827ee authored by Riana Tauro, committed by Anshuman Gupta

drm/i915/guc/slpc: Add selftest for slpc tile-tile interaction

Run a workload on all tiles simultaneously while requesting RP0 frequency.
Pcode can, however, limit the frequency granted for throttling reasons.
This test checks for such throttling but does not fail if RP0 is not
granted due to throttle reasons.

v2: Fix build error
v3: Use IS_ERR_OR_NULL to check worker
    Addressed cosmetic review comments (Tvrtko)
v4: Do not skip the test on media engines if the gt type is GT_MEDIA.
    Use correct PERF_LIMIT_REASONS register for MTL (Vinay)
Signed-off-by: Riana Tauro <riana.tauro@intel.com>
Reviewed-by: Vinay Belgaumkar <vinay.belgaumkar@intel.com>
Signed-off-by: Anshuman Gupta <anshuman.gupta@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221109112541.275021-2-riana.tauro@intel.com
parent 9bae30de
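
For orientation before the diff: the new live_slpc_tile_interaction() selftest fans one kthread worker out per GT, runs the same spinner workload on each GT in parallel, then flushes every work item and collects the first failure. The sketch below is a simplified, standalone illustration of that fan-out/join pattern using the kthread_worker API; MAX_UNITS, struct unit_thread, run_one_unit() and unit_work_fn() are placeholder names invented for this sketch and are not i915 code.

/*
 * Simplified sketch of the fan-out/join pattern used by
 * live_slpc_tile_interaction(). Placeholder names only; not i915 code.
 */
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/err.h>

#define MAX_UNITS 2			/* placeholder for I915_MAX_GT */

struct unit_thread {			/* placeholder for struct slpc_thread */
	struct kthread_worker *worker;
	struct kthread_work work;
	int id;
	int result;
};

static int run_one_unit(int id)		/* placeholder for run_test(gt, TILE_INTERACTION) */
{
	return 0;
}

static void unit_work_fn(struct kthread_work *work)
{
	struct unit_thread *t = container_of(work, typeof(*t), work);

	t->result = run_one_unit(t->id);
}

static int run_units_in_parallel(void)
{
	struct unit_thread *threads;
	int i, ret = 0;

	threads = kcalloc(MAX_UNITS, sizeof(*threads), GFP_KERNEL);
	if (!threads)
		return -ENOMEM;

	/* Fan out: one dedicated worker thread per unit. */
	for (i = 0; i < MAX_UNITS; i++) {
		threads[i].worker = kthread_create_worker(0, "sketch/unit:%d", i);
		if (IS_ERR(threads[i].worker)) {
			ret = PTR_ERR(threads[i].worker);
			break;
		}

		threads[i].id = i;
		kthread_init_work(&threads[i].work, unit_work_fn);
		kthread_queue_work(threads[i].worker, &threads[i].work);
	}

	/* Join: wait for each queued work item, record the first failure. */
	for (i = 0; i < MAX_UNITS; i++) {
		if (IS_ERR_OR_NULL(threads[i].worker))
			continue;

		kthread_flush_work(&threads[i].work);
		if (threads[i].result && !ret)
			ret = threads[i].result;
		kthread_destroy_worker(threads[i].worker);
	}

	kfree(threads);
	return ret;
}

Dedicating one worker per GT is what makes the tiles ramp toward RP0 concurrently rather than one after another, which is the interaction this selftest is meant to exercise.
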
@@ -13,6 +13,14 @@ enum test_type {
 	VARY_MAX,
 	MAX_GRANTED,
 	SLPC_POWER,
+	TILE_INTERACTION,
 };
 
+struct slpc_thread {
+	struct kthread_worker *worker;
+	struct kthread_work work;
+	struct intel_gt *gt;
+	int result;
+};
+
 static int slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 freq)
@@ -212,7 +220,8 @@ static int max_granted_freq(struct intel_guc_slpc *slpc, struct intel_rps *rps,
 	*max_act_freq = intel_rps_read_actual_frequency(rps);
 	if (*max_act_freq != slpc->rp0_freq) {
 		/* Check if there was some throttling by pcode */
-		perf_limit_reasons = intel_uncore_read(gt->uncore, GT0_PERF_LIMIT_REASONS);
+		perf_limit_reasons = intel_uncore_read(gt->uncore,
+						       intel_gt_perf_limit_reasons_reg(gt));
 
 		/* If not, this is an error */
 		if (!(perf_limit_reasons & GT0_PERF_LIMIT_REASONS_MASK)) {
@@ -310,9 +319,10 @@ static int run_test(struct intel_gt *gt, int test_type)
 			break;
 
 		case MAX_GRANTED:
+		case TILE_INTERACTION:
 			/* Media engines have a different RP0 */
-			if (engine->class == VIDEO_DECODE_CLASS ||
-			    engine->class == VIDEO_ENHANCEMENT_CLASS) {
+			if (gt->type != GT_MEDIA && (engine->class == VIDEO_DECODE_CLASS ||
+						     engine->class == VIDEO_ENHANCEMENT_CLASS)) {
 				igt_spinner_end(&spin);
 				st_engine_heartbeat_enable(engine);
 				err = 0;
@@ -335,7 +345,8 @@ static int run_test(struct intel_gt *gt, int test_type)
 		if (max_act_freq <= slpc->min_freq) {
 			pr_err("Actual freq did not rise above min\n");
 			pr_err("Perf Limit Reasons: 0x%x\n",
-			       intel_uncore_read(gt->uncore, GT0_PERF_LIMIT_REASONS));
+			       intel_uncore_read(gt->uncore,
+						 intel_gt_perf_limit_reasons_reg(gt)));
 			err = -EINVAL;
 		}
 	}
@@ -426,6 +437,56 @@ static int live_slpc_power(void *arg)
 	return ret;
 }
 
+static void slpc_spinner_thread(struct kthread_work *work)
+{
+	struct slpc_thread *thread = container_of(work, typeof(*thread), work);
+
+	thread->result = run_test(thread->gt, TILE_INTERACTION);
+}
+
+static int live_slpc_tile_interaction(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct intel_gt *gt;
+	struct slpc_thread *threads;
+	int i = 0, ret = 0;
+
+	threads = kcalloc(I915_MAX_GT, sizeof(*threads), GFP_KERNEL);
+	if (!threads)
+		return -ENOMEM;
+
+	for_each_gt(gt, i915, i) {
+		threads[i].worker = kthread_create_worker(0, "igt/slpc_parallel:%d", gt->info.id);
+
+		if (IS_ERR(threads[i].worker)) {
+			ret = PTR_ERR(threads[i].worker);
+			break;
+		}
+
+		threads[i].gt = gt;
+		kthread_init_work(&threads[i].work, slpc_spinner_thread);
+		kthread_queue_work(threads[i].worker, &threads[i].work);
+	}
+
+	for_each_gt(gt, i915, i) {
+		int status;
+
+		if (IS_ERR_OR_NULL(threads[i].worker))
+			continue;
+
+		kthread_flush_work(&threads[i].work);
+		status = READ_ONCE(threads[i].result);
+		if (status && !ret) {
+			pr_err("%s GT %d failed ", __func__, gt->info.id);
+			ret = status;
+		}
+		kthread_destroy_worker(threads[i].worker);
+	}
+
+	kfree(threads);
+	return ret;
+}
+
 int intel_slpc_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
@@ -433,6 +494,7 @@ int intel_slpc_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_slpc_vary_min),
 		SUBTEST(live_slpc_max_granted),
 		SUBTEST(live_slpc_power),
+		SUBTEST(live_slpc_tile_interaction),
 	};
 	struct intel_gt *gt;
......
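
A note on the PERF_LIMIT_REASONS change above: the throttle check now reads whichever register intel_gt_perf_limit_reasons_reg(gt) returns instead of hard-coding GT0_PERF_LIMIT_REASONS, since on MTL the standalone media GT reports throttle reasons through its own register. Roughly, such a selector could look like the sketch below; the exact i915 implementation is not shown in this diff, and the MTL_MEDIA_PERF_LIMIT_REASONS register name is an assumption here.

/*
 * Rough sketch only: pick which PERF_LIMIT_REASONS register to read for
 * a given GT. The real helper lives in the i915 driver; the register
 * name used for the MTL media GT below is assumed, not taken from this diff.
 */
static i915_reg_t sketch_perf_limit_reasons_reg(struct intel_gt *gt)
{
	/* The standalone media tile exposes its own throttle-reason register. */
	if (gt->type == GT_MEDIA)
		return MTL_MEDIA_PERF_LIMIT_REASONS;	/* assumed name */

	return GT0_PERF_LIMIT_REASONS;
}
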