Commit 06a2d7cc authored by Christian König, committed by Alex Deucher

drm/amdgpu: revert "implement tdr advanced mode"

This reverts commit e6c6338f.

This feature basically re-submits jobs one after another to figure
out which one caused the hang.

This is obviously incompatible with gang-submit, which requires that
multiple jobs run at the same time. It's also absolutely not helpful
to crash the hardware multiple times if a clean recovery is desired.

For testing and debugging environments we should rather disable
recovery altogether so that the state can be inspected with a HW
debugger.

In addition to that, the SW implementation is clearly buggy and causes
reference count issues for the hardware fence.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent d27252b5
@@ -5088,94 +5088,6 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
return 0;
}
static void amdgpu_device_recheck_guilty_jobs(
struct amdgpu_device *adev, struct list_head *device_list_handle,
struct amdgpu_reset_context *reset_context)
{
int i, r = 0;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
int ret = 0;
struct drm_sched_job *s_job;
if (!ring || !ring->sched.thread)
continue;
s_job = list_first_entry_or_null(&ring->sched.pending_list,
struct drm_sched_job, list);
if (s_job == NULL)
continue;
/* clear job's guilty and depend the folowing step to decide the real one */
drm_sched_reset_karma(s_job);
drm_sched_resubmit_jobs_ext(&ring->sched, 1);
if (!s_job->s_fence->parent) {
DRM_WARN("Failed to get a HW fence for job!");
continue;
}
ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
if (ret == 0) { /* timeout */
DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
ring->sched.name, s_job->id);
amdgpu_fence_driver_isr_toggle(adev, true);
/* Clear this failed job from fence array */
amdgpu_fence_driver_clear_job_fences(ring);
amdgpu_fence_driver_isr_toggle(adev, false);
/* Since the job won't signal and we go for
* another resubmit drop this parent pointer
*/
dma_fence_put(s_job->s_fence->parent);
s_job->s_fence->parent = NULL;
/* set guilty */
drm_sched_increase_karma(s_job);
amdgpu_reset_prepare_hwcontext(adev, reset_context);
retry:
/* do hw reset */
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_fini_data_exchange(adev);
r = amdgpu_device_reset_sriov(adev, false);
if (r)
adev->asic_reset_res = r;
} else {
clear_bit(AMDGPU_SKIP_HW_RESET,
&reset_context->flags);
r = amdgpu_do_asic_reset(device_list_handle,
reset_context);
if (r && r == -EAGAIN)
goto retry;
}
/*
* add reset counter so that the following
* resubmitted job could flush vmid
*/
atomic_inc(&adev->gpu_reset_counter);
continue;
}
/* got the hw fence, signal finished fence */
atomic_dec(ring->sched.score);
dma_fence_get(&s_job->s_fence->finished);
dma_fence_signal(&s_job->s_fence->finished);
dma_fence_put(&s_job->s_fence->finished);
/* remove node from list and free the job */
spin_lock(&ring->sched.job_list_lock);
list_del_init(&s_job->list);
spin_unlock(&ring->sched.job_list_lock);
ring->sched.ops->free_job(s_job);
}
}
static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -5196,7 +5108,6 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
}
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@@ -5219,7 +5130,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
int i, r = 0;
bool need_emergency_restart = false;
bool audio_suspended = false;
int tmp_vram_lost_counter;
bool gpu_reset_for_dev_remove = false;
gpu_reset_for_dev_remove =
@@ -5365,7 +5275,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
amdgpu_device_stop_pending_resets(tmp_adev);
}
tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
/* Actual ASIC resets if needed.*/
/* Host driver will handle XGMI hive reset for SRIOV */
if (amdgpu_sriov_vf(adev)) {
@@ -5390,18 +5299,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* Post ASIC reset for all devs .*/
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
/*
* Sometimes a later bad compute job can block a good gfx job as gfx
* and compute ring share internal GC HW mutually. We add an additional
* guilty jobs recheck step to find the real guilty job, it synchronously
* submits and pends for the first job being signaled. If it gets timeout,
* we identify it as a real guilty job.
*/
if (amdgpu_gpu_recovery == 2 &&
!(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
amdgpu_device_recheck_guilty_jobs(
tmp_adev, device_list_handle, reset_context);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
...
@@ -533,7 +533,7 @@ module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
* DOC: gpu_recovery (int)
* Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
*/
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (2 = advanced tdr mode, 1 = enable, 0 = disable, -1 = auto)");
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
/**
...
@@ -443,27 +443,6 @@ static void drm_sched_job_timedout(struct work_struct *work)
}
}
/**
* drm_sched_increase_karma - Update sched_entity guilty flag
*
* @bad: The job guilty of time out
*
* Increment on every hang caused by the 'bad' job. If this exceeds the hang
* limit of the scheduler then the respective sched entity is marked guilty and
* jobs from it will not be scheduled further
*/
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
drm_sched_increase_karma_ext(bad, 1);
}
EXPORT_SYMBOL(drm_sched_increase_karma);
void drm_sched_reset_karma(struct drm_sched_job *bad)
{
drm_sched_increase_karma_ext(bad, 0);
}
EXPORT_SYMBOL(drm_sched_reset_karma);
/**
* drm_sched_stop - stop the scheduler
*
@@ -604,32 +583,15 @@ EXPORT_SYMBOL(drm_sched_start);
*
*/
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
drm_sched_resubmit_jobs_ext(sched, INT_MAX);
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
/**
* drm_sched_resubmit_jobs_ext - helper to relunch certain number of jobs from mirror ring list
*
* @sched: scheduler instance
* @max: job numbers to relaunch
*
*/
void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
{
struct drm_sched_job *s_job, *tmp;
uint64_t guilty_context;
bool found_guilty = false;
struct dma_fence *fence;
int i = 0;
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct drm_sched_fence *s_fence = s_job->s_fence;
if (i >= max)
break;
if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
found_guilty = true;
guilty_context = s_job->s_fence->scheduled.context;
@@ -639,7 +601,6 @@ void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
dma_fence_set_error(&s_fence->finished, -ECANCELED);
fence = sched->ops->run_job(s_job);
i++;
if (IS_ERR_OR_NULL(fence)) {
if (IS_ERR(fence))
@@ -655,7 +616,7 @@ void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max)
}
}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
/**
* drm_sched_job_init - init a scheduler job
@@ -1172,13 +1133,15 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
EXPORT_SYMBOL(drm_sched_fini);
/**
* drm_sched_increase_karma_ext - Update sched_entity guilty flag
* drm_sched_increase_karma - Update sched_entity guilty flag
*
* @bad: The job guilty of time out
* @type: type for increase/reset karma
*
* Increment on every hang caused by the 'bad' job. If this exceeds the hang
* limit of the scheduler then the respective sched entity is marked guilty and
* jobs from it will not be scheduled further
*/
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
int i;
struct drm_sched_entity *tmp;
@@ -1190,9 +1153,6 @@ void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
* corrupt but keep in mind that kernel jobs always considered good.
*/
if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
if (type == 0)
atomic_set(&bad->karma, 0);
else if (type == 1)
atomic_inc(&bad->karma);
for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
@@ -1204,7 +1164,7 @@ void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
if (bad->s_fence->scheduled.context ==
entity->fence_context) {
if (entity->guilty)
atomic_set(entity->guilty, type);
atomic_set(entity->guilty, 1);
break;
}
}
@@ -1214,4 +1174,4 @@ void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type)
}
}
}
EXPORT_SYMBOL(drm_sched_increase_karma_ext);
EXPORT_SYMBOL(drm_sched_increase_karma);
@@ -528,10 +528,7 @@ void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max);
void drm_sched_increase_karma(struct drm_sched_job *bad);
void drm_sched_reset_karma(struct drm_sched_job *bad);
void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
...
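For context, after this revert the scheduler exposes only the original recovery helpers (drm_sched_stop(), drm_sched_increase_karma(), drm_sched_resubmit_jobs() and drm_sched_start(), as declared in the gpu_scheduler.h hunk above). Below is a minimal sketch of how a driver's recovery path typically chains them; it is not the amdgpu implementation, and my_driver_recover()/my_driver_reset_hw() are hypothetical placeholders for the driver's own code.

/*
 * Hedged sketch only: a generic driver-side recovery sequence built from
 * the scheduler helpers that remain after this revert. This is NOT the
 * amdgpu implementation; my_driver_reset_hw() stands in for the real
 * ASIC reset.
 */
#include <drm/gpu_scheduler.h>

static void my_driver_reset_hw(void)
{
	/* placeholder: the actual hardware reset lives in the driver */
}

static void my_driver_recover(struct drm_gpu_scheduler *sched,
			      struct drm_sched_job *bad)
{
	/* Park the scheduler so pending jobs can be reprocessed safely. */
	drm_sched_stop(sched, bad);

	/* Bump karma; past the hang limit the bad job's entity is marked
	 * guilty and its remaining jobs are not scheduled further. */
	drm_sched_increase_karma(bad);

	/* One hardware reset, with no per-job re-submission loop anymore. */
	my_driver_reset_hw();

	/* Re-run everything still pending and resume scheduling. */
	drm_sched_resubmit_jobs(sched);
	drm_sched_start(sched, true);
}

amdgpu_device_gpu_recover() follows broadly the same shape, just without the per-job recheck step removed by this commit.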