Commit 041a62bc authored by Andrey Grodzovsky, committed by Alex Deucher

drm/amdgpu: reverts commit ce316fa5.

In preparation for doing XGMI reset synchronization using task barrier.
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Le Ma <Le.Ma@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f06a58db
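For context: a task barrier is a rendezvous point at which every participating task blocks until all of them have arrived. Applied to an XGMI hive, it lets every node enter BACO before any node exits, which is the ordering the reverted code below enforced by hand with two rounds of queue_work_on() and the in_baco flag. What follows is a minimal userspace sketch of the idea using pthreads; the names (task_barrier, reset_worker, NUM_NODES) are illustrative only, not the kernel helper that later patches in this series introduce.

#include <pthread.h>
#include <stdio.h>

#define NUM_NODES 4

/* Two-phase (sense-reversing) barrier: nobody leaves until all arrive. */
struct task_barrier {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int total;	/* number of participating tasks */
	int count;	/* arrivals in the current phase */
	int phase;	/* bumped each time the barrier opens */
};

static void task_barrier_init(struct task_barrier *tb, int total)
{
	pthread_mutex_init(&tb->lock, NULL);
	pthread_cond_init(&tb->cond, NULL);
	tb->total = total;
	tb->count = 0;
	tb->phase = 0;
}

static void task_barrier_enter(struct task_barrier *tb)
{
	pthread_mutex_lock(&tb->lock);
	int phase = tb->phase;

	if (++tb->count == tb->total) {
		tb->count = 0;
		tb->phase++;			/* last arrival opens the barrier */
		pthread_cond_broadcast(&tb->cond);
	} else {
		while (phase == tb->phase)	/* sleep until the phase flips */
			pthread_cond_wait(&tb->cond, &tb->lock);
	}
	pthread_mutex_unlock(&tb->lock);
}

static struct task_barrier tb;

static void *reset_worker(void *arg)
{
	int node = (int)(long)arg;

	printf("node %d: entering baco\n", node);	/* enter_baco() would go here */
	task_barrier_enter(&tb);	/* all nodes are now in BACO */
	printf("node %d: exiting baco\n", node);	/* exit_baco() would go here */
	task_barrier_enter(&tb);	/* all nodes have exited */
	return NULL;
}

int main(void)
{
	pthread_t threads[NUM_NODES];
	int i;

	task_barrier_init(&tb, NUM_NODES);
	for (i = 0; i < NUM_NODES; i++)
		pthread_create(&threads[i], NULL, reset_worker, (void *)(long)i);
	for (i = 0; i < NUM_NODES; i++)
		pthread_join(threads[i], NULL);
	return 0;
}

Calling task_barrier_enter() twice gives the two synchronization points the BACO sequence needs: after the first call every node is in BACO, and after the second every node has left it.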
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -994,8 +994,6 @@ struct amdgpu_device {
 	bool				pm_sysfs_en;
 	bool				ucode_sysfs_en;
-
-	bool				in_baco;
 };
 
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3797,18 +3797,13 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 	return r;
 }
 
-static int amdgpu_do_asic_reset(struct amdgpu_device *adev,
-				struct amdgpu_hive_info *hive,
+static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
 				struct list_head *device_list_handle,
 				bool *need_full_reset_arg)
 {
 	struct amdgpu_device *tmp_adev = NULL;
 	bool need_full_reset = *need_full_reset_arg, vram_lost = false;
 	int r = 0;
-	int cpu = smp_processor_id();
-	bool use_baco =
-		(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
-		true : false;
 
 	/*
 	 * ASIC reset has to be done on all HGMI hive nodes ASAP
@@ -3816,62 +3811,22 @@ static int amdgpu_do_asic_reset(struct amdgpu_device *adev,
 	 */
 	if (need_full_reset) {
 		list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-			/*
-			 * For XGMI run all resets in parallel to speed up the
-			 * process by scheduling the highpri wq on different
-			 * cpus. For XGMI with baco reset, all nodes must enter
-			 * baco within close proximity before anyone exit.
-			 */
+			/* For XGMI run all resets in parallel to speed up the process */
 			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
-				if (!queue_work_on(cpu, system_highpri_wq,
-						   &tmp_adev->xgmi_reset_work))
+				if (!queue_work(system_highpri_wq, &tmp_adev->xgmi_reset_work))
 					r = -EALREADY;
-				cpu = cpumask_next(cpu, cpu_online_mask);
 			} else
 				r = amdgpu_asic_reset(tmp_adev);
-			if (r)
-				break;
-		}
 
-		/* For XGMI wait for all work to complete before proceed */
-		if (!r) {
-			list_for_each_entry(tmp_adev, device_list_handle,
-					    gmc.xgmi.head) {
-				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
-					flush_work(&tmp_adev->xgmi_reset_work);
-					r = tmp_adev->asic_reset_res;
-					if (r)
-						break;
-					if (use_baco)
-						tmp_adev->in_baco = true;
-				}
-			}
-		}
-
-		/*
-		 * For XGMI with baco reset, need exit baco phase by scheduling
-		 * xgmi_reset_work one more time. PSP reset and sGPU skips this
-		 * phase. Not assume the situation that PSP reset and baco reset
-		 * coexist within an XGMI hive.
-		 */
-		if (!r && use_baco) {
-			cpu = smp_processor_id();
-			list_for_each_entry(tmp_adev, device_list_handle,
-					    gmc.xgmi.head) {
-				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
-					if (!queue_work_on(cpu,
-						system_highpri_wq,
-						&tmp_adev->xgmi_reset_work))
-						r = -EALREADY;
-					if (r)
-						break;
-					cpu = cpumask_next(cpu, cpu_online_mask);
-				}
-			}
-		}
+			if (r) {
+				DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
+					 r, tmp_adev->ddev->unique);
+				break;
+			}
+		}
 
-		if (!r && use_baco) {
+		/* For XGMI wait for all resets to complete before proceed */
+		if (!r) {
 			list_for_each_entry(tmp_adev, device_list_handle,
 					    gmc.xgmi.head) {
 				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
@@ -3879,16 +3834,9 @@ static int amdgpu_do_asic_reset(struct amdgpu_device *adev,
 					r = tmp_adev->asic_reset_res;
 					if (r)
 						break;
-					tmp_adev->in_baco = false;
 				}
 			}
 		}
-
-		if (r) {
-			DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
-				  r, tmp_adev->ddev->unique);
-			goto end;
-		}
 	}
 
 	if (!r && amdgpu_ras_intr_triggered())
@@ -4182,8 +4130,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		if (r)
 			adev->asic_reset_res = r;
 	} else {
-		r = amdgpu_do_asic_reset(adev, hive, device_list_handle,
-					 &need_full_reset);
+		r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset);
 		if (r && r == -EAGAIN)
 			goto retry;
 	}
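The restored path above is the simpler pre-ce316fa5 flow: queue one xgmi_reset_work item per hive node on system_highpri_wq so the resets run in parallel, then flush_work() each item so execution does not continue until every reset has finished. Below is a self-contained kernel-module sketch of that fan-out/flush pattern; demo_dev and demo_reset_fn are hypothetical stand-ins for struct amdgpu_device and the driver's reset handler.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

#define NUM_NODES 4

/* Hypothetical per-node state standing in for struct amdgpu_device. */
struct demo_dev {
	struct work_struct reset_work;	/* like tmp_adev->xgmi_reset_work */
	int id;
	int reset_res;			/* like tmp_adev->asic_reset_res */
};

static struct demo_dev nodes[NUM_NODES];

static void demo_reset_fn(struct work_struct *work)
{
	struct demo_dev *d = container_of(work, struct demo_dev, reset_work);

	pr_info("resetting node %d\n", d->id);
	d->reset_res = 0;		/* a real handler would reset the ASIC */
}

static int __init demo_init(void)
{
	int i, r = 0;

	/* Fan out: one work item per node on the high-priority workqueue. */
	for (i = 0; i < NUM_NODES; i++) {
		nodes[i].id = i;
		INIT_WORK(&nodes[i].reset_work, demo_reset_fn);
		if (!queue_work(system_highpri_wq, &nodes[i].reset_work))
			r = -EALREADY;	/* already queued, as in the restored code */
	}

	/* Join: wait for every node's reset before proceeding. */
	for (i = 0; i < NUM_NODES; i++) {
		flush_work(&nodes[i].reset_work);
		if (nodes[i].reset_res)
			r = nodes[i].reset_res;
	}

	return r;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Note the trade-off this revert accepts: plain queue_work() no longer pins each work item to a distinct CPU, and the two-phase BACO enter/exit ordering is dropped entirely, to be reintroduced on top of the task barrier.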