Commit eb98c709 authored by Christian König, committed by Alex Deucher

drm/radeon: force fence completion only on problematic rings (v2)

Instead of resetting all fence numbers, only reset the
number of the problematic ring. Split out from a patch
from Maarten Lankhorst <maarten.lankhorst@canonical.com>

v2 (agd5f): rebase build fix
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f0d970b4
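
In practice the change moves the error handling into the per-ring loops: where a failure used to set a flag and later reset the fence values of every ring, the failing ring is now handled on the spot and the other rings are left alone. A condensed sketch of the new calling pattern, taken from the radeon_suspend_kms hunk below (not a standalone snippet):

        for (i = 0; i < RADEON_NUM_RINGS; i++) {
                r = radeon_fence_wait_empty(rdev, i);
                if (r) {
                        /* delay GPU reset to resume; only unblock waiters on ring i */
                        radeon_fence_driver_force_completion(rdev, i);
                }
        }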
@@ -371,7 +371,7 @@ struct radeon_fence {
 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
 int radeon_fence_driver_init(struct radeon_device *rdev);
 void radeon_fence_driver_fini(struct radeon_device *rdev);
-void radeon_fence_driver_force_completion(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
 void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);
@@ -1488,7 +1488,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
         struct drm_crtc *crtc;
         struct drm_connector *connector;
         int i, r;
-        bool force_completion = false;
 
         if (dev == NULL || dev->dev_private == NULL) {
                 return -ENODEV;
@@ -1532,12 +1531,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
                 r = radeon_fence_wait_empty(rdev, i);
                 if (r) {
                         /* delay GPU reset to resume */
-                        force_completion = true;
+                        radeon_fence_driver_force_completion(rdev, i);
                 }
         }
-        if (force_completion) {
-                radeon_fence_driver_force_completion(rdev);
-        }
 
         radeon_save_bios_scratch_regs(rdev);
@@ -1722,8 +1718,8 @@ int radeon_gpu_reset(struct radeon_device *rdev)
                         }
                 }
         } else {
-                radeon_fence_driver_force_completion(rdev);
                 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+                        radeon_fence_driver_force_completion(rdev, i);
                         kfree(ring_data[i]);
                 }
         }
@@ -758,7 +758,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
                 r = radeon_fence_wait_empty(rdev, ring);
                 if (r) {
                         /* no need to trigger GPU reset as we are unloading */
-                        radeon_fence_driver_force_completion(rdev);
+                        radeon_fence_driver_force_completion(rdev, ring);
                 }
                 wake_up_all(&rdev->fence_queue);
                 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
@@ -771,19 +771,15 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
  * radeon_fence_driver_force_completion - force all fence waiter to complete
  *
  * @rdev: radeon device pointer
+ * @ring: the ring to complete
  *
  * In case of GPU reset failure make sure no process keep waiting on fence
  * that will never complete.
  */
-void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
 {
-        int ring;
-
-        for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
-                if (!rdev->fence_drv[ring].initialized)
-                        continue;
+        if (rdev->fence_drv[ring].initialized)
                 radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
-        }
 }
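
What the forced write achieves: fence_drv[ring].sync_seq[ring] is the last sequence number emitted on that ring, and radeon_fence_write() stores it in the per-ring location that the fence checking code reads back, so every outstanding fence on that ring subsequently compares as signaled. A self-contained toy model of that idea (names such as toy_fence_ring are illustrative only and not part of the driver; the real code additionally handles 32-bit sequence wrap-around):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of one ring's fence bookkeeping (illustrative names only). */
struct toy_fence_ring {
        uint64_t last_emitted;   /* counterpart of fence_drv[ring].sync_seq[ring] */
        uint64_t last_completed; /* counterpart of the value read back from the fence location */
};

/* A fence is signaled once the ring has completed at least its sequence number. */
static bool toy_fence_signaled(const struct toy_fence_ring *ring, uint64_t seq)
{
        return ring->last_completed >= seq;
}

/* Per-ring force completion: pretend the hardware finished everything emitted. */
static void toy_force_completion(struct toy_fence_ring *ring)
{
        ring->last_completed = ring->last_emitted;
}

int main(void)
{
        struct toy_fence_ring ring = { .last_emitted = 42, .last_completed = 40 };

        printf("fence 42 signaled before: %d\n", toy_fence_signaled(&ring, 42)); /* prints 0 */
        toy_force_completion(&ring);
        printf("fence 42 signaled after:  %d\n", toy_fence_signaled(&ring, 42)); /* prints 1 */
        return 0;
}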
@@ -269,6 +269,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
                 r = radeon_ib_test(rdev, i, ring);
                 if (r) {
+                        radeon_fence_driver_force_completion(rdev, i);
                         ring->ready = false;
                         rdev->needs_reset = false;