Commit 176e1ab1 authored by Chunming Zhou's avatar Chunming Zhou Committed by Alex Deucher

drm/amdgpu: protect fence_process from multiple context

fence_process may be called from a kthread, a user thread, or interrupt context.
It is possible for it to be called concurrently, which would wake up the fence queue multiple times.
Signed-off-by: default avatarChunming Zhou <david1.zhou@amd.com>
Reviewed-by: default avatarJammy Zhou <Jammy.Zhou@amd.com>
parent e0d8f3c3
...@@ -869,6 +869,7 @@ struct amdgpu_ring { ...@@ -869,6 +869,7 @@ struct amdgpu_ring {
struct amdgpu_fence_driver fence_drv; struct amdgpu_fence_driver fence_drv;
struct amd_gpu_scheduler *scheduler; struct amd_gpu_scheduler *scheduler;
spinlock_t fence_lock;
struct mutex *ring_lock; struct mutex *ring_lock;
struct amdgpu_bo *ring_obj; struct amdgpu_bo *ring_obj;
volatile uint32_t *ring; volatile uint32_t *ring;
......
...@@ -295,6 +295,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) ...@@ -295,6 +295,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
uint64_t seq, last_seq, last_emitted; uint64_t seq, last_seq, last_emitted;
unsigned count_loop = 0; unsigned count_loop = 0;
bool wake = false; bool wake = false;
unsigned long irqflags;
/* Note there is a scenario here for an infinite loop but it's /* Note there is a scenario here for an infinite loop but it's
* very unlikely to happen. For it to happen, the current polling * very unlikely to happen. For it to happen, the current polling
...@@ -317,6 +318,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) ...@@ -317,6 +318,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
* have temporarly set the last_seq not to the true real last * have temporarly set the last_seq not to the true real last
* seq but to an older one. * seq but to an older one.
*/ */
spin_lock_irqsave(&ring->fence_lock, irqflags);
last_seq = atomic64_read(&ring->fence_drv.last_seq); last_seq = atomic64_read(&ring->fence_drv.last_seq);
do { do {
last_emitted = ring->fence_drv.sync_seq[ring->idx]; last_emitted = ring->fence_drv.sync_seq[ring->idx];
...@@ -355,7 +357,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) ...@@ -355,7 +357,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
if (handled_seq == latest_seq) { if (handled_seq == latest_seq) {
DRM_ERROR("ring %d, EOP without seq update (lastest_seq=%llu)\n", DRM_ERROR("ring %d, EOP without seq update (lastest_seq=%llu)\n",
ring->idx, latest_seq); ring->idx, latest_seq);
return; goto exit;
} }
do { do {
amd_sched_isr(ring->scheduler); amd_sched_isr(ring->scheduler);
...@@ -364,6 +366,8 @@ void amdgpu_fence_process(struct amdgpu_ring *ring) ...@@ -364,6 +366,8 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
wake_up_all(&ring->adev->fence_queue); wake_up_all(&ring->adev->fence_queue);
} }
exit:
spin_unlock_irqrestore(&ring->fence_lock, irqflags);
} }
/** /**
......
...@@ -367,7 +367,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ...@@ -367,7 +367,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
} }
ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4); ring->next_rptr_gpu_addr = adev->wb.gpu_addr + (ring->next_rptr_offs * 4);
ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs]; ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
spin_lock_init(&ring->fence_lock);
r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type); r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
if (r) { if (r) {
dev_err(adev->dev, "failed initializing fences (%d).\n", r); dev_err(adev->dev, "failed initializing fences (%d).\n", r);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment