Commit 332dfe90 authored by monk.liu, committed by Alex Deucher

drm/amdgpu: new implementation for fence_wait_any (v2)

the original method would sleep/schedule at a granularity of HZ/2 and
was based on the seq signal method; the new implementation is based on
the kernel fence interface, with no unnecessary scheduling at all

v2: replace logic of original amdgpu_fence_wait_any
Signed-off-by: monk.liu <monk.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
parent 2e536084
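Note on the interface change below: the signature change also changes the
return convention. The old helper returned 0 on success or a negative error,
while the new one follows the kernel fence_wait_timeout() convention and
returns the remaining timeout in jiffies (> 0) on success, 0 on timeout, or a
negative error. A minimal caller-side sketch (illustrative only, mirroring
the amdgpu_sa.c hunk at the end of this patch; not itself part of the patch):

	signed long r;

	/* Wait for any fence in the array; the last argument is a
	 * timeout in jiffies.
	 */
	r = amdgpu_fence_wait_any(adev, fences, false, MAX_SCHEDULE_TIMEOUT);

	/* r > 0 means a fence signaled with time to spare; map that back
	 * to the old 0-on-success convention.  r < 0 is an error, e.g.
	 * -EDEADLK on a pending GPU reset or -ERESTARTSYS on a signal.
	 */
	r = (r > 0) ? 0 : r;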
@@ -440,9 +440,9 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
 int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
-			  struct amdgpu_fence **fences,
-			  bool intr);
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+				  struct amdgpu_fence **fences,
+				  bool intr, long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
...
@@ -630,49 +630,6 @@ int amdgpu_fence_wait(struct amdgpu_fence *fence, bool intr)
 	return 0;
 }
 
-/**
- * amdgpu_fence_wait_any - wait for a fence to signal on any ring
- *
- * @adev: amdgpu device pointer
- * @fences: amdgpu fence object(s)
- * @intr: use interruptable sleep
- *
- * Wait for any requested fence to signal (all asics). Fence
- * array is indexed by ring id. @intr selects whether to use
- * interruptable (true) or non-interruptable (false) sleep when
- * waiting for the fences. Used by the suballocator.
- * Returns 0 if any fence has passed, error for all other cases.
- */
-int amdgpu_fence_wait_any(struct amdgpu_device *adev,
-			  struct amdgpu_fence **fences,
-			  bool intr)
-{
-	uint64_t seq[AMDGPU_MAX_RINGS];
-	unsigned i, num_rings = 0;
-	long r;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		seq[i] = 0;
-
-		if (!fences[i]) {
-			continue;
-		}
-
-		seq[i] = fences[i]->seq;
-		++num_rings;
-	}
-
-	/* nothing to wait for ? */
-	if (num_rings == 0)
-		return -ENOENT;
-
-	r = amdgpu_fence_wait_seq_timeout(adev, seq, intr, MAX_SCHEDULE_TIMEOUT);
-	if (r < 0) {
-		return r;
-	}
-	return 0;
-}
-
 /**
  * amdgpu_fence_wait_next - wait for the next fence to signal
  *
@@ -1128,6 +1085,22 @@ static inline bool amdgpu_test_signaled(struct amdgpu_fence *fence)
 	return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
 }
 
+static inline bool amdgpu_test_signaled_any(struct amdgpu_fence **fences)
+{
+	int idx;
+	struct amdgpu_fence *fence;
+
+	idx = 0;
+	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+		fence = fences[idx];
+		if (fence) {
+			if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
+				return true;
+		}
+	}
+	return false;
+}
+
 struct amdgpu_wait_cb {
 	struct fence_cb base;
 	struct task_struct *task;
@@ -1182,6 +1155,62 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
 	return t;
 }
 
+/* wait until any fence in array signaled */
+signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
+				struct amdgpu_fence **array, bool intr, signed long t)
+{
+	long idx = 0;
+	struct amdgpu_wait_cb cb[AMDGPU_MAX_RINGS];
+	struct amdgpu_fence *fence;
+
+	BUG_ON(!array);
+
+	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+		fence = array[idx];
+		if (fence) {
+			cb[idx].task = current;
+			if (fence_add_callback(&fence->base,
+					&cb[idx].base, amdgpu_fence_wait_cb))
+				return t; /* return if fence is already signaled */
+		}
+	}
+
+	while (t > 0) {
+		if (intr)
+			set_current_state(TASK_INTERRUPTIBLE);
+		else
+			set_current_state(TASK_UNINTERRUPTIBLE);
+
+		/*
+		 * amdgpu_test_signaled_any must be called after
+		 * set_current_state to prevent a race with wake_up_process
+		 */
+		if (amdgpu_test_signaled_any(array))
+			break;
+
+		if (adev->needs_reset) {
+			t = -EDEADLK;
+			break;
+		}
+
+		t = schedule_timeout(t);
+
+		if (t > 0 && intr && signal_pending(current))
+			t = -ERESTARTSYS;
+	}
+
+	__set_current_state(TASK_RUNNING);
+
+	idx = 0;
+	for (idx = 0; idx < AMDGPU_MAX_RINGS; ++idx) {
+		fence = array[idx];
+		if (fence)
+			fence_remove_callback(&fence->base, &cb[idx].base);
+	}
+
+	return t;
+}
+
 const struct fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
...
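For context on the hunk above: fence_add_callback() arms a per-ring callback
that wakes the waiting task when its fence signals, which is what lets the
loop sleep in schedule_timeout() with no periodic polling. The callback body
itself sits outside the diff context; a plausible minimal sketch of it,
assuming only the struct amdgpu_wait_cb layout shown above:

	static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
	{
		struct amdgpu_wait_cb *wait =
			container_of(cb, struct amdgpu_wait_cb, base);

		/* Wake the task that registered this callback; on waking
		 * it re-checks amdgpu_test_signaled_any() in the loop.
		 */
		wake_up_process(wait->task);
	}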
@@ -350,7 +350,8 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
 		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
 		spin_unlock(&sa_manager->wq.lock);
-		r = amdgpu_fence_wait_any(adev, fences, false);
+		r = amdgpu_fence_wait_any(adev, fences, false, MAX_SCHEDULE_TIMEOUT);
+		r = (r > 0) ? 0 : r;
 		spin_lock(&sa_manager->wq.lock);
 		/* if we have nothing to wait for block */
 		if (r == -ENOENT) {
...