Commit fa688343 authored by Christian König

drm/radeon: further cleanup vm flushing & fencing

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
parent 2280ab57
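
In rough terms, the patch pulls the per-IB flush decision out of radeon_ib_schedule() into a new radeon_vm_flush() helper, drops the explicit radeon_vm_fence() call from the CS code, and lets radeon_vm_fence() also record the last flush. The stand-alone C sketch below only illustrates that ordering (flush before the IB runs, fence bookkeeping after); the struct definitions and the printf stand-in are simplified placeholders, not the real radeon types.

/*
 * Sketch of the flow this commit sets up.  All types here are
 * placeholders for the real radeon structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct fence { unsigned int seq; };     /* placeholder for struct radeon_fence */

struct vm {                             /* placeholder for struct radeon_vm */
	struct fence *fence;
	struct fence *last_flush;
};

/* rough analogue of radeon_vm_flush(): skip only if a flush is remembered */
static void vm_flush(struct vm *vm, int ring)
{
	if (!vm->last_flush || true)    /* the commit still flushes unconditionally */
		printf("ring %d: vm flush\n", ring);
}

/* rough analogue of radeon_vm_fence(): remember the fence and the last flush */
static void vm_fence(struct vm *vm, struct fence *fence)
{
	vm->fence = fence;
	if (!vm->last_flush)
		vm->last_flush = fence;
}

/* rough analogue of the new radeon_ib_schedule() ordering */
static void ib_schedule(struct vm *vm, int ring)
{
	static struct fence f;

	if (vm)
		vm_flush(vm, ring);
	/* ... execute the IB and emit its fence here ... */
	f.seq++;
	if (vm)
		vm_fence(vm, &f);
}

int main(void)
{
	struct vm vm = { 0 };

	ib_schedule(&vm, 0);
	return 0;
}
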
@@ -2804,6 +2804,9 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
 void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
 				       struct radeon_vm *vm, int ring);
+void radeon_vm_flush(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     int ring);
 void radeon_vm_fence(struct radeon_device *rdev,
 		     struct radeon_vm *vm,
 		     struct radeon_fence *fence);
...
@@ -511,10 +511,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
 		r = radeon_ib_schedule(rdev, &parser->ib, NULL);
 	}
 
-	if (!r) {
-		radeon_vm_fence(rdev, vm, parser->ib.fence);
-	}
-
 out:
 	radeon_vm_add_to_lru(rdev, vm);
 	mutex_unlock(&vm->mutex);
...
@@ -153,11 +153,9 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 		return r;
 	}
 
-	/* if we can't remember our last VM flush then flush now! */
-	/* XXX figure out why we have to flush for every IB */
-	if (ib->vm /*&& !ib->vm->last_flush*/) {
-		radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
-	}
+	if (ib->vm)
+		radeon_vm_flush(rdev, ib->vm, ib->ring);
+
 	if (const_ib) {
 		radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
 		radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
@@ -172,10 +170,10 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
 	if (const_ib) {
 		const_ib->fence = radeon_fence_ref(ib->fence);
 	}
-	/* we just flushed the VM, remember that */
-	if (ib->vm && !ib->vm->last_flush) {
-		ib->vm->last_flush = radeon_fence_ref(ib->fence);
-	}
+
+	if (ib->vm)
+		radeon_vm_fence(rdev, ib->vm, ib->fence);
+
 	radeon_ring_unlock_commit(rdev, ring);
 	return 0;
 }
...
@@ -378,6 +378,27 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
 	return NULL;
 }
 
+/**
+ * radeon_vm_flush - hardware flush the vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to flush
+ * @ring: ring to use for flush
+ *
+ * Flush the vm (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_flush(struct radeon_device *rdev,
+		     struct radeon_vm *vm,
+		     int ring)
+{
+	/* if we can't remember our last VM flush then flush now! */
+	/* XXX figure out why we have to flush all the time */
+	if (!vm->last_flush || true)
+		radeon_ring_vm_flush(rdev, ring, vm);
+}
+
 /**
  * radeon_vm_fence - remember fence for vm
  *
@@ -394,14 +415,18 @@ void radeon_vm_fence(struct radeon_device *rdev,
 		     struct radeon_vm *vm,
 		     struct radeon_fence *fence)
 {
-	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
-	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
-
 	radeon_fence_unref(&vm->fence);
 	vm->fence = radeon_fence_ref(fence);
 
+	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
+	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
+
 	radeon_fence_unref(&vm->last_id_use);
 	vm->last_id_use = radeon_fence_ref(fence);
+
+	/* we just flushed the VM, remember that */
+	if (!vm->last_flush)
+		vm->last_flush = radeon_fence_ref(fence);
 }
 
 /**
...