Commit 557017a0 authored by Christian König, committed by Dave Airlie

drm/radeon: define new SA interface v3

Define the interface without modifying the allocation
algorithm in any way.

v2: rebase on top of fence new uint64 patch
v3: add ring to debugfs output
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Christian König <deathsimple@vodafone.de>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent 2e0d9910
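As a rough illustration of how a caller is expected to use the reworked interface (a minimal sketch, not part of this commit; rdev, size and fence are assumed to come from the surrounding driver code): radeon_sa_bo_new() gains a block flag that lets the allocator wait for the fence of a busy sub-allocation and retry instead of failing, and radeon_sa_bo_free() now takes the fence protecting the buffer so its range is only recycled once the GPU has finished with it.

    struct radeon_sa_bo *sa_bo;
    int r;

    /* allocate size bytes, 256-byte aligned; block = true lets the
       allocator wait on a protecting fence and retry when the pool is full */
    r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager, &sa_bo,
                         size, 256, true);
    if (r)
        return r;

    /* ... emit GPU commands that use radeon_sa_bo_cpu_addr(sa_bo)
       and obtain the fence of that submission ... */

    /* hand the fence back so the hole is reused only after it signals;
       passing NULL frees the sub-allocation immediately */
    radeon_sa_bo_free(rdev, &sa_bo, fence);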
@@ -398,6 +398,7 @@ struct radeon_sa_bo {
struct radeon_sa_manager *manager;
unsigned soffset;
unsigned eoffset;
+ struct radeon_fence *fence;
};
/*
@@ -326,7 +326,7 @@ static void radeon_vm_unbind_locked(struct radeon_device *rdev,
rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
list_del_init(&vm->list);
vm->id = -1;
- radeon_sa_bo_free(rdev, &vm->sa_bo);
+ radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
vm->pt = NULL;
list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -395,7 +395,7 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
- RADEON_GPU_PAGE_SIZE);
+ RADEON_GPU_PAGE_SIZE, false);
if (r) {
if (list_empty(&rdev->vm_manager.lru_vm)) {
return r;
@@ -426,7 +426,7 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
/* do hw bind */
r = rdev->vm_manager.funcs->bind(rdev, vm, id);
if (r) {
- radeon_sa_bo_free(rdev, &vm->sa_bo);
+ radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
return r;
}
rdev->vm_manager.use_bitmap |= 1 << id;
@@ -169,9 +169,10 @@ extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
extern int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
- unsigned size, unsigned align);
+ unsigned size, unsigned align, bool block);
extern void radeon_sa_bo_free(struct radeon_device *rdev,
- struct radeon_sa_bo **sa_bo);
+ struct radeon_sa_bo **sa_bo,
+ struct radeon_fence *fence);
#if defined(CONFIG_DEBUG_FS)
extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
struct seq_file *m);
@@ -85,7 +85,7 @@ bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
if (ib->fence && ib->fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
if (radeon_fence_signaled(ib->fence)) {
radeon_fence_unref(&ib->fence);
- radeon_sa_bo_free(rdev, &ib->sa_bo);
+ radeon_sa_bo_free(rdev, &ib->sa_bo, NULL);
done = true;
}
}
@@ -124,7 +124,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
if (rdev->ib_pool.ibs[idx].fence == NULL) {
r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
&rdev->ib_pool.ibs[idx].sa_bo,
- size, 256);
+ size, 256, false);
if (!r) {
*ib = &rdev->ib_pool.ibs[idx];
(*ib)->ptr = radeon_sa_bo_cpu_addr((*ib)->sa_bo);
@@ -173,7 +173,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
}
radeon_mutex_lock(&rdev->ib_pool.mutex);
if (tmp->fence && tmp->fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
- radeon_sa_bo_free(rdev, &tmp->sa_bo);
+ radeon_sa_bo_free(rdev, &tmp->sa_bo, NULL);
radeon_fence_unref(&tmp->fence);
}
radeon_mutex_unlock(&rdev->ib_pool.mutex);
@@ -247,7 +247,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
radeon_mutex_lock(&rdev->ib_pool.mutex);
if (rdev->ib_pool.ready) {
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
+ radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo, NULL);
radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
}
radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
@@ -129,20 +129,32 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
*
* Alignment can't be bigger than page size
*/
+ static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
+ {
+ list_del(&sa_bo->list);
+ radeon_fence_unref(&sa_bo->fence);
+ kfree(sa_bo);
+ }
int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
- unsigned size, unsigned align)
+ unsigned size, unsigned align, bool block)
{
- struct radeon_sa_bo *tmp;
+ struct radeon_fence *fence = NULL;
+ struct radeon_sa_bo *tmp, *next;
struct list_head *head;
unsigned offset = 0, wasted = 0;
+ int r;
BUG_ON(align > RADEON_GPU_PAGE_SIZE);
BUG_ON(size > sa_manager->size);
*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
+ retry:
spin_lock(&sa_manager->lock);
/* no one ? */
@@ -153,7 +165,17 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
/* look for a hole big enough */
offset = 0;
- list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
+ list_for_each_entry_safe(tmp, next, &sa_manager->sa_bo, list) {
+ /* try to free this object */
+ if (tmp->fence) {
+ if (radeon_fence_signaled(tmp->fence)) {
+ radeon_sa_bo_remove_locked(tmp);
+ continue;
+ } else {
+ fence = tmp->fence;
+ }
+ }
/* room before this object ? */
if (offset < tmp->soffset && (tmp->soffset - offset) >= size) {
head = tmp->list.prev;
@@ -178,6 +200,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
if ((sa_manager->size - offset) < size) {
/* failed to find somethings big enough */
spin_unlock(&sa_manager->lock);
+ if (block && fence) {
+ r = radeon_fence_wait(fence, false);
+ if (r)
+ return r;
+ goto retry;
+ }
kfree(*sa_bo);
*sa_bo = NULL;
return -ENOMEM;
@@ -192,15 +221,22 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
return 0;
}
- void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo)
+ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
+ struct radeon_fence *fence)
{
+ struct radeon_sa_manager *sa_manager;
if (!sa_bo || !*sa_bo)
return;
- spin_lock(&(*sa_bo)->manager->lock);
- list_del_init(&(*sa_bo)->list);
- spin_unlock(&(*sa_bo)->manager->lock);
- kfree(*sa_bo);
+ sa_manager = (*sa_bo)->manager;
+ spin_lock(&sa_manager->lock);
+ if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
+ (*sa_bo)->fence = radeon_fence_ref(fence);
+ } else {
+ radeon_sa_bo_remove_locked(*sa_bo);
+ }
+ spin_unlock(&sa_manager->lock);
*sa_bo = NULL;
}
@@ -212,8 +248,14 @@ void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
spin_lock(&sa_manager->lock);
list_for_each_entry(i, &sa_manager->sa_bo, list) {
seq_printf(m, "[%08x %08x] size %4d [%p]\n",
seq_printf(m, "[%08x %08x] size %4d (%p)",
i->soffset, i->eoffset, i->eoffset - i->soffset, i);
+ if (i->fence) {
+ seq_printf(m, " protected by %Ld (%p) on ring %d\n",
+ i->fence->seq, i->fence, i->fence->ring);
+ } else {
+ seq_printf(m, "\n");
+ }
}
spin_unlock(&sa_manager->lock);
}
@@ -72,7 +72,7 @@ static int radeon_semaphore_add_bo(struct radeon_device *rdev)
static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
struct radeon_semaphore_bo *bo)
{
- radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
+ radeon_sa_bo_free(rdev, &bo->ib->sa_bo, NULL);
radeon_fence_unref(&bo->ib->fence);
list_del(&bo->list);
kfree(bo);