Commit c507f7ef authored by Jerome Glisse, committed by Dave Airlie

drm/radeon: rip out the ib pool

It isn't necessary any more and the suballocator seems to perform
even better.
Signed-off-by: Christian König <deathsimple@vodafone.de>
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
parent a8c05940
...@@ -625,7 +625,6 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); ...@@ -625,7 +625,6 @@ void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
struct radeon_ib { struct radeon_ib {
struct radeon_sa_bo *sa_bo; struct radeon_sa_bo *sa_bo;
unsigned idx;
uint32_t length_dw; uint32_t length_dw;
uint64_t gpu_addr; uint64_t gpu_addr;
uint32_t *ptr; uint32_t *ptr;
...@@ -634,18 +633,6 @@ struct radeon_ib { ...@@ -634,18 +633,6 @@ struct radeon_ib {
bool is_const_ib; bool is_const_ib;
}; };
/*
* locking -
* mutex protects scheduled_ibs, ready, alloc_bm
*/
struct radeon_ib_pool {
struct radeon_mutex mutex;
struct radeon_sa_manager sa_manager;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
unsigned head_id;
};
struct radeon_ring { struct radeon_ring {
struct radeon_bo *ring_obj; struct radeon_bo *ring_obj;
volatile uint32_t *ring; volatile uint32_t *ring;
...@@ -787,7 +774,6 @@ struct si_rlc { ...@@ -787,7 +774,6 @@ struct si_rlc {
int radeon_ib_get(struct radeon_device *rdev, int ring, int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib **ib, unsigned size); struct radeon_ib **ib, unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib); void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev); int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev); void radeon_ib_pool_fini(struct radeon_device *rdev);
...@@ -1522,7 +1508,8 @@ struct radeon_device { ...@@ -1522,7 +1508,8 @@ struct radeon_device {
wait_queue_head_t fence_queue; wait_queue_head_t fence_queue;
struct mutex ring_lock; struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS]; struct radeon_ring ring[RADEON_NUM_RINGS];
struct radeon_ib_pool ib_pool; bool ib_pool_ready;
struct radeon_sa_manager ring_tmp_bo;
struct radeon_irq irq; struct radeon_irq irq;
struct radeon_asic *asic; struct radeon_asic *asic;
struct radeon_gem gem; struct radeon_gem gem;
......
...@@ -724,7 +724,6 @@ int radeon_device_init(struct radeon_device *rdev, ...@@ -724,7 +724,6 @@ int radeon_device_init(struct radeon_device *rdev,
/* mutex initialization are all done here so we /* mutex initialization are all done here so we
* can recall function without having locking issues */ * can recall function without having locking issues */
radeon_mutex_init(&rdev->cs_mutex); radeon_mutex_init(&rdev->cs_mutex);
radeon_mutex_init(&rdev->ib_pool.mutex);
mutex_init(&rdev->ring_lock); mutex_init(&rdev->ring_lock);
mutex_init(&rdev->dc_hw_i2c_mutex); mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600) if (rdev->family >= CHIP_R600)
......
...@@ -432,8 +432,8 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm) ...@@ -432,8 +432,8 @@ int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
rdev->vm_manager.use_bitmap |= 1 << id; rdev->vm_manager.use_bitmap |= 1 << id;
vm->id = id; vm->id = id;
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm); list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo, return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
&rdev->ib_pool.sa_manager.bo->tbo.mem); &rdev->ring_tmp_bo.bo->tbo.mem);
} }
/* object have to be reserved */ /* object have to be reserved */
...@@ -631,7 +631,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) ...@@ -631,7 +631,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
/* map the ib pool buffer at 0 in virtual address space, set /* map the ib pool buffer at 0 in virtual address space, set
* read only * read only
*/ */
r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0, r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
return r; return r;
} }
...@@ -648,12 +648,12 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) ...@@ -648,12 +648,12 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_mutex_unlock(&rdev->cs_mutex); radeon_mutex_unlock(&rdev->cs_mutex);
/* remove all bo */ /* remove all bo */
r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false); r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (!r) { if (!r) {
bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm); bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
list_del_init(&bo_va->bo_list); list_del_init(&bo_va->bo_list);
list_del_init(&bo_va->vm_list); list_del_init(&bo_va->vm_list);
radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo); radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
kfree(bo_va); kfree(bo_va);
} }
if (!list_empty(&vm->va)) { if (!list_empty(&vm->va)) {
......
This diff is collapsed.
...@@ -41,7 +41,7 @@ int radeon_semaphore_create(struct radeon_device *rdev, ...@@ -41,7 +41,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
if (*semaphore == NULL) { if (*semaphore == NULL) {
return -ENOMEM; return -ENOMEM;
} }
r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager, r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
&(*semaphore)->sa_bo, 8, 8, true); &(*semaphore)->sa_bo, 8, 8, true);
if (r) { if (r) {
kfree(*semaphore); kfree(*semaphore);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment