Commit 5a5011a7 authored by Gerd Hoffmann
parent 4922f552
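What follows is the commit's diff. Every hunk makes the same, mostly mechanical substitution: amdgpu stops reaching for the TTM-private reservation pointer (bo->tbo.resv, or bo->resv on a bare struct ttm_buffer_object) and instead uses the reservation object embedded in the GEM base object, bo->tbo.base.resv. Below is a minimal sketch of that access-path change; the struct layouts are heavily simplified stand-ins for the real kernel definitions, and amdgpu_bo_resv() is a hypothetical helper added purely for illustration, not part of this patch.

/* Minimal sketch (compilable standalone with these stub types) of the
 * access path before and after the change.  Only the fields relevant
 * to the rename are shown; the real kernel structs have many more.
 */
struct reservation_object;                 /* dma-buf reservation object */

struct drm_gem_object {
	struct reservation_object *resv;   /* GEM-embedded pointer, used after this patch */
};

struct ttm_buffer_object {
	struct drm_gem_object base;        /* embedded GEM object */
	struct reservation_object *resv;   /* TTM-private pointer, no longer used by amdgpu */
};

struct amdgpu_bo {
	struct ttm_buffer_object tbo;
};

/* Hypothetical helper, only to show the old vs. new path side by side. */
static inline struct reservation_object *
amdgpu_bo_resv(struct amdgpu_bo *bo)
{
	return bo->tbo.base.resv;          /* was: bo->tbo.resv */
}

The one hunk that is not a pure rename is in amdgpu_bo_do_create(), where the explicit mirroring assignment bo->tbo.base.resv = bo->tbo.resv is dropped, presumably because the GEM-embedded reservation object no longer needs to be patched up by hand once every reader goes through bo->tbo.base.resv.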
@@ -218,7 +218,7 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 					struct amdgpu_amdkfd_fence *ef)
 {
-	struct reservation_object *resv = bo->tbo.resv;
+	struct reservation_object *resv = bo->tbo.base.resv;
 	struct reservation_object_list *old, *new;
 	unsigned int i, j, k;
@@ -812,7 +812,7 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
 		struct amdgpu_bo *pd = peer_vm->root.base.bo;
 		ret = amdgpu_sync_resv(NULL,
-					sync, pd->tbo.resv,
+					sync, pd->tbo.base.resv,
 					AMDGPU_FENCE_OWNER_KFD, false);
 		if (ret)
 			return ret;
@@ -887,7 +887,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 				  AMDGPU_FENCE_OWNER_KFD, false);
 	if (ret)
 		goto wait_pd_fail;
-	ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
+	ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
 	amdgpu_bo_fence(vm->root.base.bo,
@@ -2132,7 +2132,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
 	 * Add process eviction fence to bo so they can
 	 * evict each other.
 	 */
-	ret = reservation_object_reserve_shared(gws_bo->tbo.resv, 1);
+	ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
@@ -402,7 +402,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 	struct ttm_operation_ctx ctx = {
 		.interruptible = true,
 		.no_wait_gpu = false,
-		.resv = bo->tbo.resv,
+		.resv = bo->tbo.base.resv,
 		.flags = 0
 	};
 	uint32_t domain;
@@ -730,7 +730,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 	list_for_each_entry(e, &p->validated, tv.head) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-		struct reservation_object *resv = bo->tbo.resv;
+		struct reservation_object *resv = bo->tbo.base.resv;
 		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
 				     amdgpu_bo_explicit_sync(bo));
@@ -1729,7 +1729,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 	*map = mapping;
 	/* Double check that the BO is reserved by this CS */
-	if (reservation_object_locking_ctx((*bo)->tbo.resv) != &parser->ticket)
+	if (reservation_object_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
 		return -EINVAL;
 	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
@@ -204,7 +204,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
-	r = reservation_object_get_fences_rcu(new_abo->tbo.resv, &work->excl,
+	r = reservation_object_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
 					      &work->shared_count,
 					      &work->shared);
 	if (unlikely(r != 0)) {
@@ -216,7 +216,7 @@ static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
 		 * fences on the reservation object into a single exclusive
 		 * fence.
 		 */
-		r = __reservation_object_make_exclusive(bo->tbo.resv);
+		r = __reservation_object_make_exclusive(bo->tbo.base.resv);
 		if (r)
 			goto error_unreserve;
 	}
@@ -134,7 +134,7 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
 		return -EPERM;
 	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
-	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
+	    abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
 		return -EPERM;
 	r = amdgpu_bo_reserve(abo, false);
@@ -252,7 +252,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 		if (r)
 			return r;
-		resv = vm->root.base.bo->tbo.resv;
+		resv = vm->root.base.bo->tbo.base.resv;
 	}
 	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
@@ -433,7 +433,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
+	ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true,
 						  timeout);
 	/* ret == 0 means not signaled,
@@ -1088,7 +1088,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
 	amdgpu_vm_fini(adev, &fpriv->vm);
 	if (pasid)
-		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
+		amdgpu_pasid_free_delayed(pd->tbo.base.resv, pasid);
 	amdgpu_bo_unref(&pd);
 	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
@@ -179,7 +179,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
 			continue;
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+		r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
 			true, false, MAX_SCHEDULE_TIMEOUT);
 		if (r <= 0)
 			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
@@ -509,8 +509,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	if (unlikely(r != 0))
 		return r;
-	bo->tbo.base.resv = bo->tbo.resv;
 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
 	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
@@ -523,7 +521,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	    bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
 		struct dma_fence *fence;
-		r = amdgpu_fill_buffer(bo, 0, bo->tbo.resv, &fence);
+		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
 		if (unlikely(r))
 			goto fail_unreserve;
@@ -546,7 +544,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 fail_unreserve:
 	if (!bp->resv)
-		reservation_object_unlock(bo->tbo.resv);
+		reservation_object_unlock(bo->tbo.base.resv);
 	amdgpu_bo_unref(&bo);
 	return r;
 }
@@ -567,7 +565,7 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
 		AMDGPU_GEM_CREATE_SHADOW;
 	bp.type = ttm_bo_type_kernel;
-	bp.resv = bo->tbo.resv;
+	bp.resv = bo->tbo.base.resv;
 	r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
 	if (!r) {
@@ -608,13 +606,13 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
 		if (!bp->resv)
-			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
+			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.base.resv,
 							NULL));
 		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
 		if (!bp->resv)
-			reservation_object_unlock((*bo_ptr)->tbo.resv);
+			reservation_object_unlock((*bo_ptr)->tbo.base.resv);
 		if (r)
 			amdgpu_bo_unref(bo_ptr);
@@ -711,7 +709,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 		return 0;
 	}
-	r = reservation_object_wait_timeout_rcu(bo->tbo.resv, false, false,
+	r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, false, false,
 						MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;
@@ -1089,7 +1087,7 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
  */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
-	reservation_object_assert_held(bo->tbo.resv);
+	reservation_object_assert_held(bo->tbo.base.resv);
 	if (tiling_flags)
 		*tiling_flags = bo->tiling_flags;
@@ -1285,7 +1283,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared)
 {
-	struct reservation_object *resv = bo->tbo.resv;
+	struct reservation_object *resv = bo->tbo.base.resv;
 	if (shared)
 		reservation_object_add_shared_fence(resv, fence);
@@ -1310,7 +1308,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
 	int r;
 	amdgpu_sync_create(&sync);
-	amdgpu_sync_resv(adev, &sync, bo->tbo.resv, owner, false);
+	amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
 	r = amdgpu_sync_wait(&sync, intr);
 	amdgpu_sync_free(&sync);
@@ -1330,7 +1328,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-	WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.resv) &&
+	WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.base.resv) &&
 		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
 	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
@@ -440,7 +440,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
 	r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
 				       new_mem->num_pages << PAGE_SHIFT,
-				       bo->resv, &fence);
+				       bo->base.resv, &fence);
 	if (r)
 		goto error;
@@ -1478,18 +1478,18 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	 * cleanly handle page faults.
 	 */
 	if (bo->type == ttm_bo_type_kernel &&
-	    !reservation_object_test_signaled_rcu(bo->resv, true))
+	    !reservation_object_test_signaled_rcu(bo->base.resv, true))
 		return false;
 	/* If bo is a KFD BO, check if the bo belongs to the current process.
 	 * If true, then return false as any KFD process needs all its BOs to
 	 * be resident to run successfully
 	 */
-	flist = reservation_object_get_list(bo->resv);
+	flist = reservation_object_get_list(bo->base.resv);
 	if (flist) {
 		for (i = 0; i < flist->shared_count; ++i) {
 			f = rcu_dereference_protected(flist->shared[i],
-				reservation_object_held(bo->resv));
+				reservation_object_held(bo->base.resv));
 			if (amdkfd_fence_check_mm(f, current->mm))
 				return false;
 		}
@@ -1073,7 +1073,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 	if (direct) {
-		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+		r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
 							true, false,
 							msecs_to_jiffies(10));
 		if (r == 0)
@@ -1085,7 +1085,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		if (r)
 			goto err_free;
 	} else {
-		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
+		r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
 				     AMDGPU_FENCE_OWNER_UNDEFINED, false);
 		if (r)
 			goto err_free;
@@ -302,7 +302,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 	base->next = bo->vm_bo;
 	bo->vm_bo = base;
-	if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+	if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
 		return;
 	vm->bulk_moveable = false;
@@ -583,7 +583,7 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
 	for (bo_base = abo->vm_bo; bo_base; bo_base = bo_base->next) {
 		struct amdgpu_vm *vm = bo_base->vm;
-		if (abo->tbo.resv == vm->root.base.bo->tbo.resv)
+		if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
 			vm->bulk_moveable = false;
 	}
@@ -834,7 +834,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
 	bp->type = ttm_bo_type_kernel;
 	if (vm->root.base.bo)
-		bp->resv = vm->root.base.bo->tbo.resv;
+		bp->resv = vm->root.base.bo->tbo.base.resv;
 }
 /**
@@ -1702,7 +1702,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
 			pages_addr = ttm->dma_address;
 		}
-		exclusive = reservation_object_get_excl(bo->tbo.resv);
+		exclusive = reservation_object_get_excl(bo->tbo.base.resv);
 	}
 	if (bo) {
@@ -1712,7 +1712,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 		flags = 0x0;
 	}
-	if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
+	if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
 		last_update = &vm->last_update;
 	else
 		last_update = &bo_va->last_pt_update;
@@ -1743,7 +1743,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 	 * the evicted list so that it gets validated again on the
 	 * next command submission.
 	 */
-	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
 		uint32_t mem_type = bo->tbo.mem.mem_type;
 		if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
@@ -1879,7 +1879,7 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
  */
 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
-	struct reservation_object *resv = vm->root.base.bo->tbo.resv;
+	struct reservation_object *resv = vm->root.base.bo->tbo.base.resv;
 	struct dma_fence *excl, **shared;
 	unsigned i, shared_count;
 	int r;
@@ -1993,7 +1993,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 	while (!list_empty(&vm->invalidated)) {
 		bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
 					 base.vm_status);
-		resv = bo_va->base.bo->tbo.resv;
+		resv = bo_va->base.bo->tbo.base.resv;
 		spin_unlock(&vm->invalidated_lock);
 		/* Try to reserve the BO to avoid clearing its ptes */
@@ -2084,7 +2084,7 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
 	if (mapping->flags & AMDGPU_PTE_PRT)
 		amdgpu_vm_prt_get(adev);
-	if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
+	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv &&
 	    !bo_va->base.moved) {
 		list_move(&bo_va->base.vm_status, &vm->moved);
 	}
@@ -2416,7 +2416,7 @@ void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
 			struct amdgpu_bo *bo;
 			bo = mapping->bo_va->base.bo;
-			if (reservation_object_locking_ctx(bo->tbo.resv) !=
+			if (reservation_object_locking_ctx(bo->tbo.base.resv) !=
 			    ticket)
 				continue;
 		}
@@ -2444,7 +2444,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 	struct amdgpu_vm_bo_base **base;
 	if (bo) {
-		if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
+		if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
 			vm->bulk_moveable = false;
 		for (base = &bo_va->base.bo->vm_bo; *base;
@@ -2508,7 +2508,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
 		struct amdgpu_vm *vm = bo_base->vm;
-		if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+		if (evicted && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
 			amdgpu_vm_bo_evicted(bo_base);
 			continue;
 		}
@@ -2519,7 +2519,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
 		if (bo->tbo.type == ttm_bo_type_kernel)
 			amdgpu_vm_bo_relocated(bo_base);
-		else if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
+		else if (bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
 			amdgpu_vm_bo_moved(bo_base);
 		else
 			amdgpu_vm_bo_invalidated(bo_base);
@@ -2649,7 +2649,7 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.resv,
+	return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
 						   true, true, timeout);
 }
@@ -2724,7 +2724,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	if (r)
 		goto error_free_root;
-	r = reservation_object_reserve_shared(root->tbo.resv, 1);
+	r = reservation_object_reserve_shared(root->tbo.base.resv, 1);
 	if (r)
 		goto error_unreserve;
@@ -72,7 +72,7 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 	if (r)
 		return r;
-	r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.resv,
+	r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
 			     owner, false);
 	if (r)
 		return r;
@@ -5693,7 +5693,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * deadlock during GPU reset when this fence will not signal
 		 * but we hold reservation lock for the BO.
 		 */
-		r = reservation_object_wait_timeout_rcu(abo->tbo.resv, true,
+		r = reservation_object_wait_timeout_rcu(abo->tbo.base.resv, true,
 							false,
 							msecs_to_jiffies(5000));
 		if (unlikely(r <= 0))
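Several hunks above (init_kfd_vm(), amdgpu_amdkfd_add_gws_to_process(), amdgpu_bo_fence() and amdgpu_vm_init()) repeat the same idiom on the now GEM-embedded reservation object: first reserve a slot for one more shared fence, then attach the fence. The sketch below restates that pattern in one place, using only the reservation_object calls that appear in the hunks; example_attach_shared_fence() is a made-up helper for illustration, the headers are assumed from the pre-dma_resv kernel tree, and locking/error handling is simplified.

#include <linux/reservation.h>	/* reservation_object_* API (pre dma_resv rename) */
#include "amdgpu_object.h"	/* struct amdgpu_bo, assumed available in-tree */

/* Hypothetical helper showing the reserve-then-add shared fence idiom
 * visible in the hunks above.  The caller is expected to hold the
 * reservation lock, as amdgpu does around amdgpu_bo_fence().
 */
static int example_attach_shared_fence(struct amdgpu_bo *bo,
					struct dma_fence *fence)
{
	struct reservation_object *resv = bo->tbo.base.resv;
	int r;

	/* Make room for one more shared fence slot before adding it. */
	r = reservation_object_reserve_shared(resv, 1);
	if (r)
		return r;

	/* amdgpu_bo_fence(bo, fence, true) boils down to this call. */
	reservation_object_add_shared_fence(resv, fence);
	return 0;
}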