Commit 9165fb87 authored by Christian König

drm/ttm: always keep BOs on the LRU

This allows the memory management code to block until
BOs become available.

Amdgpu has been doing this during CS for quite a while now.
Now apply the same behavior to all drivers using TTM.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Thomas Hellstrom <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/332878/
parent 7fb03cc3
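The driver-facing change is that ttm_eu_reserve_buffers() loses its trailing del_lru parameter: reserved BOs now always stay on the LRU, and fencing or unreserving simply moves them to the LRU tail. Below is a minimal, hedged sketch of the resulting caller pattern; the function and variable names (my_reserve_and_fence, validated) are illustrative only and not part of this patch.

#include <linux/list.h>
#include <linux/ww_mutex.h>
#include <linux/dma-fence.h>
#include <drm/ttm/ttm_execbuf_util.h>

/*
 * Illustrative caller pattern after this patch (names are made up for the
 * example): reservation no longer takes a del_lru flag, and the BOs remain
 * on their LRU lists for the whole time they are reserved.
 */
static int my_reserve_and_fence(struct ww_acquire_ctx *ticket,
				struct list_head *validated,
				struct dma_fence *fence)
{
	struct list_head duplicates;
	int r;

	INIT_LIST_HEAD(&duplicates);

	/* interruptible reservation; note the dropped trailing bool */
	r = ttm_eu_reserve_buffers(ticket, validated, true, &duplicates);
	if (r)
		return r;

	/* ... validate buffers and queue work here ... */

	/* attach the fence and bump every BO to the tail of its LRU */
	ttm_eu_fence_buffer_objects(ticket, validated, fence);
	return 0;
}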
@@ -586,7 +586,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
 	amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
 
 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-				     false, &ctx->duplicates, true);
+				     false, &ctx->duplicates);
 	if (!ret)
 		ctx->reserved = true;
 	else {
@@ -659,7 +659,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
 	}
 
 	ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-				     false, &ctx->duplicates, true);
+				     false, &ctx->duplicates);
 	if (!ret)
 		ctx->reserved = true;
 	else
@@ -1797,8 +1797,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 	}
 
 	/* Reserve all BOs and page tables for validation */
-	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
-				     true);
+	ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
 	WARN(!list_empty(&duplicates), "Duplicates should be empty");
 	if (ret)
 		goto out_free;
@@ -1996,7 +1995,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 	}
 
 	ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
-				     false, &duplicate_save, true);
+				     false, &duplicate_save);
 	if (ret) {
 		pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
 		goto ttm_reserve_fail;
...
@@ -650,7 +650,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	}
 
 	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
-				   &duplicates, false);
+				   &duplicates);
 	if (unlikely(r != 0)) {
 		if (r != -ERESTARTSYS)
 			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
...
@@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	list_add(&csa_tv.head, &list);
 	amdgpu_vm_get_pd_bo(vm, &list, &pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, false);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 	if (r) {
 		DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
 		return r;
...
@@ -175,7 +175,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
+	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
 	if (r) {
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
@@ -613,7 +613,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r)
 		goto error_unref;
...
@@ -4494,7 +4494,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
 	tv.num_shared = 1;
 	list_add(&tv.head, &list);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true);
+	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
 	if (r) {
 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
 		return r;
...
@@ -260,7 +260,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
 		return 0;
 
 	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
-				     !no_intr, NULL, true);
+				     !no_intr, NULL);
 	if (ret)
 		return ret;
@@ -459,7 +459,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
 		bo = entry->bo;
 
 		dma_resv_add_shared_fence(bo->base.resv, &release->base);
-		ttm_bo_add_to_lru(bo);
+		ttm_bo_move_to_lru_tail(bo, NULL);
 		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
...
@@ -566,7 +566,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 	if (!vm_bos)
 		return;
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
 	if (r)
 		goto error_free;
...
@@ -542,7 +542,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 
 	INIT_LIST_HEAD(&duplicates);
-	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
+	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
 	if (unlikely(r != 0)) {
 		return r;
 	}
...
@@ -192,18 +192,12 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
 		}
 	}
 
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
-{
-	ttm_bo_add_mem_to_lru(bo, &bo->mem);
-}
-EXPORT_SYMBOL(ttm_bo_add_to_lru);
-
 static void ttm_bo_ref_bug(struct kref *list_kref)
 {
 	BUG();
 }
 
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	bool notify = false;
@@ -223,16 +217,6 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 		bdev->driver->del_from_lru_notify(bo);
 }
 
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_global *glob = bo->bdev->glob;
-
-	spin_lock(&glob->lru_lock);
-	ttm_bo_del_from_lru(bo);
-	spin_unlock(&glob->lru_lock);
-}
-EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
-
 static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
 				     struct ttm_buffer_object *bo)
 {
@@ -247,7 +231,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 	dma_resv_assert_held(bo->base.resv);
 
 	ttm_bo_del_from_lru(bo);
-	ttm_bo_add_to_lru(bo);
+	ttm_bo_add_mem_to_lru(bo, &bo->mem);
 
 	if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
 		switch (bo->mem.mem_type) {
@@ -511,7 +495,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		 */
 		if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
 			bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
-			ttm_bo_add_to_lru(bo);
+			ttm_bo_move_to_lru_tail(bo, NULL);
 		}
 
 		dma_resv_unlock(bo->base.resv);
@@ -895,17 +879,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 		return ret;
 	}
 
-	ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
 
 	ret = ttm_bo_evict(bo, ctx);
-	if (locked) {
+	if (locked)
 		ttm_bo_unreserve(bo);
-	} else {
-		spin_lock(&glob->lru_lock);
-		ttm_bo_add_to_lru(bo);
-		spin_unlock(&glob->lru_lock);
-	}
 
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
@@ -1067,12 +1045,10 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
 	mem->mem_type = mem_type;
 	mem->placement = cur_flags;
 
-	if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) {
-		spin_lock(&bo->bdev->glob->lru_lock);
-		ttm_bo_del_from_lru(bo);
-		ttm_bo_add_mem_to_lru(bo, mem);
-		spin_unlock(&bo->bdev->glob->lru_lock);
-	}
+	spin_lock(&bo->bdev->glob->lru_lock);
+	ttm_bo_del_from_lru(bo);
+	ttm_bo_add_mem_to_lru(bo, mem);
+	spin_unlock(&bo->bdev->glob->lru_lock);
 
 	return 0;
 }
@@ -1377,11 +1353,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		return ret;
 	}
 
-	if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-		spin_lock(&bdev->glob->lru_lock);
-		ttm_bo_add_to_lru(bo);
-		spin_unlock(&bdev->glob->lru_lock);
-	}
+	spin_lock(&bdev->glob->lru_lock);
+	ttm_bo_move_to_lru_tail(bo, NULL);
+	spin_unlock(&bdev->glob->lru_lock);
 
 	return ret;
 }
...
@@ -43,16 +43,6 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
 	}
 }
 
-static void ttm_eu_del_from_lru_locked(struct list_head *list)
-{
-	struct ttm_validate_buffer *entry;
-
-	list_for_each_entry(entry, list, head) {
-		struct ttm_buffer_object *bo = entry->bo;
-
-		ttm_bo_del_from_lru(bo);
-	}
-}
-
 void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 				struct list_head *list)
 {
@@ -69,8 +59,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-		if (list_empty(&bo->lru))
-			ttm_bo_add_to_lru(bo);
+		ttm_bo_move_to_lru_tail(bo, NULL);
 		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
@@ -94,7 +83,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 			   struct list_head *list, bool intr,
-			   struct list_head *dups, bool del_lru)
+			   struct list_head *dups)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
@@ -168,11 +157,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 		list_add(&entry->head, list);
 	}
 
-	if (del_lru) {
-		spin_lock(&glob->lru_lock);
-		ttm_eu_del_from_lru_locked(list);
-		spin_unlock(&glob->lru_lock);
-	}
 	return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -199,10 +183,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
 			dma_resv_add_shared_fence(bo->base.resv, fence);
 		else
 			dma_resv_add_excl_fence(bo->base.resv, fence);
-		if (list_empty(&bo->lru))
-			ttm_bo_add_to_lru(bo);
-		else
-			ttm_bo_move_to_lru_tail(bo, NULL);
+		ttm_bo_move_to_lru_tail(bo, NULL);
 		dma_resv_unlock(bo->base.resv);
 	}
 	spin_unlock(&glob->lru_lock);
...
@@ -492,8 +492,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 	val_buf->bo = &res->backup->base;
 	val_buf->num_shared = 0;
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
-				     true);
+	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
...
@@ -170,7 +170,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
 			  bool intr)
 {
 	return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
-				      NULL, true);
+				      NULL);
 }
 
 /**
...
@@ -360,30 +360,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
  */
 void ttm_bo_put(struct ttm_buffer_object *bo);
 
-/**
- * ttm_bo_add_to_lru
- *
- * @bo: The buffer object.
- *
- * Add this bo to the relevant mem type lru and, if it's backed by
- * system pages (ttms) to the swap list.
- * This function must be called with struct ttm_bo_global::lru_lock held, and
- * is typically called immediately prior to unreserving a bo.
- */
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_del_from_lru
- *
- * @bo: The buffer object.
- *
- * Remove this bo from all lru lists used to lookup and reserve an object.
- * This function must be called with struct ttm_bo_global::lru_lock held,
- * and is usually called just immediately after the bo has been reserved to
- * avoid recursive reservation from lru lists.
- */
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
-
 /**
  * ttm_bo_move_to_lru_tail
  *
...
@@ -631,9 +631,6 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
 int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
 void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
 
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
 /**
  * __ttm_bo_reserve:
  *
@@ -727,15 +724,9 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
 				 bool interruptible, bool no_wait,
 				 struct ww_acquire_ctx *ticket)
 {
-	int ret;
-
 	WARN_ON(!kref_read(&bo->kref));
 
-	ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
-	if (likely(ret == 0))
-		ttm_bo_del_sub_from_lru(bo);
-
-	return ret;
+	return __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
 }
 
 /**
@@ -762,9 +753,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 	else
 		dma_resv_lock_slow(bo->base.resv, ticket);
 
-	if (likely(ret == 0))
-		ttm_bo_del_sub_from_lru(bo);
-	else if (ret == -EINTR)
+	if (ret == -EINTR)
 		ret = -ERESTARTSYS;
 
 	return ret;
@@ -780,10 +769,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
 	spin_lock(&bo->bdev->glob->lru_lock);
-	if (list_empty(&bo->lru))
-		ttm_bo_add_to_lru(bo);
-	else
-		ttm_bo_move_to_lru_tail(bo, NULL);
+	ttm_bo_move_to_lru_tail(bo, NULL);
 	spin_unlock(&bo->bdev->glob->lru_lock);
 	dma_resv_unlock(bo->base.resv);
 }
...
@@ -99,7 +99,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 				  struct list_head *list, bool intr,
-				  struct list_head *dups, bool del_lru);
+				  struct list_head *dups);
 
 /**
  * function ttm_eu_fence_buffer_objects.
...