Commit fb5ce730 authored by Christian König

dma-buf: rename and cleanup dma_resv_get_list v2

When the comment needs to state explicitly that this doesn't get a reference
to the object, then the function is named rather badly.

Rename the function and use it in even more places.

v2: use dma_resv_shared_list as new name
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602111714.212426-5-christian.koenig@amd.com
parent 6edbd6ab
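
For context, a minimal sketch (not part of this commit) of the read-side pattern the renamed helper is meant for. Since dma_resv_shared_list() does not take a reference to the list or its fences, a reader holds the RCU read side lock around any access; the helper name count_shared_fences() is hypothetical.

#include <linux/dma-resv.h>
#include <linux/rcupdate.h>

/*
 * Hypothetical caller: count the shared fences on a reservation object.
 * dma_resv_shared_list() pins nothing, so the list is only dereferenced
 * while the RCU read side lock is held.
 */
static unsigned int count_shared_fences(struct dma_resv *obj)
{
	struct dma_resv_list *list;
	unsigned int count = 0;

	rcu_read_lock();
	list = dma_resv_shared_list(obj);
	if (list)
		count = list->shared_count;
	rcu_read_unlock();

	return count;
}

This mirrors what qxl_debugfs_buffers_info() and the other read-side callers in the diff below do.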
@@ -149,8 +149,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 
 	dma_resv_assert_held(obj);
 
-	old = dma_resv_get_list(obj);
-
+	old = dma_resv_shared_list(obj);
 	if (old && old->shared_max) {
 		if ((old->shared_count + num_fences) <= old->shared_max)
 			return 0;
@@ -219,12 +218,13 @@ EXPORT_SYMBOL(dma_resv_reserve_shared);
  */
 void dma_resv_reset_shared_max(struct dma_resv *obj)
 {
-	/* Test shared fence slot reservation */
-	if (rcu_access_pointer(obj->fence)) {
-		struct dma_resv_list *fence = dma_resv_get_list(obj);
+	struct dma_resv_list *fences = dma_resv_shared_list(obj);
 
-		fence->shared_max = fence->shared_count;
-	}
+	dma_resv_assert_held(obj);
+
+	/* Test shared fence slot reservation */
+	if (fences)
+		fences->shared_max = fences->shared_count;
 }
 EXPORT_SYMBOL(dma_resv_reset_shared_max);
 #endif
@@ -247,7 +247,7 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 
 	dma_resv_assert_held(obj);
 
-	fobj = dma_resv_get_list(obj);
+	fobj = dma_resv_shared_list(obj);
 	count = fobj->shared_count;
 
 	write_seqcount_begin(&obj->seq);
@@ -290,7 +290,7 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 
 	dma_resv_assert_held(obj);
 
-	old = dma_resv_get_list(obj);
+	old = dma_resv_shared_list(obj);
 	if (old)
 		i = old->shared_count;
 
@@ -329,7 +329,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 	dma_resv_assert_held(dst);
 
 	rcu_read_lock();
-	src_list = rcu_dereference(src->fence);
+	src_list = dma_resv_shared_list(src);
 
 retry:
 	if (src_list) {
@@ -342,7 +342,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 			return -ENOMEM;
 
 		rcu_read_lock();
-		src_list = rcu_dereference(src->fence);
+		src_list = dma_resv_shared_list(src);
 		if (!src_list || src_list->shared_count > shared_count) {
 			kfree(dst_list);
 			goto retry;
@@ -360,7 +360,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 
 			if (!dma_fence_get_rcu(fence)) {
 				dma_resv_list_free(dst_list);
-				src_list = rcu_dereference(src->fence);
+				src_list = dma_resv_shared_list(src);
 				goto retry;
 			}
 
@@ -379,7 +379,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 	new = dma_fence_get_rcu_safe(&src->fence_excl);
 	rcu_read_unlock();
 
-	src_list = dma_resv_get_list(dst);
+	src_list = dma_resv_shared_list(dst);
 	old = dma_resv_excl_fence(dst);
 
 	write_seqcount_begin(&dst->seq);
@@ -432,7 +432,7 @@ int dma_resv_get_fences_rcu(struct dma_resv *obj,
 		if (fence_excl && !dma_fence_get_rcu(fence_excl))
 			goto unlock;
 
-		fobj = rcu_dereference(obj->fence);
+		fobj = dma_resv_shared_list(obj);
 		if (fobj)
 			sz += sizeof(*shared) * fobj->shared_max;
 
@@ -538,7 +538,7 @@ long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
 	}
 
 	if (wait_all) {
-		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
 
 		if (fobj)
 			shared_count = fobj->shared_count;
@@ -623,7 +623,7 @@ bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 	seq = read_seqcount_begin(&obj->seq);
 
 	if (test_all) {
-		struct dma_resv_list *fobj = rcu_dereference(obj->fence);
+		struct dma_resv_list *fobj = dma_resv_shared_list(obj);
 		unsigned int i;
 
 		if (fobj)
......
@@ -247,7 +247,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 	if (!ef)
 		return -EINVAL;
 
-	old = dma_resv_get_list(resv);
+	old = dma_resv_shared_list(resv);
 	if (!old)
 		return 0;
 
......
@@ -49,7 +49,7 @@ __dma_resv_make_exclusive(struct dma_resv *obj)
 	unsigned int count;
 	int r;
 
-	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
+	if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
 		return 0;
 
 	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
......
@@ -213,7 +213,7 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 	f = dma_resv_excl_fence(resv);
 	r = amdgpu_sync_fence(sync, f);
 
-	flist = dma_resv_get_list(resv);
+	flist = dma_resv_shared_list(resv);
 	if (!flist || r)
 		return r;
 
......
@@ -1339,7 +1339,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	 * If true, then return false as any KFD process needs all its BOs to
 	 * be resident to run successfully
 	 */
-	flist = dma_resv_get_list(bo->base.resv);
+	flist = dma_resv_shared_list(bo->base.resv);
 	if (flist) {
 		for (i = 0; i < flist->shared_count; ++i) {
 			f = rcu_dereference_protected(flist->shared[i],
......
@@ -461,7 +461,7 @@ static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 			off, etnaviv_obj->vaddr, obj->size);
 
 	rcu_read_lock();
-	fobj = rcu_dereference(robj->fence);
+	fobj = dma_resv_shared_list(robj);
 	if (fobj) {
 		unsigned int i, shared_count = fobj->shared_count;
 
......
@@ -116,7 +116,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	args->busy = busy_check_writer(dma_resv_excl_fence(obj->base.resv));
 
 	/* Translate shared fences to READ set of engines */
-	list = rcu_dereference(obj->base.resv->fence);
+	list = dma_resv_shared_list(obj->base.resv);
 	if (list) {
 		unsigned int shared_count = list->shared_count, i;
 
......
@@ -817,7 +817,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
 	struct dma_fence *fence;
 	int i, ret;
 
-	fobj = dma_resv_get_list(obj->resv);
+	fobj = dma_resv_shared_list(obj->resv);
 	if (!fobj || (fobj->shared_count == 0)) {
 		fence = dma_resv_excl_fence(obj->resv);
 		/* don't need to wait on our own fences, since ring is fifo */
@@ -1025,7 +1025,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m,
 	}
 
 	rcu_read_lock();
-	fobj = rcu_dereference(robj->fence);
+	fobj = dma_resv_shared_list(robj);
 	if (fobj) {
 		unsigned int i, shared_count = fobj->shared_count;
 
......
@@ -355,7 +355,7 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
 		return ret;
 	}
 
-	fobj = dma_resv_get_list(resv);
+	fobj = dma_resv_shared_list(resv);
 	fence = dma_resv_excl_fence(resv);
 
 	if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
......
@@ -61,7 +61,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
 		int rel;
 
 		rcu_read_lock();
-		fobj = rcu_dereference(bo->tbo.base.resv->fence);
+		fobj = dma_resv_shared_list(bo->tbo.base.resv);
 		rel = fobj ? fobj->shared_count : 0;
 		rcu_read_unlock();
 
......
@@ -105,7 +105,7 @@ int radeon_sync_resv(struct radeon_device *rdev,
 	else if (f)
 		r = dma_fence_wait(f, true);
 
-	flist = dma_resv_get_list(resv);
+	flist = dma_resv_shared_list(resv);
 	if (shared || !flist || r)
 		return r;
 
......
@@ -261,7 +261,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 	int i;
 
 	rcu_read_lock();
-	fobj = rcu_dereference(resv->fence);
+	fobj = dma_resv_shared_list(resv);
 	fence = dma_resv_excl_fence(resv);
 	if (fence && !fence->ops->signaled)
 		dma_fence_enable_sw_signaling(fence);
......
@@ -78,20 +78,6 @@ struct dma_resv {
 #define dma_resv_held(obj)	lockdep_is_held(&(obj)->lock.base)
 #define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
 
-/**
- * dma_resv_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the shared fence list. Does NOT take references to
- * the fence. The obj->lock must be held.
- */
-static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
-{
-	return rcu_dereference_protected(obj->fence,
-					 dma_resv_held(obj));
-}
-
 #ifdef CONFIG_DEBUG_MUTEXES
 void dma_resv_reset_shared_max(struct dma_resv *obj);
 #else
@@ -268,6 +254,19 @@ dma_resv_get_excl_rcu(struct dma_resv *obj)
 	return fence;
 }
 
+/**
+ * dma_resv_shared_list - get the reservation object's shared fence list
+ * @obj: the reservation object
+ *
+ * Returns the shared fence list. Caller must either hold the object's
+ * dma_resv lock through dma_resv_lock() or the RCU read side lock through
+ * rcu_read_lock(), or one of the variants of each.
+ */
+static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
+{
+	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
+}
+
 void dma_resv_init(struct dma_resv *obj);
 void dma_resv_fini(struct dma_resv *obj);
 int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
......
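
Because the renamed helper still takes no references, a caller that wants to keep using a fence after leaving the locked section must pin it first, as dma_resv_copy_fences() above does with dma_fence_get_rcu(). A minimal sketch of that pattern, not part of this commit: the function name first_shared_fence() is hypothetical, and the seqcount retry loop the in-kernel readers add on top is omitted for brevity.

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/rcupdate.h>

/*
 * Hypothetical caller: return a reference to the first shared fence, or
 * NULL. dma_resv_shared_list() pins nothing, so the fence must be grabbed
 * with dma_fence_get_rcu() before rcu_read_unlock().
 */
static struct dma_fence *first_shared_fence(struct dma_resv *obj)
{
	struct dma_resv_list *list;
	struct dma_fence *fence = NULL;

	rcu_read_lock();
	list = dma_resv_shared_list(obj);
	if (list && list->shared_count)
		fence = rcu_dereference(list->shared[0]);
	if (fence && !dma_fence_get_rcu(fence))
		fence = NULL;	/* fence was already being freed */
	rcu_read_unlock();

	return fence;
}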