Commit a2837733 authored by Dave Airlie

Merge tag 'drm-misc-fixes-2023-01-12' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

Several fixes for amdgpu (all addressing issues with fences), yet
another orientation quirk for a Lenovo device, a use-after-free fix for
virtio, a regression fix for TTM, and a fix for a performance regression
in drm buddy.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20230112130954.pxt77g3a7rokha42@houat
parents b7bfaa76 5640e816
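
As context for the virtio use-after-free fix in this pull, here is a minimal sketch of the reference-ordering rule the fix enforces: once a GEM handle has been published to userspace, the local reference may only be dropped after the last dereference of the object. The function below is illustrative only and not part of any patch here; its name and surroundings are hypothetical, while drm_gem_handle_create() and drm_gem_object_put() are the real DRM helpers being ordered.

/*
 * Illustrative sketch only -- virtio_gpu_object_create_demo() is a
 * made-up name.  It shows the put-after-last-use ordering applied by
 * the two virtio hunks below.
 */
static int virtio_gpu_object_create_demo(struct drm_file *file,
					 struct drm_gem_object *obj,
					 u32 *resp_handle)
{
	u32 handle;
	int ret;

	/* Publishing a handle lets userspace close it at any time. */
	ret = drm_gem_handle_create(file, obj, &handle);
	if (ret)
		return ret;

	/* Still safe to dereference obj: we hold our own reference. */
	*resp_handle = handle;

	/*
	 * Drop the local reference only after the last use of obj.
	 * Dropping it before filling the reply would let a concurrent
	 * GEM_CLOSE on the guessed handle free obj underneath us.
	 */
	drm_gem_object_put(obj);
	return 0;
}

The virtio resource-create hunks below apply exactly this reordering to the real ioctls.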
@@ -61,6 +61,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
 		amdgpu_ctx_put(p->ctx);
 		return -ECANCELED;
 	}
+
+	amdgpu_sync_create(&p->sync);
 	return 0;
 }
@@ -452,18 +454,6 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
 	}
 
 	r = amdgpu_sync_fence(&p->sync, fence);
-	if (r)
-		goto error;
-
-	/*
-	 * When we have an explicit dependency it might be necessary to insert a
-	 * pipeline sync to make sure that all caches etc are flushed and the
-	 * next job actually sees the results from the previous one.
-	 */
-	if (fence->context == p->gang_leader->base.entity->fence_context)
-		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
-
-error:
 	dma_fence_put(fence);
 	return r;
 }
@@ -1188,10 +1178,19 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	struct drm_gpu_scheduler *sched;
 	struct amdgpu_bo_list_entry *e;
+	struct dma_fence *fence;
 	unsigned int i;
 	int r;
 
+	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
+	if (r) {
+		if (r != -ERESTARTSYS)
+			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+		return r;
+	}
+
 	list_for_each_entry(e, &p->validated, tv.head) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 		struct dma_resv *resv = bo->tbo.base.resv;
@@ -1211,10 +1210,24 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 			return r;
 	}
 
-	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
-	if (r && r != -ERESTARTSYS)
-		DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
-	return r;
+	sched = p->gang_leader->base.entity->rq->sched;
+	while ((fence = amdgpu_sync_get_fence(&p->sync))) {
+		struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+
+		/*
+		 * When we have an dependency it might be necessary to insert a
+		 * pipeline sync to make sure that all caches etc are flushed and the
+		 * next job actually sees the results from the previous one
+		 * before we start executing on the same scheduler ring.
+		 */
+		if (!s_fence || s_fence->sched != sched)
+			continue;
+
+		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+		if (r)
+			return r;
+	}
+	return 0;
 }
 
 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
@@ -1254,9 +1267,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 			continue;
 
 		fence = &p->jobs[i]->base.s_fence->scheduled;
+		dma_fence_get(fence);
 		r = drm_sched_job_add_dependency(&leader->base, fence);
-		if (r)
+		if (r) {
+			dma_fence_put(fence);
 			goto error_cleanup;
+		}
 	}
 
 	if (p->gang_size > 1) {
@@ -1344,6 +1360,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
 {
 	unsigned i;
 
+	amdgpu_sync_free(&parser->sync);
 	for (i = 0; i < parser->num_post_deps; i++) {
 		drm_syncobj_put(parser->post_deps[i].syncobj);
 		kfree(parser->post_deps[i].chain);
......
@@ -391,8 +391,10 @@ int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
 
 		dma_fence_get(f);
 		r = drm_sched_job_add_dependency(&job->base, f);
-		if (r)
+		if (r) {
+			dma_fence_put(f);
 			return r;
+		}
 	}
 	return 0;
 }
......
@@ -38,6 +38,25 @@ static void drm_block_free(struct drm_buddy *mm,
 	kmem_cache_free(slab_blocks, block);
 }
 
+static void list_insert_sorted(struct drm_buddy *mm,
+			       struct drm_buddy_block *block)
+{
+	struct drm_buddy_block *node;
+	struct list_head *head;
+
+	head = &mm->free_list[drm_buddy_block_order(block)];
+	if (list_empty(head)) {
+		list_add(&block->link, head);
+		return;
+	}
+
+	list_for_each_entry(node, head, link)
+		if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
+			break;
+
+	__list_add(&block->link, node->link.prev, &node->link);
+}
+
 static void mark_allocated(struct drm_buddy_block *block)
 {
 	block->header &= ~DRM_BUDDY_HEADER_STATE;
@@ -52,8 +71,7 @@ static void mark_free(struct drm_buddy *mm,
 	block->header &= ~DRM_BUDDY_HEADER_STATE;
 	block->header |= DRM_BUDDY_FREE;
 
-	list_add(&block->link,
-		 &mm->free_list[drm_buddy_block_order(block)]);
+	list_insert_sorted(mm, block);
 }
 
 static void mark_split(struct drm_buddy_block *block)
@@ -387,20 +405,26 @@ alloc_range_bias(struct drm_buddy *mm,
 }
 
 static struct drm_buddy_block *
-get_maxblock(struct list_head *head)
+get_maxblock(struct drm_buddy *mm, unsigned int order)
 {
 	struct drm_buddy_block *max_block = NULL, *node;
+	unsigned int i;
 
-	max_block = list_first_entry_or_null(head,
-					     struct drm_buddy_block,
-					     link);
-	if (!max_block)
-		return NULL;
+	for (i = order; i <= mm->max_order; ++i) {
+		if (!list_empty(&mm->free_list[i])) {
+			node = list_last_entry(&mm->free_list[i],
+					       struct drm_buddy_block,
+					       link);
+			if (!max_block) {
+				max_block = node;
+				continue;
+			}
 
-	list_for_each_entry(node, head, link) {
-		if (drm_buddy_block_offset(node) >
-		    drm_buddy_block_offset(max_block))
-			max_block = node;
+			if (drm_buddy_block_offset(node) >
+			    drm_buddy_block_offset(max_block)) {
+				max_block = node;
+			}
+		}
 	}
 
 	return max_block;
@@ -412,20 +436,23 @@ alloc_from_freelist(struct drm_buddy *mm,
 		    unsigned long flags)
 {
 	struct drm_buddy_block *block = NULL;
-	unsigned int i;
+	unsigned int tmp;
 	int err;
 
-	for (i = order; i <= mm->max_order; ++i) {
-		if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
-			block = get_maxblock(&mm->free_list[i]);
-			if (block)
-				break;
-		} else {
-			block = list_first_entry_or_null(&mm->free_list[i],
-							 struct drm_buddy_block,
-							 link);
-			if (block)
-				break;
+	if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
+		block = get_maxblock(mm, order);
+		if (block)
+			/* Store the obtained block order */
+			tmp = drm_buddy_block_order(block);
+	} else {
+		for (tmp = order; tmp <= mm->max_order; ++tmp) {
+			if (!list_empty(&mm->free_list[tmp])) {
+				block = list_last_entry(&mm->free_list[tmp],
+							struct drm_buddy_block,
+							link);
+				if (block)
+					break;
+			}
 		}
 	}
@@ -434,18 +461,18 @@ alloc_from_freelist(struct drm_buddy *mm,
 
 	BUG_ON(!drm_buddy_block_is_free(block));
 
-	while (i != order) {
+	while (tmp != order) {
 		err = split_block(mm, block);
 		if (unlikely(err))
 			goto err_undo;
 
 		block = block->right;
-		i--;
+		tmp--;
 	}
 	return block;
 
 err_undo:
-	if (i != order)
+	if (tmp != order)
 		__drm_buddy_free(mm, block);
 	return ERR_PTR(err);
 }
......
@@ -304,6 +304,12 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
 		},
 		.driver_data = (void *)&lcd1200x1920_rightside_up,
+	}, {	/* Lenovo Ideapad D330-10IGL (HD) */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
+		},
+		.driver_data = (void *)&lcd800x1280_rightside_up,
 	}, {	/* Lenovo Yoga Book X90F / X91F / X91L */
 		.matches = {
 		  /* Non exact match to match all versions */
......
@@ -173,7 +173,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 
 	clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
 	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
-		ttm_move_memcpy(clear, ttm->num_pages, dst_iter, src_iter);
+		ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);
 
 	if (!src_iter->ops->maps_tt)
 		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
......
@@ -358,10 +358,18 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
 		drm_gem_object_release(obj);
 		return ret;
 	}
-	drm_gem_object_put(obj);
 
 	rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */
 	rc->bo_handle = handle;
+
+	/*
+	 * The handle owns the reference now. But we must drop our
+	 * remaining reference *after* we no longer need to dereference
+	 * the obj. Otherwise userspace could guess the handle and
+	 * race closing it from another thread.
+	 */
+	drm_gem_object_put(obj);
+
 	return 0;
 }
@@ -723,11 +731,18 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
 		drm_gem_object_release(obj);
 		return ret;
 	}
-	drm_gem_object_put(obj);
 
 	rc_blob->res_handle = bo->hw_res_handle;
 	rc_blob->bo_handle = handle;
 
+	/*
+	 * The handle owns the reference now. But we must drop our
+	 * remaining reference *after* we no longer need to dereference
+	 * the obj. Otherwise userspace could guess the handle and
+	 * race closing it from another thread.
+	 */
+	drm_gem_object_put(obj);
+
 	return 0;
 }
......
@@ -254,40 +254,6 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
 	kref_put(&base->refcount, ttm_release_base);
 }
 
-/**
- * ttm_base_object_noref_lookup - look up a base object without reference
- * @tfile: The struct ttm_object_file the object is registered with.
- * @key: The object handle.
- *
- * This function looks up a ttm base object and returns a pointer to it
- * without refcounting the pointer. The returned pointer is only valid
- * until ttm_base_object_noref_release() is called, and the object
- * pointed to by the returned pointer may be doomed. Any persistent usage
- * of the object requires a refcount to be taken using kref_get_unless_zero().
- * Iff this function returns successfully it needs to be paired with
- * ttm_base_object_noref_release() and no sleeping- or scheduling functions
- * may be called inbetween these function callse.
- *
- * Return: A pointer to the object if successful or NULL otherwise.
- */
-struct ttm_base_object *
-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key)
-{
-	struct vmwgfx_hash_item *hash;
-	int ret;
-
-	rcu_read_lock();
-	ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
-	if (ret) {
-		rcu_read_unlock();
-		return NULL;
-	}
-
-	__release(RCU);
-	return hlist_entry(hash, struct ttm_ref_object, hash)->obj;
-}
-EXPORT_SYMBOL(ttm_base_object_noref_lookup);
-
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 					       uint64_t key)
 {
@@ -295,15 +261,16 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
 	struct vmwgfx_hash_item *hash;
 	int ret;
 
-	rcu_read_lock();
-	ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
+	spin_lock(&tfile->lock);
+	ret = ttm_tfile_find_ref(tfile, key, &hash);
+
 	if (likely(ret == 0)) {
 		base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
 		if (!kref_get_unless_zero(&base->refcount))
 			base = NULL;
 	}
-	rcu_read_unlock();
+	spin_unlock(&tfile->lock);
 
 	return base;
 }
......
@@ -307,18 +307,4 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
 #define ttm_prime_object_kfree(__obj, __prime) \
 	kfree_rcu(__obj, __prime.base.rhead)
 
-struct ttm_base_object *
-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key);
-
-/**
- * ttm_base_object_noref_release - release a base object pointer looked up
- * without reference
- *
- * Releases a base object pointer looked up with ttm_base_object_noref_lookup().
- */
-static inline void ttm_base_object_noref_release(void)
-{
-	__acquire(RCU);
-	rcu_read_unlock();
-}
 #endif
@@ -715,44 +715,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
 	return 0;
 }
 
-/**
- * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
- * @filp: The TTM object file the handle is registered with.
- * @handle: The user buffer object handle.
- *
- * This function looks up a struct vmw_bo and returns a pointer to the
- * struct vmw_buffer_object it derives from without refcounting the pointer.
- * The returned pointer is only valid until vmw_user_bo_noref_release() is
- * called, and the object pointed to by the returned pointer may be doomed.
- * Any persistent usage of the object requires a refcount to be taken using
- * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
- * needs to be paired with vmw_user_bo_noref_release() and no sleeping-
- * or scheduling functions may be called in between these function calls.
- *
- * Return: A struct vmw_buffer_object pointer if successful or negative
- * error pointer on failure.
- */
-struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
-{
-	struct vmw_buffer_object *vmw_bo;
-	struct ttm_buffer_object *bo;
-	struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);
-
-	if (!gobj) {
-		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
-			  (unsigned long)handle);
-		return ERR_PTR(-ESRCH);
-	}
-
-	vmw_bo = gem_to_vmw_bo(gobj);
-	bo = ttm_bo_get_unless_zero(&vmw_bo->base);
-	vmw_bo = vmw_buffer_object(bo);
-	drm_gem_object_put(gobj);
-
-	return vmw_bo;
-}
-
 /**
  * vmw_bo_fence_single - Utility function to fence a single TTM buffer
  * object without unreserving it.
......
@@ -830,12 +830,7 @@ extern int vmw_user_resource_lookup_handle(
 				      uint32_t handle,
 				      const struct vmw_user_resource_conv *converter,
 				      struct vmw_resource **p_res);
-extern struct vmw_resource *
-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
-				       struct ttm_object_file *tfile,
-				       uint32_t handle,
-				       const struct vmw_user_resource_conv *
-				       converter);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -874,15 +869,6 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
 	return !RB_EMPTY_NODE(&res->mob_node);
 }
 
-/**
- * vmw_user_resource_noref_release - release a user resource pointer looked up
- * without reference
- */
-static inline void vmw_user_resource_noref_release(void)
-{
-	ttm_base_object_noref_release();
-}
-
 /**
  * Buffer object helper functions - vmwgfx_bo.c
 */
@@ -934,8 +920,6 @@ extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
 extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 			       struct ttm_resource *mem);
 extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-extern struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle);
 
 /**
  * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
......
@@ -281,39 +281,6 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 	return ret;
 }
 
-/**
- * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
- * TTM user-space handle and perform basic type checks
- *
- * @dev_priv: Pointer to a device private struct
- * @tfile: Pointer to a struct ttm_object_file identifying the caller
- * @handle: The TTM user-space handle
- * @converter: Pointer to an object describing the resource type
- *
- * If the handle can't be found or is associated with an incorrect resource
- * type, -EINVAL will be returned.
- */
-struct vmw_resource *
-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
-				       struct ttm_object_file *tfile,
-				       uint32_t handle,
-				       const struct vmw_user_resource_conv
-				       *converter)
-{
-	struct ttm_base_object *base;
-
-	base = ttm_base_object_noref_lookup(tfile, handle);
-	if (!base)
-		return ERR_PTR(-ESRCH);
-
-	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
-		ttm_base_object_noref_release();
-		return ERR_PTR(-EINVAL);
-	}
-
-	return converter->base_obj_to_res(base);
-}
-
 /*
  * Helper function that looks either a surface or bo.
  *
......