Commit 3c064aea authored by Dave Airlie

Merge tag 'drm-misc-next-fixes-2024-01-04' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

One fix for drm/plane to avoid a use-after-free and some additional
warnings to prevent more of these occurrences, a lock inversion
dependency fix and an indentation fix for drm/rockchip, and some doc
warning fixes for imagination and gpuvm.
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <mripard@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/enhl33v2oeihktta2yfyc4exvezdvm3eexcuwxkethc5ommrjo@lkidkv2kwakq
parents cff601b4 eee70683
@@ -461,6 +461,7 @@ int drm_mode_rmfb(struct drm_device *dev, u32 fb_id,
                 INIT_WORK_ONSTACK(&arg.work, drm_mode_rmfb_work_fn);
                 INIT_LIST_HEAD(&arg.fbs);
+                drm_WARN_ON(dev, !list_empty(&fb->filp_head));
                 list_add_tail(&fb->filp_head, &arg.fbs);

                 schedule_work(&arg.work);
@@ -827,6 +828,8 @@ void drm_framebuffer_free(struct kref *kref)
                 container_of(kref, struct drm_framebuffer, base.refcount);
         struct drm_device *dev = fb->dev;

+        drm_WARN_ON(dev, !list_empty(&fb->filp_head));
+
         /*
          * The lookup idr holds a weak reference, which has not necessarily been
          * removed at this point. Check for that.
@@ -1119,7 +1122,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
         dev = fb->dev;

-        WARN_ON(!list_empty(&fb->filp_head));
+        drm_WARN_ON(dev, !list_empty(&fb->filp_head));

         /*
          * drm ABI mandates that we remove any deleted framebuffers from active
......
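The drm_mode_rmfb(), drm_framebuffer_free() and drm_framebuffer_remove() hunks above all enforce one invariant: a framebuffer must no longer be linked into a file's framebuffer list when it is freed or queued for removal, because a stale filp_head entry is exactly what a later list walk would dereference after the free. drm_WARN_ON() is used rather than plain WARN_ON() so the resulting splat also identifies the DRM device. A minimal sketch of the pattern, with hypothetical demo_* names standing in for the DRM code:

    #include <linux/bug.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_fb {
            struct list_head filp_head;     /* entry on the owning file's fb list */
    };

    static void demo_fb_queue_removal(struct demo_fb *fb, struct list_head *pending)
    {
            /* Adding an entry that is already on some list corrupts both
             * lists, so warn while the bug is still harmless. */
            WARN_ON(!list_empty(&fb->filp_head));
            list_add_tail(&fb->filp_head, pending);
    }

    static void demo_fb_free(struct demo_fb *fb)
    {
            /* Freeing while still linked leaves a dangling list entry
             * behind: the next walk of the owner's list is a use-after-free. */
            WARN_ON(!list_empty(&fb->filp_head));
            kfree(fb);
    }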
@@ -1503,6 +1503,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 out:
         if (fb)
                 drm_framebuffer_put(fb);
+        fb = NULL;
         if (plane->old_fb)
                 drm_framebuffer_put(plane->old_fb);
         plane->old_fb = NULL;
......
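The fb = NULL; added to drm_mode_page_flip_ioctl() above is the usual defence for a shared error path: once the reference may have been dropped, the pointer must not stay usable. A hedged sketch of the idiom, again with hypothetical demo_* names:

    #include <linux/types.h>

    struct demo_fb;
    struct demo_fb *demo_fb_lookup(u32 id);     /* acquires a reference */
    void demo_fb_put(struct demo_fb *fb);       /* may free the object */
    int demo_do_flip(struct demo_fb *fb);

    static int demo_page_flip(u32 fb_id, bool async)
    {
            struct demo_fb *fb = NULL;
            int ret = -EINVAL;

            if (async)                          /* hypothetical: unsupported */
                    goto out;

            fb = demo_fb_lookup(fb_id);
            if (!fb) {
                    ret = -ENOENT;
                    goto out;
            }

            ret = demo_do_flip(fb);
    out:
            if (fb)
                    demo_fb_put(fb);            /* may drop the last reference */
            fb = NULL;                          /* later misuse oopses on NULL
                                                 * instead of touching freed memory */
            return ret;
    }

The final assignment looks dead in a local scope, but it keeps any code later added after the shared out: label from reaching freed memory, which is the point of the change.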
@@ -193,13 +193,14 @@ struct pvr_device {
          * @queues: Queue-related fields.
          */
         struct {
-                /** @active: Active queue list. */
+                /** @queues.active: Active queue list. */
                 struct list_head active;

-                /** @idle: Idle queue list. */
+                /** @queues.idle: Idle queue list. */
                 struct list_head idle;

-                /** @lock: Lock protecting access to the active/idle lists. */
+                /** @queues.lock: Lock protecting access to the active/idle
+                 * lists. */
                 struct mutex lock;
         } queues;
@@ -207,18 +208,18 @@ struct pvr_device {
          * @watchdog: Watchdog for communications with firmware.
          */
         struct {
-                /** @work: Work item for watchdog callback. */
+                /** @watchdog.work: Work item for watchdog callback. */
                 struct delayed_work work;

                 /**
-                 * @old_kccb_cmds_executed: KCCB command execution count at last
-                 * watchdog poll.
+                 * @watchdog.old_kccb_cmds_executed: KCCB command execution
+                 * count at last watchdog poll.
                  */
                 u32 old_kccb_cmds_executed;

                 /**
-                 * @kccb_stall_count: Number of watchdog polls KCCB has been
-                 * stalled for.
+                 * @watchdog.kccb_stall_count: Number of watchdog polls
+                 * KCCB has been stalled for.
                  */
                 u32 kccb_stall_count;
         } watchdog;
@@ -227,43 +228,46 @@ struct pvr_device {
          * @kccb: Circular buffer for communications with firmware.
          */
         struct {
-                /** @ccb: Kernel CCB. */
+                /** @kccb.ccb: Kernel CCB. */
                 struct pvr_ccb ccb;

-                /** @rtn_q: Waitqueue for KCCB command return waiters. */
+                /** @kccb.rtn_q: Waitqueue for KCCB command return waiters. */
                 wait_queue_head_t rtn_q;

-                /** @rtn_obj: Object representing KCCB return slots. */
+                /** @kccb.rtn_obj: Object representing KCCB return slots. */
                 struct pvr_fw_object *rtn_obj;

                 /**
-                 * @rtn: Pointer to CPU mapping of KCCB return slots. Must be
-                 * accessed by READ_ONCE()/WRITE_ONCE().
+                 * @kccb.rtn: Pointer to CPU mapping of KCCB return slots.
+                 * Must be accessed by READ_ONCE()/WRITE_ONCE().
                  */
                 u32 *rtn;

-                /** @slot_count: Total number of KCCB slots available. */
+                /** @kccb.slot_count: Total number of KCCB slots available. */
                 u32 slot_count;

-                /** @reserved_count: Number of KCCB slots reserved for future use. */
+                /** @kccb.reserved_count: Number of KCCB slots reserved for
+                 * future use. */
                 u32 reserved_count;

                 /**
-                 * @waiters: List of KCCB slot waiters.
+                 * @kccb.waiters: List of KCCB slot waiters.
                  */
                 struct list_head waiters;

-                /** @fence_ctx: KCCB fence context. */
+                /** @kccb.fence_ctx: KCCB fence context. */
                 struct {
-                        /** @id: KCCB fence context ID allocated with dma_fence_context_alloc(). */
+                        /** @kccb.fence_ctx.id: KCCB fence context ID
+                         * allocated with dma_fence_context_alloc(). */
                         u64 id;

-                        /** @seqno: Sequence number incremented each time a fence is created. */
+                        /** @kccb.fence_ctx.seqno: Sequence number incremented
+                         * each time a fence is created. */
                         atomic_t seqno;

                         /**
-                         * @lock: Lock used to synchronize access to fences allocated by this
-                         * context.
+                         * @kccb.fence_ctx.lock: Lock used to synchronize
+                         * access to fences allocated by this context.
                          */
                         spinlock_t lock;
                 } fence_ctx;
......
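Every struct pvr_device hunk above applies the same kernel-doc rule: a member of a nested (here anonymous) struct must be documented under its full path from the containing struct, e.g. @queues.active rather than @active, otherwise scripts/kernel-doc cannot match the comment to the member and warns. A minimal sketch of the convention, using a hypothetical struct:

    #include <linux/list.h>
    #include <linux/mutex.h>

    /**
     * struct demo_device - sketch of nested-member kernel-doc
     * @queues: queue bookkeeping for the device
     */
    struct demo_device {
            struct {
                    /** @queues.active: list of queues with work pending */
                    struct list_head active;

                    /** @queues.lock: protects @queues.active */
                    struct mutex lock;
            } queues;
    };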
@@ -959,12 +959,6 @@ static void vop2_enable(struct vop2 *vop2)
                 return;
         }

-        ret = regmap_reinit_cache(vop2->map, &vop2_regmap_config);
-        if (ret) {
-                drm_err(vop2->drm, "failed to reinit cache: %d\n", ret);
-                return;
-        }
-
         if (vop2->data->soc_id == 3566)
                 vop2_writel(vop2, RK3568_OTP_WIN_EN, 1);
@@ -996,6 +990,8 @@ static void vop2_disable(struct vop2 *vop2)
         pm_runtime_put_sync(vop2->dev);

+        regcache_drop_region(vop2->map, 0, vop2_regmap_config.max_register);
+
         clk_disable_unprepare(vop2->pclk);
         clk_disable_unprepare(vop2->aclk);
         clk_disable_unprepare(vop2->hclk);
......
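The vop2 hunks above stop reinitialising the regmap cache in vop2_enable() and instead drop the cached registers in vop2_disable(); per the merge message this resolves a lock-inversion dependency. The end state is equivalent: the hardware loses its register state with the power domain, and regcache_drop_region() guarantees the next enable cannot sync stale values back, without the heavier regmap_reinit_cache() call in the power-up path. A minimal sketch of the disable-side pattern, assuming the usual regmap cache semantics and hypothetical demo_* names:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/pm_runtime.h>
    #include <linux/regmap.h>

    static const struct regmap_config demo_regmap_config = {
            .max_register = 0x1000,     /* hypothetical register range */
    };

    struct demo_hw {
            struct device *dev;
            struct regmap *map;
            struct clk *pclk;
    };

    static void demo_hw_disable(struct demo_hw *hw)
    {
            pm_runtime_put_sync(hw->dev);

            /* The block loses its register state when the power domain
             * goes down; drop the cached values so stale data cannot be
             * synced back to the hardware on the next enable. */
            regcache_drop_region(hw->map, 0, demo_regmap_config.max_register);

            clk_disable_unprepare(hw->pclk);
    }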
@@ -92,7 +92,7 @@ struct drm_gpuva {
          */
         struct {
                 /**
-                 * @addr: the start address
+                 * @va.addr: the start address
                  */
                 u64 addr;
@@ -107,17 +107,17 @@ struct drm_gpuva {
          */
         struct {
                 /**
-                 * @offset: the offset within the &drm_gem_object
+                 * @gem.offset: the offset within the &drm_gem_object
                  */
                 u64 offset;

                 /**
-                 * @obj: the mapped &drm_gem_object
+                 * @gem.obj: the mapped &drm_gem_object
                  */
                 struct drm_gem_object *obj;

                 /**
-                 * @entry: the &list_head to attach this object to a &drm_gpuvm_bo
+                 * @gem.entry: the &list_head to attach this object to a &drm_gpuvm_bo
                  */
                 struct list_head entry;
         } gem;
@@ -127,12 +127,12 @@ struct drm_gpuva {
          */
         struct {
                 /**
-                 * @rb: the rb-tree node
+                 * @rb.node: the rb-tree node
                  */
                 struct rb_node node;

                 /**
-                 * @entry: The &list_head to additionally connect &drm_gpuvas
+                 * @rb.entry: The &list_head to additionally connect &drm_gpuvas
                  * in the same order they appear in the interval tree. This is
                  * useful to keep iterating &drm_gpuvas from a start node found
                  * through the rb-tree while doing modifications on the rb-tree
@@ -141,7 +141,7 @@ struct drm_gpuva {
                 struct list_head entry;

                 /**
-                 * @__subtree_last: needed by the interval tree, holding last-in-subtree
+                 * @rb.__subtree_last: needed by the interval tree, holding last-in-subtree
                  */
                 u64 __subtree_last;
         } rb;
@@ -187,6 +187,8 @@ static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
  * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
  * is invalidated
  * @va: the &drm_gpuva to check
+ *
+ * Returns: %true if the GPU VA is invalidated, %false otherwise
  */
 static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
 {
@@ -252,12 +254,12 @@ struct drm_gpuvm {
          */
         struct {
                 /**
-                 * @tree: the rb-tree to track GPU VA mappings
+                 * @rb.tree: the rb-tree to track GPU VA mappings
                  */
                 struct rb_root_cached tree;

                 /**
-                 * @list: the &list_head to track GPU VA mappings
+                 * @rb.list: the &list_head to track GPU VA mappings
                  */
                 struct list_head list;
         } rb;
@@ -290,19 +292,19 @@ struct drm_gpuvm {
          */
         struct {
                 /**
-                 * @list: &list_head storing &drm_gpuvm_bos serving as
+                 * @extobj.list: &list_head storing &drm_gpuvm_bos serving as
                  * external object
                  */
                 struct list_head list;

                 /**
-                 * @local_list: pointer to the local list temporarily storing
-                 * entries from the external object list
+                 * @extobj.local_list: pointer to the local list temporarily
+                 * storing entries from the external object list
                  */
                 struct list_head *local_list;

                 /**
-                 * @lock: spinlock to protect the extobj list
+                 * @extobj.lock: spinlock to protect the extobj list
                  */
                 spinlock_t lock;
         } extobj;
@@ -312,19 +314,19 @@ struct drm_gpuvm {
          */
         struct {
                 /**
-                 * @list: &list_head storing &drm_gpuvm_bos currently being
-                 * evicted
+                 * @evict.list: &list_head storing &drm_gpuvm_bos currently
+                 * being evicted
                  */
                 struct list_head list;

                 /**
-                 * @local_list: pointer to the local list temporarily storing
-                 * entries from the evicted object list
+                 * @evict.local_list: pointer to the local list temporarily
+                 * storing entries from the evicted object list
                  */
                 struct list_head *local_list;

                 /**
-                 * @lock: spinlock to protect the evict list
+                 * @evict.lock: spinlock to protect the evict list
                  */
                 spinlock_t lock;
         } evict;
@@ -344,6 +346,8 @@ void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
  *
  * This function acquires an additional reference to @gpuvm. It is illegal to
  * call this without already holding a reference. No locks required.
+ *
+ * Returns: the &struct drm_gpuvm pointer
  */
 static inline struct drm_gpuvm *
 drm_gpuvm_get(struct drm_gpuvm *gpuvm)
@@ -533,12 +537,13 @@ struct drm_gpuvm_exec {
          */
         struct {
                 /**
-                 * @fn: The driver callback to lock additional &drm_gem_objects.
+                 * @extra.fn: The driver callback to lock additional
+                 * &drm_gem_objects.
                  */
                 int (*fn)(struct drm_gpuvm_exec *vm_exec);

                 /**
-                 * @priv: driver private data for the @fn callback
+                 * @extra.priv: driver private data for the @fn callback
                  */
                 void *priv;
         } extra;
@@ -589,7 +594,7 @@ void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
                               enum dma_resv_usage extobj_usage);

 /**
- * drm_gpuvm_exec_resv_add_fence()
+ * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj
  * @vm_exec: the &drm_gpuvm_exec wrapper
  * @fence: fence to add
  * @private_usage: private dma-resv usage
@@ -608,10 +613,12 @@ drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec,
 }

 /**
- * drm_gpuvm_exec_validate()
+ * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
  * @vm_exec: the &drm_gpuvm_exec wrapper
  *
  * See drm_gpuvm_validate().
+ *
+ * Returns: 0 on success, negative error code on failure.
  */
 static inline int
 drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec)
@@ -664,7 +671,7 @@ struct drm_gpuvm_bo {
          */
         struct {
                 /**
-                 * @gpuva: The list of linked &drm_gpuvas.
+                 * @list.gpuva: The list of linked &drm_gpuvas.
                  *
                  * It is safe to access entries from this list as long as the
                  * GEM's gpuva lock is held. See also struct drm_gem_object.
@@ -672,25 +679,25 @@ struct drm_gpuvm_bo {
                 struct list_head gpuva;

                 /**
-                 * @entry: Structure containing all &list_heads serving as
+                 * @list.entry: Structure containing all &list_heads serving as
                  * entry.
                  */
                 struct {
                         /**
-                         * @gem: List entry to attach to the &drm_gem_objects
-                         * gpuva list.
+                         * @list.entry.gem: List entry to attach to the
+                         * &drm_gem_objects gpuva list.
                          */
                         struct list_head gem;

                         /**
-                         * @evict: List entry to attach to the &drm_gpuvms
-                         * extobj list.
+                         * @list.entry.evict: List entry to attach to the
+                         * &drm_gpuvms extobj list.
                          */
                         struct list_head extobj;

                         /**
-                         * @evict: List entry to attach to the &drm_gpuvms evict
-                         * list.
+                         * @list.entry.evict: List entry to attach to the
+                         * &drm_gpuvms evict list.
                          */
                         struct list_head evict;
                 } entry;
@@ -713,6 +720,8 @@ drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);
  *
  * This function acquires an additional reference to @vm_bo. It is illegal to
  * call this without already holding a reference. No locks required.
+ *
+ * Returns: the &struct vm_bo pointer
  */
 static inline struct drm_gpuvm_bo *
 drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
@@ -730,7 +739,8 @@ drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
 void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);

 /**
- * drm_gpuvm_bo_gem_evict()
+ * drm_gpuvm_bo_gem_evict() - add/remove all &drm_gpuvm_bo's in the list
+ * to/from the &drm_gpuvms evicted list
  * @obj: the &drm_gem_object
  * @evict: indicates whether @obj is evicted
  *
@@ -817,12 +827,12 @@ struct drm_gpuva_op_map {
          */
         struct {
                 /**
-                 * @addr: the base address of the new mapping
+                 * @va.addr: the base address of the new mapping
                  */
                 u64 addr;

                 /**
-                 * @range: the range of the new mapping
+                 * @va.range: the range of the new mapping
                  */
                 u64 range;
         } va;
@@ -832,12 +842,12 @@ struct drm_gpuva_op_map {
          */
         struct {
                 /**
-                 * @offset: the offset within the &drm_gem_object
+                 * @gem.offset: the offset within the &drm_gem_object
                  */
                 u64 offset;

                 /**
-                 * @obj: the &drm_gem_object to map
+                 * @gem.obj: the &drm_gem_object to map
                  */
                 struct drm_gem_object *obj;
         } gem;
......
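The drm_gpuvm.h hunks above fix the same nested-member naming as in pvr_device.h and additionally complete two more kernel-doc requirements: every function comment needs a "name() - summary" first line, and every non-void function needs a Returns: section. A sketch of the expected shape for a hypothetical refcounted getter:

    #include <linux/kref.h>

    struct demo_vm {
            struct kref ref;
    };

    /**
     * demo_vm_get() - acquire an additional reference to @vm
     * @vm: the object to reference; the caller must already hold a reference
     *
     * Returns: the &struct demo_vm pointer, so the call can be chained
     */
    static inline struct demo_vm *demo_vm_get(struct demo_vm *vm)
    {
            kref_get(&vm->ref);
            return vm;
    }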