Commit b634acb2 authored by Dave Airlie

Merge tag 'drm-misc-fixes-2024-10-10' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-fixes

Short summary of fixes pull:

fbdev-dma:
- Only clean up deferred I/O if instantiated

nouveau:
- dmem: Fix privileged error in copy engine channel; Fix possible
data leak in migrate_to_ram()
- gsp: Fix coding style

sched:
- Avoid leaking lockdep map

v3d:
- Stop active perfmon before destroying it

vc4:
- Stop active perfmon before destroying it

xe:
- Drop GuC submit_wq pool

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20241010133708.GA461532@localhost.localdomain
parents fe4a435b fcddc71e
@@ -50,6 +50,7 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
 	if (!fb_helper->dev)
 		return;
 
-	fb_deferred_io_cleanup(info);
+	if (info->fbdefio)
+		fb_deferred_io_cleanup(info);
 	drm_fb_helper_fini(fb_helper);
......
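
For context on the fbdev-dma hunk above: info->fbdefio is only set when a driver actually instantiates deferred I/O, so calling fb_deferred_io_cleanup() unconditionally touches state that may never have been initialised. Below is a minimal, hedged userspace sketch of the same "only tear down what was set up" rule; the struct and function names are illustrative stand-ins, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

struct deferred_io { int pending; };                    /* stand-in for fbdefio state */
struct fb_info_sketch { struct deferred_io *fbdefio; }; /* stand-in for struct fb_info */

static void deferred_io_cleanup(struct fb_info_sketch *info)
{
	free(info->fbdefio);
	info->fbdefio = NULL;
}

static void fb_destroy(struct fb_info_sketch *info)
{
	/* Only clean up deferred I/O if it was instantiated. */
	if (info->fbdefio)
		deferred_io_cleanup(info);
}

int main(void)
{
	struct fb_info_sketch plain = { .fbdefio = NULL };
	struct fb_info_sketch defio = { .fbdefio = calloc(1, sizeof(struct deferred_io)) };

	fb_destroy(&plain);  /* no deferred I/O: nothing to do, no crash */
	fb_destroy(&defio);  /* deferred I/O present: state is freed */
	puts("ok");
	return 0;
}
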
@@ -210,7 +210,7 @@ struct nvkm_gsp {
 	} *rm;
 
 	struct {
-		struct mutex mutex;;
+		struct mutex mutex;
 		struct idr idr;
 	} client_id;
......
@@ -193,7 +193,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
 		goto done;
 
-	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
+	dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);
 	if (!dpage)
 		goto done;
......
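
On the migrate_to_ram() change above: the destination page handed back to userspace is now allocated pre-zeroed, so if the device-to-host copy fails or only partially completes, the fault handler cannot expose whatever data happened to be in the freshly allocated page. A hedged userspace sketch of the same idea (calloc() standing in for __GFP_ZERO; the device_copy() helper is invented for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Invented stand-in for a device copy that may fail partway through. */
static int device_copy(unsigned char *dst, const unsigned char *src, size_t len)
{
	memcpy(dst, src, len / 2);   /* simulate a partial copy */
	return -1;                   /* report failure */
}

int main(void)
{
	unsigned char src[PAGE_SIZE];
	memset(src, 0xab, sizeof(src));

	/* calloc() plays the role of GFP_HIGHUSER | __GFP_ZERO. */
	unsigned char *dst = calloc(1, PAGE_SIZE);
	if (!dst)
		return 1;

	if (device_copy(dst, src, PAGE_SIZE))
		fprintf(stderr, "copy failed; destination stays zeroed instead of leaking old contents\n");

	printf("last byte: %#x\n", dst[PAGE_SIZE - 1]);  /* 0, not stale data */
	free(dst);
	return 0;
}
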
@@ -331,7 +331,7 @@ nouveau_accel_ce_init(struct nouveau_drm *drm)
 		return;
 	}
 
-	ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
+	ret = nouveau_channel_new(&drm->client, true, runm, NvDmaFB, NvDmaTT, &drm->cechan);
 	if (ret)
 		NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 }
......
@@ -87,6 +87,12 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_scheduler_trace.h"
 
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map drm_sched_lockdep_map = {
+	.name = "drm_sched_lockdep_map"
+};
+#endif
+
 #define to_drm_sched_job(sched_job)		\
 	container_of((sched_job), struct drm_sched_job, queue_node)
@@ -1269,7 +1275,12 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		sched->submit_wq = submit_wq;
 		sched->own_submit_wq = false;
 	} else {
+#ifdef CONFIG_LOCKDEP
+		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0,
+								       &drm_sched_lockdep_map);
+#else
 		sched->submit_wq = alloc_ordered_workqueue(name, 0);
+#endif
 		if (!sched->submit_wq)
 			return -ENOMEM;
......
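
On the scheduler hunks above: with lockdep enabled, every internally allocated submit workqueue is now created against the single static drm_sched_lockdep_map instead of each alloc_ordered_workqueue() call getting its own lockdep bookkeeping, which appears to be what the "avoid leaking lockdep map" summary line refers to when schedulers are created and torn down repeatedly. The real mechanism is alloc_ordered_workqueue_lockdep_map() as shown in the diff; the fragment below is only a rough, hedged userspace analogue of "share one statically allocated key instead of minting one per instance".

#include <stdio.h>

#define MAX_CLASSES 8   /* tiny stand-in for lockdep's fixed key table */

/* Scheme 1: every new workqueue registers its own key (the old behaviour). */
static int per_instance_classes;

static int new_per_instance_key(void)
{
	if (per_instance_classes >= MAX_CLASSES)
		return -1;                    /* key table exhausted */
	return per_instance_classes++;
}

/* Scheme 2: all submit workqueues share one static key (the fix). */
static int shared_keys;

static int get_shared_key(void)
{
	if (!shared_keys)
		shared_keys = 1;              /* registered exactly once */
	return 0;                             /* always the same key */
}

int main(void)
{
	int i;

	for (i = 0; i < 100; i++)
		if (new_per_instance_key() < 0)
			break;
	printf("per-instance scheme ran out of keys after %d schedulers\n", i);

	for (i = 0; i < 100; i++)
		get_shared_key();
	printf("shared scheme used %d key(s) for 100 schedulers\n", shared_keys);
	return 0;
}
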
@@ -306,6 +306,11 @@ void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv)
 static int v3d_perfmon_idr_del(int id, void *elem, void *data)
 {
 	struct v3d_perfmon *perfmon = elem;
+	struct v3d_dev *v3d = (struct v3d_dev *)data;
+
+	/* If the active perfmon is being destroyed, stop it first */
+	if (perfmon == v3d->active_perfmon)
+		v3d_perfmon_stop(v3d, perfmon, false);
 
 	v3d_perfmon_put(perfmon);
@@ -314,8 +319,10 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data)
 void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv)
 {
+	struct v3d_dev *v3d = v3d_priv->v3d;
+
 	mutex_lock(&v3d_priv->perfmon.lock);
-	idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL);
+	idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, v3d);
 	idr_destroy(&v3d_priv->perfmon.idr);
 	mutex_unlock(&v3d_priv->perfmon.lock);
 	mutex_destroy(&v3d_priv->perfmon.lock);
......
@@ -116,6 +116,11 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file)
 static int vc4_perfmon_idr_del(int id, void *elem, void *data)
 {
 	struct vc4_perfmon *perfmon = elem;
+	struct vc4_dev *vc4 = (struct vc4_dev *)data;
+
+	/* If the active perfmon is being destroyed, stop it first */
+	if (perfmon == vc4->active_perfmon)
+		vc4_perfmon_stop(vc4, perfmon, false);
 
 	vc4_perfmon_put(perfmon);
@@ -130,7 +135,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
 		return;
 
 	mutex_lock(&vc4file->perfmon.lock);
-	idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
+	idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, vc4);
 	idr_destroy(&vc4file->perfmon.idr);
 	mutex_unlock(&vc4file->perfmon.lock);
 	mutex_destroy(&vc4file->perfmon.lock);
......
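
The v3d and vc4 hunks above apply the same fix: when a file closes and every perfmon in its IDR is destroyed, the one the hardware is still counting with must be stopped first, otherwise the device keeps a pointer to freed memory. A hedged userspace sketch of that "stop the active entry before dropping it from the registry" pattern (the types and helpers here are illustrative, not the driver API):

#include <stdio.h>
#include <stdlib.h>

struct perfmon { int id; };

struct device {
	struct perfmon *active_perfmon;  /* what the HW is currently using */
	struct perfmon *registry[4];     /* stand-in for the per-file IDR */
};

static void perfmon_stop(struct device *dev, struct perfmon *pm)
{
	printf("stopping active perfmon %d before teardown\n", pm->id);
	dev->active_perfmon = NULL;
}

static void perfmon_put(struct perfmon *pm)
{
	free(pm);                        /* last reference: memory goes away */
}

static void close_file(struct device *dev)
{
	int i;

	for (i = 0; i < 4; i++) {
		struct perfmon *pm = dev->registry[i];

		if (!pm)
			continue;
		/* If the active perfmon is being destroyed, stop it first,
		 * so dev->active_perfmon never dangles after the free. */
		if (pm == dev->active_perfmon)
			perfmon_stop(dev, pm);
		perfmon_put(pm);
		dev->registry[i] = NULL;
	}
}

int main(void)
{
	struct device dev = { 0 };
	int i;

	for (i = 0; i < 4; i++) {
		dev.registry[i] = calloc(1, sizeof(*dev.registry[i]));
		dev.registry[i]->id = i;
	}
	dev.active_perfmon = dev.registry[2];

	close_file(&dev);
	printf("active_perfmon is now %p\n", (void *)dev.active_perfmon);
	return 0;
}
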
@@ -224,80 +224,11 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
 		EXEC_QUEUE_STATE_BANNED));
 }
 
-#ifdef CONFIG_PROVE_LOCKING
-static int alloc_submit_wq(struct xe_guc *guc)
-{
-	int i;
-
-	for (i = 0; i < NUM_SUBMIT_WQ; ++i) {
-		guc->submission_state.submit_wq_pool[i] =
-			alloc_ordered_workqueue("submit_wq", 0);
-		if (!guc->submission_state.submit_wq_pool[i])
-			goto err_free;
-	}
-
-	return 0;
-
-err_free:
-	while (i)
-		destroy_workqueue(guc->submission_state.submit_wq_pool[--i]);
-
-	return -ENOMEM;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-	int i;
-
-	for (i = 0; i < NUM_SUBMIT_WQ; ++i)
-		destroy_workqueue(guc->submission_state.submit_wq_pool[i]);
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
-	int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ;
-
-	return guc->submission_state.submit_wq_pool[idx];
-}
-#else
-static int alloc_submit_wq(struct xe_guc *guc)
-{
-	return 0;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
-	return NULL;
-}
-#endif
-
-static void xe_guc_submit_fini(struct xe_guc *guc)
-{
-	struct xe_device *xe = guc_to_xe(guc);
-	struct xe_gt *gt = guc_to_gt(guc);
-	int ret;
-
-	ret = wait_event_timeout(guc->submission_state.fini_wq,
-				 xa_empty(&guc->submission_state.exec_queue_lookup),
-				 HZ * 5);
-
-	drain_workqueue(xe->destroy_wq);
-
-	xe_gt_assert(gt, ret);
-}
-
 static void guc_submit_fini(struct drm_device *drm, void *arg)
 {
 	struct xe_guc *guc = arg;
 
-	xe_guc_submit_fini(guc);
 	xa_destroy(&guc->submission_state.exec_queue_lookup);
-	free_submit_wq(guc);
 }
 
 static void guc_submit_wedged_fini(void *arg)
@@ -359,10 +290,6 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
 	if (err)
 		return err;
 
-	err = alloc_submit_wq(guc);
-	if (err)
-		return err;
-
 	gt->exec_queue_ops = &guc_exec_queue_ops;
 
 	xa_init(&guc->submission_state.exec_queue_lookup);
@@ -1482,8 +1409,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
 	timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
 		  msecs_to_jiffies(q->sched_props.job_timeout_ms);
 	err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
-			    get_submit_wq(guc),
-			    q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
+			    NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
 			    timeout, guc_to_gt(guc)->ordered_wq, NULL,
 			    q->name, gt_to_xe(q->gt)->drm.dev);
 	if (err)
......
@@ -72,13 +72,6 @@ struct xe_guc {
 		atomic_t stopped;
 		/** @submission_state.lock: protects submission state */
 		struct mutex lock;
-#ifdef CONFIG_PROVE_LOCKING
-#define NUM_SUBMIT_WQ	256
-		/** @submission_state.submit_wq_pool: submission ordered workqueues pool */
-		struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
-		/** @submission_state.submit_wq_idx: submission ordered workqueue index */
-		int submit_wq_idx;
-#endif
 		/** @submission_state.enabled: submission is enabled */
 		bool enabled;
 		/** @submission_state.fini_wq: submit fini wait queue */
......
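
On the xe hunks above: the submit_wq pool was only compiled under CONFIG_PROVE_LOCKING, which suggests it existed to keep lockdep's bookkeeping bounded by letting the per-exec-queue schedulers reuse a fixed set of 256 ordered workqueues rather than creating one each. With drm_sched now keying every internally allocated submit workqueue to the shared lockdep map (see the scheduler diff earlier), the workaround becomes redundant: xe passes NULL so the scheduler allocates its own ordered workqueue, and the pool, its allocator, and the round-robin getter go away. For illustration only, a hedged userspace sketch of the round-robin pattern being removed:

#include <stdio.h>

#define NUM_SUBMIT_WQ 256

struct wq { int id; };                  /* stand-in for workqueue_struct */

static struct wq pool[NUM_SUBMIT_WQ];   /* stand-in for submit_wq_pool */
static int pool_idx;                    /* stand-in for submit_wq_idx */

/* Mirrors the removed get_submit_wq(): hand out pool entries round-robin
 * so any number of exec queues maps onto a fixed set of workqueues. */
static struct wq *get_submit_wq(void)
{
	return &pool[pool_idx++ % NUM_SUBMIT_WQ];
}

int main(void)
{
	int i;

	for (i = 0; i < NUM_SUBMIT_WQ; i++)
		pool[i].id = i;

	for (i = 0; i < 1000; i++)
		get_submit_wq();        /* 1000 queues, still only 256 workqueues */

	printf("handed out %d requests from a pool of %d workqueues\n", pool_idx, NUM_SUBMIT_WQ);
	return 0;
}
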