Commit 7c548869 authored by Dave Airlie

Merge tag 'drm-xe-fixes-2024-02-22' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes

UAPI Changes:
- Remove support for persistent exec_queues
- Drop a redundant sysfs newline printout

Cross-subsystem Changes:

Core Changes:

Driver Changes:
- A three-patch fix for a VM_BIND rebind optimization path
- Fix a modpost warning on an xe KUNIT module

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZdcsNrxdWMMM417v@fedora
parents bfc7746a 6650d23f
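
Before this merge, userspace opted a queue into persistence at creation time through the generic set-property extension chain. A minimal sketch of the now-rejected pattern, assuming the usual xe_drm.h uapi structures (fd setup and placement fields elided; the property value 3 is the DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE define that the uapi hunk below deletes):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

/* Sketch only: this is the property chain the removed uAPI accepted.
 * After this merge the kernel NULL-checks the property table, so the
 * ioctl below fails with -EINVAL. */
static int create_persistent_queue(int fd, uint32_t vm_id)
{
	struct drm_xe_ext_set_property ext = {
		.base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
		.property = 3,	/* was DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE */
		.value = 1,	/* keep the queue alive past file close */
	};
	struct drm_xe_exec_queue_create create = {
		.extensions = (uintptr_t)&ext,
		.vm_id = vm_id,
		/* width, num_placements, instances elided */
	};

	return ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);
}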
@@ -21,4 +21,5 @@ kunit_test_suite(xe_mocs_test_suite);
 
 MODULE_AUTHOR("Intel Corporation");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_mocs kunit test");
 MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
...
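The single added line above is the whole KUNIT fix: modpost warns about modules that lack a MODULE_DESCRIPTION() (visible in W=1 builds), and the xe_mocs test module was missing one. A minimal self-contained sketch of the metadata trio every module wants (names illustrative, not the driver's):

#include <linux/module.h>

/* Without MODULE_DESCRIPTION(), W=1 builds emit a
 * "missing MODULE_DESCRIPTION()" modpost warning like the one fixed above. */
static int __init demo_init(void) { return 0; }
static void __exit demo_exit(void) { }

module_init(demo_init);
module_exit(demo_exit);

MODULE_AUTHOR("Example Author");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Demonstration of required module metadata");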
@@ -83,9 +83,6 @@ static int xe_file_open(struct drm_device *dev, struct drm_file *file)
 	return 0;
 }
 
-static void device_kill_persistent_exec_queues(struct xe_device *xe,
-					       struct xe_file *xef);
-
 static void xe_file_close(struct drm_device *dev, struct drm_file *file)
 {
 	struct xe_device *xe = to_xe_device(dev);
@@ -102,8 +99,6 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
 	mutex_unlock(&xef->exec_queue.lock);
 	xa_destroy(&xef->exec_queue.xa);
 	mutex_destroy(&xef->exec_queue.lock);
-	device_kill_persistent_exec_queues(xe, xef);
-
 	mutex_lock(&xef->vm.lock);
 	xa_for_each(&xef->vm.xa, idx, vm)
 		xe_vm_close_and_put(vm);
@@ -255,9 +250,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
 		xa_erase(&xe->usm.asid_to_vm, asid);
 	}
 
-	drmm_mutex_init(&xe->drm, &xe->persistent_engines.lock);
-	INIT_LIST_HEAD(&xe->persistent_engines.list);
-
 	spin_lock_init(&xe->pinned.lock);
 	INIT_LIST_HEAD(&xe->pinned.kernel_bo_present);
 	INIT_LIST_HEAD(&xe->pinned.external_vram);
@@ -570,37 +562,6 @@ void xe_device_shutdown(struct xe_device *xe)
 {
 }
 
-void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q)
-{
-	mutex_lock(&xe->persistent_engines.lock);
-	list_add_tail(&q->persistent.link, &xe->persistent_engines.list);
-	mutex_unlock(&xe->persistent_engines.lock);
-}
-
-void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
-					     struct xe_exec_queue *q)
-{
-	mutex_lock(&xe->persistent_engines.lock);
-	if (!list_empty(&q->persistent.link))
-		list_del(&q->persistent.link);
-	mutex_unlock(&xe->persistent_engines.lock);
-}
-
-static void device_kill_persistent_exec_queues(struct xe_device *xe,
-					       struct xe_file *xef)
-{
-	struct xe_exec_queue *q, *next;
-
-	mutex_lock(&xe->persistent_engines.lock);
-	list_for_each_entry_safe(q, next, &xe->persistent_engines.list,
-				 persistent.link)
-		if (q->persistent.xef == xef) {
-			xe_exec_queue_kill(q);
-			list_del_init(&q->persistent.link);
-		}
-	mutex_unlock(&xe->persistent_engines.lock);
-}
-
 void xe_device_wmb(struct xe_device *xe)
 {
 	struct xe_gt *gt = xe_root_mmio_gt(xe);
...
@@ -42,10 +42,6 @@ int xe_device_probe(struct xe_device *xe);
 void xe_device_remove(struct xe_device *xe);
 void xe_device_shutdown(struct xe_device *xe);
 
-void xe_device_add_persistent_exec_queues(struct xe_device *xe, struct xe_exec_queue *q);
-void xe_device_remove_persistent_exec_queues(struct xe_device *xe,
-					     struct xe_exec_queue *q);
-
 void xe_device_wmb(struct xe_device *xe);
 
 static inline struct xe_file *to_xe_file(const struct drm_file *file)
...
@@ -341,14 +341,6 @@ struct xe_device {
 		struct mutex lock;
 	} usm;
 
-	/** @persistent_engines: engines that are closed but still running */
-	struct {
-		/** @lock: protects persistent engines */
-		struct mutex lock;
-		/** @list: list of persistent engines */
-		struct list_head list;
-	} persistent_engines;
-
 	/** @pinned: pinned BO state */
 	struct {
 		/** @lock: protected pinned BO list state */
...
@@ -60,7 +60,6 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
 	q->fence_irq = &gt->fence_irq[hwe->class];
 	q->ring_ops = gt->ring_ops[hwe->class];
 	q->ops = gt->exec_queue_ops;
-	INIT_LIST_HEAD(&q->persistent.link);
 	INIT_LIST_HEAD(&q->compute.link);
 	INIT_LIST_HEAD(&q->multi_gt_link);
 
@@ -326,23 +325,6 @@ static int exec_queue_set_preemption_timeout(struct xe_device *xe,
 	return q->ops->set_preempt_timeout(q, value);
 }
 
-static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q,
-				      u64 value, bool create)
-{
-	if (XE_IOCTL_DBG(xe, !create))
-		return -EINVAL;
-
-	if (XE_IOCTL_DBG(xe, xe_vm_in_preempt_fence_mode(q->vm)))
-		return -EINVAL;
-
-	if (value)
-		q->flags |= EXEC_QUEUE_FLAG_PERSISTENT;
-	else
-		q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
-
-	return 0;
-}
-
 static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
 				      u64 value, bool create)
 {
@@ -414,7 +396,6 @@ static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
-	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
 	[DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
@@ -441,6 +422,9 @@ static int exec_queue_user_ext_set_property(struct xe_device *xe,
 		return -EINVAL;
 
 	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
+	if (!exec_queue_set_property_funcs[idx])
+		return -EINVAL;
+
 	return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
 }
 
@@ -704,9 +688,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 	}
 
 	q = xe_exec_queue_create(xe, vm, logical_mask,
-				 args->width, hwe,
-				 xe_vm_in_lr_mode(vm) ? 0 :
-				 EXEC_QUEUE_FLAG_PERSISTENT);
+				 args->width, hwe, 0);
 	up_read(&vm->lock);
 	xe_vm_put(vm);
 	if (IS_ERR(q))
@@ -728,8 +710,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 		goto kill_exec_queue;
 	}
 
-	q->persistent.xef = xef;
-
 	mutex_lock(&xef->exec_queue.lock);
 	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
 	mutex_unlock(&xef->exec_queue.lock);
@@ -872,10 +852,7 @@ int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
 	if (XE_IOCTL_DBG(xe, !q))
 		return -ENOENT;
 
-	if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT))
-		xe_exec_queue_kill(q);
-	else
-		xe_device_add_persistent_exec_queues(xe, q);
+	xe_exec_queue_kill(q);
 
 	trace_xe_exec_queue_close(q);
 	xe_exec_queue_put(q);
...
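A detail worth noting in the hunks above: removing the PERSISTENCE handler leaves index 3 of the designated-initializer array NULL, which is why the new guard in exec_queue_user_ext_set_property() lands in the same change. A reduced, self-contained sketch of the hazard (handler names are illustrative, not the driver's):

#include <errno.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

typedef int (*prop_fn)(unsigned long long value);

static int set_priority(unsigned long long v) { (void)v; return 0; }
static int set_job_timeout(unsigned long long v) { (void)v; return 0; }

/* Designated initializers zero the skipped slots, so entry 3 is NULL. */
static const prop_fn prop_funcs[] = {
	[0] = set_priority,
	/* [1], [2] elided for brevity; [3] intentionally absent */
	[4] = set_job_timeout,
};

static int dispatch(size_t idx, unsigned long long value)
{
	if (idx >= ARRAY_SIZE(prop_funcs) || !prop_funcs[idx])
		return -EINVAL;	/* the guard this merge adds */
	return prop_funcs[idx](value);
}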
@@ -105,16 +105,6 @@ struct xe_exec_queue {
 		struct xe_guc_exec_queue *guc;
 	};
 
-	/**
-	 * @persistent: persistent exec queue state
-	 */
-	struct {
-		/** @xef: file which this exec queue belongs to */
-		struct xe_file *xef;
-		/** @link: link in list of persistent exec queues */
-		struct list_head link;
-	} persistent;
-
 	union {
 		/**
 		 * @parallel: parallel submission state
...
@@ -378,8 +378,6 @@ static void execlist_exec_queue_fini_async(struct work_struct *w)
 	list_del(&exl->active_link);
 	spin_unlock_irqrestore(&exl->port->lock, flags);
 
-	if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
-		xe_device_remove_persistent_exec_queues(xe, q);
 	drm_sched_entity_fini(&exl->entity);
 	drm_sched_fini(&exl->sched);
 	kfree(exl);
...
@@ -145,10 +145,10 @@ void xe_gt_idle_sysfs_init(struct xe_gt_idle *gtidle)
 	}
 
 	if (xe_gt_is_media_type(gt)) {
-		sprintf(gtidle->name, "gt%d-mc\n", gt->info.id);
+		sprintf(gtidle->name, "gt%d-mc", gt->info.id);
 		gtidle->idle_residency = xe_guc_pc_mc6_residency;
 	} else {
-		sprintf(gtidle->name, "gt%d-rc\n", gt->info.id);
+		sprintf(gtidle->name, "gt%d-rc", gt->info.id);
 		gtidle->idle_residency = xe_guc_pc_rc6_residency;
 	}
 
...
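The dropped embedded "\n" above matters because the name is echoed back through a sysfs show() callback, and sysfs output conventionally carries exactly one trailing newline added at emit time. A sketch of the pattern, assuming a show() along these lines (dev_to_gtidle() is a hypothetical accessor, not the driver's):

/* Sketch, not the driver's exact callback: sysfs_emit() formats the
 * single trailing newline, so the stored name string must not carry
 * its own '\n' or the attribute prints a blank extra line. */
static ssize_t name_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct xe_gt_idle *gtidle = dev_to_gtidle(dev);	/* hypothetical */

	return sysfs_emit(buf, "%s\n", gtidle->name);
}
static DEVICE_ATTR_RO(name);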
@@ -1028,8 +1028,6 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
 	if (xe_exec_queue_is_lr(q))
 		cancel_work_sync(&ge->lr_tdr);
 
-	if (q->flags & EXEC_QUEUE_FLAG_PERSISTENT)
-		xe_device_remove_persistent_exec_queues(gt_to_xe(q->gt), q);
 	release_guc_id(guc, q);
 	xe_sched_entity_fini(&ge->entity);
 	xe_sched_fini(&ge->sched);
...
@@ -499,10 +499,12 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
 	 * this device *requires* 64K PTE size for VRAM, fail.
 	 */
 	if (level == 0 && !xe_parent->is_compact) {
-		if (xe_pt_is_pte_ps64K(addr, next, xe_walk))
+		if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) {
+			xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K;
 			pte |= XE_PTE_PS64;
-		else if (XE_WARN_ON(xe_walk->needs_64K))
+		} else if (XE_WARN_ON(xe_walk->needs_64K)) {
 			return -EINVAL;
+		}
 	}
 
 	ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte);
@@ -545,13 +547,16 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
 		*child = &xe_child->base;
 
 		/*
-		 * Prefer the compact pagetable layout for L0 if possible.
+		 * Prefer the compact pagetable layout for L0 if possible. Only
+		 * possible if VMA covers entire 2MB region as compact 64k and
+		 * 4k pages cannot be mixed within a 2MB region.
 		 * TODO: Suballocate the pt bo to avoid wasting a lot of
 		 * memory.
 		 */
 		if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 &&
 		    covers && xe_pt_scan_64K(addr, next, xe_walk)) {
 			walk->shifts = xe_compact_pt_shifts;
+			xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT;
 			flags |= XE_PDE_64K;
 			xe_child->is_compact = true;
 		}
...
@@ -2190,15 +2190,17 @@ static u64 xe_vma_max_pte_size(struct xe_vma *vma)
 {
 	if (vma->gpuva.flags & XE_VMA_PTE_1G)
 		return SZ_1G;
-	else if (vma->gpuva.flags & XE_VMA_PTE_2M)
+	else if (vma->gpuva.flags & (XE_VMA_PTE_2M | XE_VMA_PTE_COMPACT))
 		return SZ_2M;
+	else if (vma->gpuva.flags & XE_VMA_PTE_64K)
+		return SZ_64K;
 	else if (vma->gpuva.flags & XE_VMA_PTE_4K)
 		return SZ_4K;
 
 	return SZ_1G;	/* Uninitialized, used max size */
 }
 
-static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
+static void xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
 {
 	switch (size) {
 	case SZ_1G:
@@ -2207,9 +2209,13 @@ static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
 	case SZ_2M:
 		vma->gpuva.flags |= XE_VMA_PTE_2M;
 		break;
+	case SZ_64K:
+		vma->gpuva.flags |= XE_VMA_PTE_64K;
+		break;
+	case SZ_4K:
+		vma->gpuva.flags |= XE_VMA_PTE_4K;
+		break;
 	}
-
-	return SZ_4K;
 }
 
 static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
...
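Taken together, the xe_pt.c and xe_vm.c hunks are the three-patch VM_BIND rebind fix: the bind path now records 64K and compact placements in the VMA's gpuva flags, and xe_vma_max_pte_size()/xe_vma_set_pte_size() round-trip them instead of collapsing anything unrecognized to a 4K answer. The consequence for the rebind optimization, in a reduced sketch (the helper and its caller are illustrative; only the flag names and xe_vma_max_pte_size() come from the hunks above):

/* Illustrative only: with XE_VMA_PTE_64K/XE_VMA_PTE_COMPACT recorded,
 * a rebind can compare against the page size actually used at bind
 * time, rather than a misreported SZ_4K. */
static bool rebind_may_reuse_pt(struct xe_vma *vma, u64 required_pte_size)
{
	return xe_vma_max_pte_size(vma) >= required_pte_size;
}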
@@ -29,6 +29,8 @@ struct xe_vm;
 #define XE_VMA_PTE_4K		(DRM_GPUVA_USERBITS << 5)
 #define XE_VMA_PTE_2M		(DRM_GPUVA_USERBITS << 6)
 #define XE_VMA_PTE_1G		(DRM_GPUVA_USERBITS << 7)
+#define XE_VMA_PTE_64K		(DRM_GPUVA_USERBITS << 8)
+#define XE_VMA_PTE_COMPACT	(DRM_GPUVA_USERBITS << 9)
 
 /** struct xe_userptr - User pointer */
 struct xe_userptr {
...
@@ -1046,7 +1046,6 @@ struct drm_xe_exec_queue_create {
 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY		0
 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE		1
 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT	2
-#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE		3
 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT		4
 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER		5
 #define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY		6
...