Commit 731e46c0 authored by Francois Dugast, committed by Rodrigo Vivi

drm/xe/exec_queue: Rename xe_exec_queue::compute to xe_exec_queue::lr

The properties of this struct are used in the long-running (LR) context, so
make that clear by renaming it to lr, in alignment with the rest of the code.

Cc: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240613170348.723245-1-francois.dugast@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 5d7612ae
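For quick orientation, the rename amounts to the following shape for the renamed member. This is a condensed sketch copied from the struct xe_exec_queue hunk further below; the containing struct and the rest of the kernel context are omitted, so it is an illustration of the change, not a standalone definition.

	/* Condensed sketch drawn from the diff below: the long-running (lr)
	 * state that replaces the old "compute" member inside
	 * struct xe_exec_queue. Only this sub-struct is shown.
	 */
	struct {
		/** @lr.pfence: preemption fence */
		struct dma_fence *pfence;
		/** @lr.context: preemption fence context */
		u64 context;
		/** @lr.seqno: preemption fence seqno */
		u32 seqno;
		/** @lr.link: link into VM's list of exec queues */
		struct list_head link;
		/** @lr.lock: preemption fences lock */
		spinlock_t lock;
	} lr;

Call sites change mechanically from q->compute.<member> to q->lr.<member>, as the remaining hunks show.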
@@ -67,7 +67,7 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
 	q->fence_irq = &gt->fence_irq[hwe->class];
 	q->ring_ops = gt->ring_ops[hwe->class];
 	q->ops = gt->exec_queue_ops;
-	INIT_LIST_HEAD(&q->compute.link);
+	INIT_LIST_HEAD(&q->lr.link);
 	INIT_LIST_HEAD(&q->multi_gt_link);
 	q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us;
@@ -633,8 +633,8 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 		return PTR_ERR(q);
 
 	if (xe_vm_in_preempt_fence_mode(vm)) {
-		q->compute.context = dma_fence_context_alloc(1);
-		spin_lock_init(&q->compute.lock);
+		q->lr.context = dma_fence_context_alloc(1);
+		spin_lock_init(&q->lr.lock);
 		err = xe_vm_add_compute_exec_queue(vm, q);
 		if (XE_IOCTL_DBG(xe, err))
...
@@ -113,19 +113,19 @@ struct xe_exec_queue {
 		enum xe_exec_queue_priority priority;
 	} sched_props;
 
-	/** @compute: compute exec queue state */
+	/** @lr: long-running exec queue state */
 	struct {
-		/** @compute.pfence: preemption fence */
+		/** @lr.pfence: preemption fence */
 		struct dma_fence *pfence;
-		/** @compute.context: preemption fence context */
+		/** @lr.context: preemption fence context */
 		u64 context;
-		/** @compute.seqno: preemption fence seqno */
+		/** @lr.seqno: preemption fence seqno */
 		u32 seqno;
-		/** @compute.link: link into VM's list of exec queues */
+		/** @lr.link: link into VM's list of exec queues */
 		struct list_head link;
-		/** @compute.lock: preemption fences lock */
+		/** @lr.lock: preemption fences lock */
 		spinlock_t lock;
-	} compute;
+	} lr;
 
 	/** @ops: submission backend exec queue operations */
 	const struct xe_exec_queue_ops *ops;
...
@@ -129,7 +129,7 @@ xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
 	list_del_init(&pfence->link);
 	pfence->q = xe_exec_queue_get(q);
 	dma_fence_init(&pfence->base, &preempt_fence_ops,
-		       &q->compute.lock, context, seqno);
+		       &q->lr.lock, context, seqno);
 
 	return &pfence->base;
 }
...
@@ -83,10 +83,10 @@ static bool preempt_fences_waiting(struct xe_vm *vm)
 	lockdep_assert_held(&vm->lock);
 	xe_vm_assert_held(vm);
 
-	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
-		if (!q->compute.pfence ||
+	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
+		if (!q->lr.pfence ||
 		    test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
-			     &q->compute.pfence->flags)) {
+			     &q->lr.pfence->flags)) {
 			return true;
 		}
 	}
@@ -129,14 +129,14 @@ static int wait_for_existing_preempt_fences(struct xe_vm *vm)
 	xe_vm_assert_held(vm);
 
-	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
-		if (q->compute.pfence) {
-			long timeout = dma_fence_wait(q->compute.pfence, false);
+	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
+		if (q->lr.pfence) {
+			long timeout = dma_fence_wait(q->lr.pfence, false);
 
 			if (timeout < 0)
 				return -ETIME;
-			dma_fence_put(q->compute.pfence);
-			q->compute.pfence = NULL;
+			dma_fence_put(q->lr.pfence);
+			q->lr.pfence = NULL;
 		}
 	}
@@ -148,7 +148,7 @@ static bool xe_vm_is_idle(struct xe_vm *vm)
 	struct xe_exec_queue *q;
 
 	xe_vm_assert_held(vm);
-	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
 		if (!xe_exec_queue_is_idle(q))
 			return false;
 	}
@@ -161,17 +161,17 @@ static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
 	struct list_head *link;
 	struct xe_exec_queue *q;
 
-	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
 		struct dma_fence *fence;
 
 		link = list->next;
 		xe_assert(vm->xe, link != list);
 
 		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
-					     q, q->compute.context,
-					     ++q->compute.seqno);
-		dma_fence_put(q->compute.pfence);
-		q->compute.pfence = fence;
+					     q, q->lr.context,
+					     ++q->lr.seqno);
+		dma_fence_put(q->lr.pfence);
+		q->lr.pfence = fence;
 	}
 }
@@ -191,10 +191,10 @@ static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo)
 	if (err)
 		goto out_unlock;
 
-	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
-		if (q->compute.pfence) {
+	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
+		if (q->lr.pfence) {
 			dma_resv_add_fence(bo->ttm.base.resv,
-					   q->compute.pfence,
+					   q->lr.pfence,
 					   DMA_RESV_USAGE_BOOKKEEP);
 		}
@@ -211,10 +211,10 @@ static void resume_and_reinstall_preempt_fences(struct xe_vm *vm,
 	lockdep_assert_held(&vm->lock);
 	xe_vm_assert_held(vm);
 
-	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
+	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link) {
 		q->ops->resume(q);
 
-		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence,
+		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->lr.pfence,
 					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP);
 	}
 }
@@ -238,16 +238,16 @@ int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
 	if (err)
 		goto out_up_write;
 
-	pfence = xe_preempt_fence_create(q, q->compute.context,
-					 ++q->compute.seqno);
+	pfence = xe_preempt_fence_create(q, q->lr.context,
+					 ++q->lr.seqno);
 	if (!pfence) {
 		err = -ENOMEM;
 		goto out_fini;
 	}
 
-	list_add(&q->compute.link, &vm->preempt.exec_queues);
+	list_add(&q->lr.link, &vm->preempt.exec_queues);
 	++vm->preempt.num_exec_queues;
-	q->compute.pfence = pfence;
+	q->lr.pfence = pfence;
 
 	down_read(&vm->userptr.notifier_lock);
@@ -284,12 +284,12 @@ void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q)
 		return;
 
 	down_write(&vm->lock);
-	list_del(&q->compute.link);
+	list_del(&q->lr.link);
 	--vm->preempt.num_exec_queues;
-	if (q->compute.pfence) {
-		dma_fence_enable_sw_signaling(q->compute.pfence);
-		dma_fence_put(q->compute.pfence);
-		q->compute.pfence = NULL;
+	if (q->lr.pfence) {
+		dma_fence_enable_sw_signaling(q->lr.pfence);
+		dma_fence_put(q->lr.pfence);
+		q->lr.pfence = NULL;
 	}
 	up_write(&vm->lock);
 }
@@ -327,7 +327,7 @@ static void xe_vm_kill(struct xe_vm *vm, bool unlocked)
 	vm->flags |= XE_VM_FLAG_BANNED;
 	trace_xe_vm_kill(vm);
 
-	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
+	list_for_each_entry(q, &vm->preempt.exec_queues, lr.link)
		q->ops->kill(q);
 
 	if (unlocked)
...