Commit ba6b035d authored by Stanislaw Gruszka

accel/ivpu: Abort pending rx ipc on reset

A process waiting for a particular condition will simply go back to sleep after wake_up() if the condition is still not met. Add an aborted flag to wake up IPC receivers; an aborted receive finishes with -ECANCELED.

This is only needed for reset: runtime power management prevents suspending the VPU while there is pending IPC processing or a pending job.
Reviewed-by: Karol Wachowski <karol.wachowski@linux.intel.com>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Signed-off-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231028155936.1183342-5-stanislaw.gruszka@linux.intel.com
parent 57c7e3e4
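
For readers skimming the diff below, the fix relies on the standard wait/wake contract: a waiter re-evaluates its wake-up condition after every wake_up(), so cancelling a wait means folding an extra flag into that condition and setting the flag before waking the waiter. A minimal userspace analogue of the pattern, with hypothetical names rather than the driver's actual code:

/*
 * Sketch of the abort-flag pattern in plain pthreads. "pending" stands in
 * for the driver's rx_msg_list; the names are illustrative only.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct consumer {
	pthread_mutex_t lock;	/* protects pending and aborted */
	pthread_cond_t wq;
	int pending;
	bool aborted;
};

/* Returns 0 when a message is available, -ECANCELED when aborted. */
int consumer_receive(struct consumer *c)
{
	int ret = 0;

	pthread_mutex_lock(&c->lock);
	/* The condition is re-checked after every wake-up. */
	while (!c->pending && !c->aborted)
		pthread_cond_wait(&c->wq, &c->lock);
	if (c->aborted)
		ret = -ECANCELED;
	else
		c->pending--;
	pthread_mutex_unlock(&c->lock);
	return ret;
}

/* Reset path: mark the consumer aborted, then wake it. */
void consumer_abort(struct consumer *c)
{
	pthread_mutex_lock(&c->lock);
	c->aborted = true;
	pthread_mutex_unlock(&c->lock);
	pthread_cond_broadcast(&c->wq);
}

The ordering in consumer_abort() mirrors what ivpu_ipc_disable() does in the diff below: the flag is flipped under the same lock that guards the wait condition, so a receiver cannot check the condition, miss the flag, and then sleep through the wake-up.
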
@@ -148,6 +148,7 @@ ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 	cons->channel = channel;
 	cons->tx_vpu_addr = 0;
 	cons->request_id = 0;
+	cons->aborted = false;
 	spin_lock_init(&cons->rx_msg_lock);
 	INIT_LIST_HEAD(&cons->rx_msg_list);
 	init_waitqueue_head(&cons->rx_msg_wq);
@@ -169,7 +170,8 @@ void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *c
 	spin_lock_irq(&cons->rx_msg_lock);
 	list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link) {
 		list_del(&rx_msg->link);
-		ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
+		if (!cons->aborted)
+			ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
 		atomic_dec(&ipc->rx_msg_count);
 		kfree(rx_msg);
 	}
@@ -210,7 +212,7 @@ static int ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
 	ret |= (kthread_should_stop() || kthread_should_park());
 	spin_lock_irq(&cons->rx_msg_lock);
-	ret |= !list_empty(&cons->rx_msg_list);
+	ret |= !list_empty(&cons->rx_msg_list) || cons->aborted;
 	spin_unlock_irq(&cons->rx_msg_lock);
 	return ret;
@@ -244,6 +246,12 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 		return -EAGAIN;
 	}
 	list_del(&rx_msg->link);
+	if (cons->aborted) {
+		spin_unlock_irq(&cons->rx_msg_lock);
+		ret = -ECANCELED;
+		goto out;
+	}
 	spin_unlock_irq(&cons->rx_msg_lock);
 	if (ipc_buf)
@@ -261,6 +269,7 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
 	}
 	ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
+out:
 	atomic_dec(&ipc->rx_msg_count);
 	kfree(rx_msg);
@@ -522,8 +531,12 @@ void ivpu_ipc_disable(struct ivpu_device *vdev)
 	mutex_unlock(&ipc->lock);
 	spin_lock_irqsave(&ipc->cons_list_lock, flags);
-	list_for_each_entry_safe(cons, c, &ipc->cons_list, link)
+	list_for_each_entry_safe(cons, c, &ipc->cons_list, link) {
+		spin_lock(&cons->rx_msg_lock);
+		cons->aborted = true;
+		spin_unlock(&cons->rx_msg_lock);
 		wake_up(&cons->rx_msg_wq);
+	}
 	spin_unlock_irqrestore(&ipc->cons_list_lock, flags);
 }
@@ -532,6 +545,7 @@ void ivpu_ipc_reset(struct ivpu_device *vdev)
 	struct ivpu_ipc_info *ipc = vdev->ipc;
 	mutex_lock(&ipc->lock);
+	drm_WARN_ON(&vdev->drm, ipc->on);
 	memset(ivpu_bo_vaddr(ipc->mem_tx), 0, ivpu_bo_size(ipc->mem_tx));
 	memset(ivpu_bo_vaddr(ipc->mem_rx), 0, ivpu_bo_size(ipc->mem_rx));
@@ -47,8 +47,9 @@ struct ivpu_ipc_consumer {
 	u32 channel;
 	u32 tx_vpu_addr;
 	u32 request_id;
+	bool aborted;
-	spinlock_t rx_msg_lock; /* Protects rx_msg_list */
+	spinlock_t rx_msg_lock; /* Protects rx_msg_list and aborted */
 	struct list_head rx_msg_list;
 	wait_queue_head_t rx_msg_wq;
 };
@@ -578,6 +578,7 @@ static int ivpu_job_done_thread(void *arg)
 	ivpu_ipc_consumer_add(vdev, &cons, VPU_IPC_CHAN_JOB_RET);
 	while (!kthread_should_stop()) {
+		cons.aborted = false;
 		timeout = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
 		jobs_submitted = !xa_empty(&vdev->submitted_jobs_xa);
 		ret = ivpu_ipc_receive(vdev, &cons, NULL, &jsm_msg, timeout);
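
The last hunk re-arms the flag at the top of each job-done-thread iteration, so a reset cancels only the receive that was in flight and the thread keeps serving once work resumes. Building on the userspace analogue sketched above, a consumer loop could treat the new return value as follows (hypothetical code, not the driver's handler):

/*
 * Hypothetical consumer loop: -ECANCELED means "this wait was aborted by a
 * reset", not a fatal error, so the loop re-arms the flag and waits again.
 */
void consumer_loop(struct consumer *c, volatile int *stop)
{
	while (!*stop) {
		int ret;

		pthread_mutex_lock(&c->lock);
		c->aborted = false;	/* re-arm before the next wait */
		pthread_mutex_unlock(&c->lock);

		ret = consumer_receive(c);
		if (ret == -ECANCELED)
			continue;	/* woken by a reset; nothing to consume */
		if (ret)
			break;		/* unexpected error */
		/* ... process the received message ... */
	}
}
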