Commit 57c7e3e4 authored by Stanislaw Gruszka's avatar Stanislaw Gruszka

accel/ivpu: Stop job_done_thread on suspend

Stop job_done thread when going to suspend. Use kthread_park() instead
of kthread_stop() to avoid memory allocation and potential failure
on resume.

Use a separate function as the thread wake-up condition. Use a spin lock to
ensure rx_msg_list is properly protected against concurrent access. This
avoids a race condition when the rx_msg_list list is modified and read in
ivpu_ipc_receive() at the same time.
Reviewed-by: Karol Wachowski <karol.wachowski@linux.intel.com>
Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Signed-off-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20231028155936.1183342-4-stanislaw.gruszka@linux.intel.com
parent a06eb9be
...@@ -378,6 +378,7 @@ int ivpu_boot(struct ivpu_device *vdev) ...@@ -378,6 +378,7 @@ int ivpu_boot(struct ivpu_device *vdev)
enable_irq(vdev->irq); enable_irq(vdev->irq);
ivpu_hw_irq_enable(vdev); ivpu_hw_irq_enable(vdev);
ivpu_ipc_enable(vdev); ivpu_ipc_enable(vdev);
ivpu_job_done_thread_enable(vdev);
return 0; return 0;
} }
...@@ -389,6 +390,7 @@ int ivpu_shutdown(struct ivpu_device *vdev) ...@@ -389,6 +390,7 @@ int ivpu_shutdown(struct ivpu_device *vdev)
disable_irq(vdev->irq); disable_irq(vdev->irq);
ivpu_ipc_disable(vdev); ivpu_ipc_disable(vdev);
ivpu_mmu_disable(vdev); ivpu_mmu_disable(vdev);
ivpu_job_done_thread_disable(vdev);
ret = ivpu_hw_power_down(vdev); ret = ivpu_hw_power_down(vdev);
if (ret) if (ret)
......
...@@ -202,6 +202,20 @@ ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct v ...@@ -202,6 +202,20 @@ ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct v
return ret; return ret;
} }
/*
 * Wake-up condition for the rx_msg_wq wait in ivpu_ipc_receive().
 *
 * Returns non-zero when the waiter should wake up: either a message has
 * been queued on @cons->rx_msg_list, or — when the caller is a kthread
 * (the job_done thread) — the thread has been asked to stop or park.
 *
 * The list check is done under rx_msg_lock so it cannot race with a
 * concurrent producer modifying rx_msg_list.
 */
static int ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
{
	int ret = 0;

	/*
	 * kthread_should_stop()/kthread_should_park() are only meaningful
	 * on a kernel thread; IS_KTHREAD() guards against other callers of
	 * ivpu_ipc_receive() — presumably user-task contexts, verify against
	 * callers.
	 */
	if (IS_KTHREAD())
		ret |= (kthread_should_stop() || kthread_should_park());

	/* Serialize against concurrent add/remove on rx_msg_list. */
	spin_lock_irq(&cons->rx_msg_lock);
	ret |= !list_empty(&cons->rx_msg_list);
	spin_unlock_irq(&cons->rx_msg_lock);

	return ret;
}
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
struct ivpu_ipc_hdr *ipc_buf, struct ivpu_ipc_hdr *ipc_buf,
struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms) struct vpu_jsm_msg *ipc_payload, unsigned long timeout_ms)
...@@ -211,8 +225,7 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, ...@@ -211,8 +225,7 @@ int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
int wait_ret, ret = 0; int wait_ret, ret = 0;
wait_ret = wait_event_interruptible_timeout(cons->rx_msg_wq, wait_ret = wait_event_interruptible_timeout(cons->rx_msg_wq,
(IS_KTHREAD() && kthread_should_stop()) || ivpu_ipc_rx_need_wakeup(cons),
!list_empty(&cons->rx_msg_list),
msecs_to_jiffies(timeout_ms)); msecs_to_jiffies(timeout_ms));
if (IS_KTHREAD() && kthread_should_stop()) if (IS_KTHREAD() && kthread_should_stop())
......
...@@ -590,6 +590,11 @@ static int ivpu_job_done_thread(void *arg) ...@@ -590,6 +590,11 @@ static int ivpu_job_done_thread(void *arg)
ivpu_pm_schedule_recovery(vdev); ivpu_pm_schedule_recovery(vdev);
} }
} }
if (kthread_should_park()) {
ivpu_dbg(vdev, JOB, "Parked %s\n", __func__);
kthread_parkme();
ivpu_dbg(vdev, JOB, "Unparked %s\n", __func__);
}
} }
ivpu_ipc_consumer_del(vdev, &cons); ivpu_ipc_consumer_del(vdev, &cons);
...@@ -610,9 +615,6 @@ int ivpu_job_done_thread_init(struct ivpu_device *vdev) ...@@ -610,9 +615,6 @@ int ivpu_job_done_thread_init(struct ivpu_device *vdev)
return -EIO; return -EIO;
} }
get_task_struct(thread);
wake_up_process(thread);
vdev->job_done_thread = thread; vdev->job_done_thread = thread;
return 0; return 0;
...@@ -620,6 +622,16 @@ int ivpu_job_done_thread_init(struct ivpu_device *vdev) ...@@ -620,6 +622,16 @@ int ivpu_job_done_thread_init(struct ivpu_device *vdev)
void ivpu_job_done_thread_fini(struct ivpu_device *vdev) void ivpu_job_done_thread_fini(struct ivpu_device *vdev)
{ {
kthread_unpark(vdev->job_done_thread);
kthread_stop(vdev->job_done_thread); kthread_stop(vdev->job_done_thread);
put_task_struct(vdev->job_done_thread); }
/*
 * Park the job_done thread when the device is being shut down / suspended.
 *
 * kthread_park() is used rather than kthread_stop() so that resume does
 * not have to recreate the thread — thread creation allocates memory and
 * could fail on resume (see commit message).
 */
void ivpu_job_done_thread_disable(struct ivpu_device *vdev)
{
	kthread_park(vdev->job_done_thread);
}
/*
 * Unpark the job_done thread on boot/resume; counterpart of
 * ivpu_job_done_thread_disable().
 */
void ivpu_job_done_thread_enable(struct ivpu_device *vdev)
{
	kthread_unpark(vdev->job_done_thread);
}
...@@ -61,6 +61,8 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev); ...@@ -61,6 +61,8 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
int ivpu_job_done_thread_init(struct ivpu_device *vdev); int ivpu_job_done_thread_init(struct ivpu_device *vdev);
void ivpu_job_done_thread_fini(struct ivpu_device *vdev); void ivpu_job_done_thread_fini(struct ivpu_device *vdev);
void ivpu_job_done_thread_disable(struct ivpu_device *vdev);
void ivpu_job_done_thread_enable(struct ivpu_device *vdev);
void ivpu_jobs_abort_all(struct ivpu_device *vdev); void ivpu_jobs_abort_all(struct ivpu_device *vdev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment