Commit 379cad69 authored by Thomas Hellström

drm/xe: Use separate rpm lockdep map for non-d3cold-capable devices

For non-d3cold-capable devices we'd like to be able to wake up the
device from reclaim. In particular, for Lunar Lake we'd like to be
able to blit CCS metadata to system memory at shrink time; at least
from kswapd, where it's reasonably OK to wait for rpm resume and a
preceding rpm suspend.

Therefore use a separate lockdep map for such devices and prime it
as reclaim-tainted.
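
As a rough sketch (illustrative only, not part of this patch; the
example_* names are made up), priming a lockdep map reclaim-tainted
amounts to acquiring it once inside a simulated reclaim section, so
that lockdep records the reclaim -> map ordering up front:

  #include <linux/gfp.h>
  #include <linux/lockdep.h>
  #include <linux/sched/mm.h>

  static struct lockdep_map example_rpm_map = {
  	.name = "example_rpm_map"
  };

  static void example_prime_reclaim_tainted(void)
  {
  	/* Pretend to be in reclaim... */
  	fs_reclaim_acquire(GFP_KERNEL);
  	/* ...and take the map there once to record the ordering. */
  	lock_map_acquire(&example_rpm_map);
  	lock_map_release(&example_rpm_map);
  	fs_reclaim_release(GFP_KERNEL);
  }

From then on, waking the device (which acquires the map) while holding
any lock that is also taken under reclaim makes lockdep splat, which is
what lets rpm resume be called safely from shrinkers.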

v2:
- Rename the lockmap acquire and release functions. (Rodrigo Vivi).
- Reinstate the old xe_pm_runtime_lockdep_prime() function and
  rename it to xe_rpm_might_enter_cb(). (Matthew Auld).
- Introduce a separate xe_pm_runtime_lockdep_prime() function,
  called from module init, to prime known required locking orders.
v3:
- Actually hook up the prime function at module init.
v4:
- Rebase.
v5:
- Don't use reclaim-safe RPM with SR-IOV.

Cc: "Vivi, Rodrigo" <rodrigo.vivi@intel.com>
Cc: "Auld, Matthew" <matthew.auld@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240826143450.92511-1-thomas.hellstrom@linux.intel.com
parent 789e5159
@@ -13,6 +13,7 @@
 #include "xe_drv.h"
 #include "xe_hw_fence.h"
 #include "xe_pci.h"
+#include "xe_pm.h"
 #include "xe_observation.h"
 #include "xe_sched_job.h"
@@ -76,6 +77,10 @@ struct init_funcs {
 	void (*exit)(void);
 };
 
+static void xe_dummy_exit(void)
+{
+}
+
 static const struct init_funcs init_funcs[] = {
 	{
 		.init = xe_check_nomodeset,
@@ -96,6 +101,10 @@ static const struct init_funcs init_funcs[] = {
 		.init = xe_observation_sysctl_register,
 		.exit = xe_observation_sysctl_unregister,
 	},
+	{
+		.init = xe_pm_module_init,
+		.exit = xe_dummy_exit,
+	},
 };
 
 static int __init xe_call_init_func(unsigned int i)
...
@@ -70,11 +70,34 @@
  */
 #ifdef CONFIG_LOCKDEP
-static struct lockdep_map xe_pm_runtime_lockdep_map = {
-	.name = "xe_pm_runtime_lockdep_map"
+static struct lockdep_map xe_pm_runtime_d3cold_map = {
+	.name = "xe_rpm_d3cold_map"
+};
+
+static struct lockdep_map xe_pm_runtime_nod3cold_map = {
+	.name = "xe_rpm_nod3cold_map"
 };
 #endif
 
+static bool __maybe_unused xe_rpm_reclaim_safe(const struct xe_device *xe)
+{
+	return !xe->d3cold.capable && !xe->info.has_sriov;
+}
+
+static void xe_rpm_lockmap_acquire(const struct xe_device *xe)
+{
+	lock_map_acquire(xe_rpm_reclaim_safe(xe) ?
+			 &xe_pm_runtime_nod3cold_map :
+			 &xe_pm_runtime_d3cold_map);
+}
+
+static void xe_rpm_lockmap_release(const struct xe_device *xe)
+{
+	lock_map_release(xe_rpm_reclaim_safe(xe) ?
+			 &xe_pm_runtime_nod3cold_map :
+			 &xe_pm_runtime_d3cold_map);
+}
+
 /**
  * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
  * @xe: xe device instance
@@ -354,7 +377,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
 	 * annotation here and in xe_pm_runtime_get() lockdep will see
 	 * the potential lock inversion and give us a nice splat.
 	 */
-	lock_map_acquire(&xe_pm_runtime_lockdep_map);
+	xe_rpm_lockmap_acquire(xe);
 
 	/*
 	 * Applying lock for entire list op as xe_ttm_bo_destroy and xe_bo_move_notify
@@ -387,7 +410,7 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
 out:
 	if (err)
 		xe_display_pm_resume(xe, true);
-	lock_map_release(&xe_pm_runtime_lockdep_map);
+	xe_rpm_lockmap_release(xe);
 	xe_pm_write_callback_task(xe, NULL);
 	return err;
 }
@@ -408,7 +431,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
 	/* Disable access_ongoing asserts and prevent recursive pm calls */
 	xe_pm_write_callback_task(xe, current);
 
-	lock_map_acquire(&xe_pm_runtime_lockdep_map);
+	xe_rpm_lockmap_acquire(xe);
 
 	if (xe->d3cold.allowed) {
 		err = xe_pcode_ready(xe, true);
@@ -440,7 +463,7 @@ int xe_pm_runtime_resume(struct xe_device *xe)
 	}
 
 out:
-	lock_map_release(&xe_pm_runtime_lockdep_map);
+	xe_rpm_lockmap_release(xe);
 	xe_pm_write_callback_task(xe, NULL);
 	return err;
 }
@@ -454,15 +477,37 @@ int xe_pm_runtime_resume(struct xe_device *xe)
  * stuff that can happen inside the runtime_resume callback by acquiring
  * a dummy lock (it doesn't protect anything and gets compiled out on
  * non-debug builds). Lockdep then only needs to see the
- * xe_pm_runtime_lockdep_map -> runtime_resume callback once, and then can
- * hopefully validate all the (callers_locks) -> xe_pm_runtime_lockdep_map.
+ * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
+ * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
  * For example if the (callers_locks) are ever grabbed in the
  * runtime_resume callback, lockdep should give us a nice splat.
  */
-static void pm_runtime_lockdep_prime(void)
+static void xe_rpm_might_enter_cb(const struct xe_device *xe)
 {
-	lock_map_acquire(&xe_pm_runtime_lockdep_map);
-	lock_map_release(&xe_pm_runtime_lockdep_map);
+	xe_rpm_lockmap_acquire(xe);
+	xe_rpm_lockmap_release(xe);
+}
+
+/*
+ * Prime the lockdep maps for known locking orders that need to
+ * be supported but that may not always occur on all systems.
+ */
+static void xe_pm_runtime_lockdep_prime(void)
+{
+	struct dma_resv lockdep_resv;
+
+	dma_resv_init(&lockdep_resv);
+	lock_map_acquire(&xe_pm_runtime_d3cold_map);
+	/* D3Cold takes the dma_resv locks to evict bos */
+	dma_resv_lock(&lockdep_resv, NULL);
+	dma_resv_unlock(&lockdep_resv);
+	lock_map_release(&xe_pm_runtime_d3cold_map);
+
+	/* Shrinkers might like to wake up the device under reclaim. */
+	fs_reclaim_acquire(GFP_KERNEL);
+	lock_map_acquire(&xe_pm_runtime_nod3cold_map);
+	lock_map_release(&xe_pm_runtime_nod3cold_map);
+	fs_reclaim_release(GFP_KERNEL);
 }
@@ -477,7 +522,7 @@ void xe_pm_runtime_get(struct xe_device *xe)
 	if (xe_pm_read_callback_task(xe) == current)
 		return;
 
-	pm_runtime_lockdep_prime();
+	xe_rpm_might_enter_cb(xe);
 	pm_runtime_resume(xe->drm.dev);
 }
@@ -509,7 +554,7 @@ int xe_pm_runtime_get_ioctl(struct xe_device *xe)
 	if (WARN_ON(xe_pm_read_callback_task(xe) == current))
 		return -ELOOP;
 
-	pm_runtime_lockdep_prime();
+	xe_rpm_might_enter_cb(xe);
 	return pm_runtime_get_sync(xe->drm.dev);
 }
@@ -577,7 +622,7 @@ bool xe_pm_runtime_resume_and_get(struct xe_device *xe)
 		return true;
 	}
 
-	pm_runtime_lockdep_prime();
+	xe_rpm_might_enter_cb(xe);
 	return pm_runtime_resume_and_get(xe->drm.dev) >= 0;
 }
@@ -669,3 +714,14 @@ void xe_pm_d3cold_allowed_toggle(struct xe_device *xe)
 	drm_dbg(&xe->drm,
 		"d3cold: allowed=%s\n", str_yes_no(xe->d3cold.allowed));
 }
+
+/**
+ * xe_pm_module_init() - Perform xe_pm specific module initialization.
+ *
+ * Return: 0 on success. Currently doesn't fail.
+ */
+int __init xe_pm_module_init(void)
+{
+	xe_pm_runtime_lockdep_prime();
+	return 0;
+}
...
@@ -32,5 +32,6 @@ void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
 int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
 void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
 struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
+int xe_pm_module_init(void);
 
 #endif