Commit d7bdb8e6 authored by Ulf Hansson

pmdomain: core: Enable s2idle for CPU PM domains on PREEMPT_RT

To allow a genpd provider for a CPU PM domain to enter a domain-idle-state
during s2idle on a PREEMPT_RT based configuration, we can't use the regular
spinlock, as it is turned into a sleeping lock on PREEMPT_RT.

To address this problem, let's convert to using a raw spinlock instead, but
only for genpd providers that have the GENPD_FLAG_CPU_DOMAIN bit set. In
this way, the lock can still be acquired/released in atomic context, which
is needed in the idle path on PREEMPT_RT.
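
For illustration only (not part of this change; the provider name and
callbacks below are hypothetical), a genpd provider for a CPU PM domain
picks up the raw-spinlock based lock ops simply by registering the domain
with GENPD_FLAG_CPU_DOMAIN set, roughly like this minimal sketch:

  #include <linux/pm_domain.h>

  /* Hypothetical callbacks; they must not sleep, as they may run in atomic context. */
  static int my_cpu_pd_power_on(struct generic_pm_domain *pd)
  {
          return 0;       /* platform specific power-up would go here */
  }

  static int my_cpu_pd_power_off(struct generic_pm_domain *pd)
  {
          return 0;       /* platform specific power-down would go here */
  }

  static struct generic_pm_domain my_cpu_pd = {
          .name = "my-cpu-cluster-pd",
          .flags = GENPD_FLAG_IRQ_SAFE | GENPD_FLAG_CPU_DOMAIN,
          .power_on = my_cpu_pd_power_on,
          .power_off = my_cpu_pd_power_off,
  };

  static int my_cpu_pd_setup(void)
  {
          /* genpd_lock_init() now selects genpd_raw_spin_ops for CPU domains. */
          return pm_genpd_init(&my_cpu_pd, NULL, false);
  }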

Do note that the genpd power-on/off notifiers may also be fired during
s2idle, but these are already prepared for PREEMPT_RT as they are based on
raw notifiers. However, consumers of them may need to adapt accordingly to
work properly on PREEMPT_RT.
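
As a sketch of such a consumer (the names are made up; only
dev_pm_genpd_add_notifier() and the GENPD_NOTIFY_* events come from the
genpd API), the callback must avoid sleeping, since it can now be invoked
from the atomic s2idle path on PREEMPT_RT:

  #include <linux/notifier.h>
  #include <linux/pm_domain.h>

  /* Hypothetical consumer: the callback must not sleep on PREEMPT_RT. */
  static int my_pd_notifier_cb(struct notifier_block *nb,
                               unsigned long action, void *data)
  {
          switch (action) {
          case GENPD_NOTIFY_PRE_OFF:
                  /* Save context; no sleeping locks or allocations here. */
                  break;
          case GENPD_NOTIFY_ON:
                  /* Restore context. */
                  break;
          default:
                  break;
          }
          return NOTIFY_OK;
  }

  static struct notifier_block my_pd_nb = {
          .notifier_call = my_pd_notifier_cb,
  };

  /* 'dev' must already be attached to the CPU PM domain. */
  static int my_pd_notifier_register(struct device *dev)
  {
          return dev_pm_genpd_add_notifier(dev, &my_pd_nb);
  }
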
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Tested-by: Raghavendra Kakarla <quic_rkakarla@quicinc.com>  # qcm6490 with PREEMPT_RT set
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lore.kernel.org/r/20240527142557.321610-2-ulf.hansson@linaro.org
parent 328fc9b2
@@ -117,6 +117,48 @@ static const struct genpd_lock_ops genpd_spin_ops = {
 	.unlock = genpd_unlock_spin,
 };
 
+static void genpd_lock_raw_spin(struct generic_pm_domain *genpd)
+	__acquires(&genpd->raw_slock)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
+	genpd->raw_lock_flags = flags;
+}
+
+static void genpd_lock_nested_raw_spin(struct generic_pm_domain *genpd,
+				       int depth)
+	__acquires(&genpd->raw_slock)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave_nested(&genpd->raw_slock, flags, depth);
+	genpd->raw_lock_flags = flags;
+}
+
+static int genpd_lock_interruptible_raw_spin(struct generic_pm_domain *genpd)
+	__acquires(&genpd->raw_slock)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&genpd->raw_slock, flags);
+	genpd->raw_lock_flags = flags;
+	return 0;
+}
+
+static void genpd_unlock_raw_spin(struct generic_pm_domain *genpd)
+	__releases(&genpd->raw_slock)
+{
+	raw_spin_unlock_irqrestore(&genpd->raw_slock, genpd->raw_lock_flags);
+}
+
+static const struct genpd_lock_ops genpd_raw_spin_ops = {
+	.lock = genpd_lock_raw_spin,
+	.lock_nested = genpd_lock_nested_raw_spin,
+	.lock_interruptible = genpd_lock_interruptible_raw_spin,
+	.unlock = genpd_unlock_raw_spin,
+};
+
 #define genpd_lock(p) p->lock_ops->lock(p)
 #define genpd_lock_nested(p, d) p->lock_ops->lock_nested(p, d)
 #define genpd_lock_interruptible(p) p->lock_ops->lock_interruptible(p)
@@ -2143,7 +2185,10 @@ static void genpd_free_data(struct generic_pm_domain *genpd)
 
 static void genpd_lock_init(struct generic_pm_domain *genpd)
 {
-	if (genpd_is_irq_safe(genpd)) {
+	if (genpd_is_cpu_domain(genpd)) {
+		raw_spin_lock_init(&genpd->raw_slock);
+		genpd->lock_ops = &genpd_raw_spin_ops;
+	} else if (genpd_is_irq_safe(genpd)) {
 		spin_lock_init(&genpd->slock);
 		genpd->lock_ops = &genpd_spin_ops;
 	} else {
...
@@ -198,8 +198,11 @@ struct generic_pm_domain {
 			spinlock_t slock;
 			unsigned long lock_flags;
 		};
+		struct {
+			raw_spinlock_t raw_slock;
+			unsigned long raw_lock_flags;
+		};
 	};
 };
 
 static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
...