Commit 6899192f authored by David S. Miller

Merge branch 'ipa-runtime-pm'

Alex Elder says:

====================
net: ipa: use runtime PM reference counting

This series does further rework of the IPA clock code so that we
rely on some of the core runtime power management code (including
its reference counting) instead.

The first patch makes ipa_clock_get() act like pm_runtime_get_sync().

The second patch makes system suspend occur regardless of the
current reference count value, which is again more like how the
runtime PM core code behaves.

The third patch creates functions to encapsulate all hardware
suspend and resume activity.  The fourth uses those functions as
the ->runtime_suspend and ->runtime_resume power callbacks.  With
that in place, ipa_clock_get() and ipa_clock_put() are changed to
use runtime PM get and put functions when needed.

The fifth patch eliminates an extra clock reference previously used
to control system suspend.  The sixth eliminates the "IPA clock"
reference count and mutex.

The final patch replaces the one call to ipa_clock_get_additional()
with a call to pm_runtime_get_if_active(), making the former
unnecessary.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6f45933d 0d08026a
......@@ -4,12 +4,11 @@
* Copyright (C) 2018-2021 Linaro Ltd.
*/
#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/bitops.h>
#include "ipa.h"
......@@ -57,16 +56,14 @@ enum ipa_power_flag {
/**
* struct ipa_clock - IPA clocking information
* @count: Clocking reference count
* @mutex: Protects clock enable/disable
* @dev: IPA device pointer
* @core: IPA core clock
* @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
* @interconnect: Interconnect array
*/
struct ipa_clock {
refcount_t count;
struct mutex mutex; /* protects clock enable/disable */
struct device *dev;
struct clk *core;
DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
......@@ -223,66 +220,65 @@ static int ipa_clock_enable(struct ipa *ipa)
}
/* Inverse of ipa_clock_enable() */
static void ipa_clock_disable(struct ipa *ipa)
static int ipa_clock_disable(struct ipa *ipa)
{
clk_disable_unprepare(ipa->clock->core);
(void)ipa_interconnect_disable(ipa);
return ipa_interconnect_disable(ipa);
}
/* Get an IPA clock reference, but only if the reference count is
* already non-zero. Returns true if the additional reference was
* added successfully, or false otherwise.
*/
bool ipa_clock_get_additional(struct ipa *ipa)
static int ipa_runtime_suspend(struct device *dev)
{
return refcount_inc_not_zero(&ipa->clock->count);
struct ipa *ipa = dev_get_drvdata(dev);
/* Endpoints aren't usable until setup is complete */
if (ipa->setup_complete) {
__clear_bit(IPA_POWER_FLAG_RESUMED, ipa->clock->flags);
ipa_endpoint_suspend(ipa);
gsi_suspend(&ipa->gsi);
}
return ipa_clock_disable(ipa);
}
/* Get an IPA clock reference. If the reference count is non-zero, it is
* incremented and return is immediate. Otherwise it is checked again
* under protection of the mutex, and if appropriate the IPA clock
* is enabled.
*
* Incrementing the reference count is intentionally deferred until
* after the clock is running and endpoints are resumed.
*/
void ipa_clock_get(struct ipa *ipa)
static int ipa_runtime_resume(struct device *dev)
{
struct ipa_clock *clock = ipa->clock;
struct ipa *ipa = dev_get_drvdata(dev);
int ret;
/* If the clock is running, just bump the reference count */
if (ipa_clock_get_additional(ipa))
return;
ret = ipa_clock_enable(ipa);
if (WARN_ON(ret < 0))
return ret;
/* Otherwise get the mutex and check again */
mutex_lock(&clock->mutex);
/* Endpoints aren't usable until setup is complete */
if (ipa->setup_complete) {
gsi_resume(&ipa->gsi);
ipa_endpoint_resume(ipa);
}
/* A reference might have been added before we got the mutex. */
if (ipa_clock_get_additional(ipa))
goto out_mutex_unlock;
return 0;
}
ret = ipa_clock_enable(ipa);
if (!ret)
refcount_set(&clock->count, 1);
out_mutex_unlock:
mutex_unlock(&clock->mutex);
/* ->runtime_idle callback for the runtime PM core.
 *
 * NOTE(review): returning a nonzero value from ->runtime_idle prevents
 * the PM core from automatically suspending the device from its idle
 * path; -EAGAIN presumably means suspend only happens via an explicit
 * pm_runtime_put() — confirm against the runtime PM documentation.
 */
static int ipa_runtime_idle(struct device *dev)
{
	return -EAGAIN;
}
/* Attempt to remove an IPA clock reference. If this represents the
* last reference, disable the IPA clock under protection of the mutex.
/* Get an IPA clock reference. If the reference count is non-zero, it is
* incremented and return is immediate. Otherwise the IPA clock is
* enabled.
*/
void ipa_clock_put(struct ipa *ipa)
int ipa_clock_get(struct ipa *ipa)
{
struct ipa_clock *clock = ipa->clock;
/* If this is not the last reference there's nothing more to do */
if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex))
return;
ipa_clock_disable(ipa);
return pm_runtime_get_sync(&ipa->pdev->dev);
}
mutex_unlock(&clock->mutex);
/* Attempt to remove an IPA clock reference. If this represents the
 * last reference, disable the IPA clock.
 *
 * Return: the value returned by pm_runtime_put() (0 on success, or a
 * negative error code propagated from the ->runtime_suspend callback).
 */
int ipa_clock_put(struct ipa *ipa)
{
	/* Reference counting is delegated entirely to the runtime PM
	 * core; dropping the last reference lets the core invoke
	 * ipa_runtime_suspend().
	 */
	return pm_runtime_put(&ipa->pdev->dev);
}
/* Return the current IPA core clock rate */
......@@ -352,6 +348,7 @@ ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
ret = -ENOMEM;
goto err_clk_put;
}
clock->dev = dev;
clock->core = clk;
clock->interconnect_count = data->interconnect_count;
......@@ -359,8 +356,8 @@ ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
if (ret)
goto err_kfree;
mutex_init(&clock->mutex);
refcount_set(&clock->count, 0);
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_enable(dev);
return clock;
......@@ -377,68 +374,16 @@ void ipa_clock_exit(struct ipa_clock *clock)
{
struct clk *clk = clock->core;
WARN_ON(refcount_read(&clock->count) != 0);
mutex_destroy(&clock->mutex);
pm_runtime_disable(clock->dev);
ipa_interconnect_exit(clock);
kfree(clock);
clk_put(clk);
}
/**
 * ipa_suspend() - Power management system suspend callback
 * @dev: IPA device structure
 *
 * Return: Always returns zero
 *
 * Called by the PM framework when a system suspend operation is invoked.
 * Suspends endpoints and releases the clock reference held to keep
 * the IPA clock running until this point.
 */
static int ipa_suspend(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);

	/* Endpoints aren't usable until setup is complete */
	if (ipa->setup_complete) {
		/* Clear the RESUMED flag so a subsequent resume is seen
		 * as fresh (flag semantics — confirm in the SUSPEND
		 * interrupt handler, not visible here).
		 */
		__clear_bit(IPA_POWER_FLAG_RESUMED, ipa->clock->flags);
		/* Quiesce endpoints before the underlying GSI layer */
		ipa_endpoint_suspend(ipa);
		gsi_suspend(&ipa->gsi);
	}

	/* Drop the reference taken in ipa_resume() so the IPA clock
	 * may actually be disabled.
	 */
	ipa_clock_put(ipa);

	return 0;
}
/**
 * ipa_resume() - Power management system resume callback
 * @dev: IPA device structure
 *
 * Return: Always returns 0
 *
 * Called by the PM framework when a system resume operation is invoked.
 * Takes an IPA clock reference to keep the clock running until suspend,
 * and resumes endpoints.
 */
static int ipa_resume(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);

	/* This clock reference will keep the IPA out of suspend
	 * until we get a power management suspend request.  It is
	 * balanced by the ipa_clock_put() in ipa_suspend().
	 */
	ipa_clock_get(ipa);

	/* Endpoints aren't usable until setup is complete */
	if (ipa->setup_complete) {
		/* Resume in the reverse order of ipa_suspend():
		 * GSI first, then the endpoints layered on top of it.
		 */
		gsi_resume(&ipa->gsi);
		ipa_endpoint_resume(ipa);
	}

	return 0;
}
const struct dev_pm_ops ipa_pm_ops = {
.suspend = ipa_suspend,
.resume = ipa_resume,
.suspend = pm_runtime_force_suspend,
.resume = pm_runtime_force_resume,
.runtime_suspend = ipa_runtime_suspend,
.runtime_resume = ipa_runtime_resume,
.runtime_idle = ipa_runtime_idle,
};
......@@ -54,26 +54,24 @@ void ipa_clock_exit(struct ipa_clock *clock);
* ipa_clock_get() - Get an IPA clock reference
* @ipa: IPA pointer
*
* This call blocks if this is the first reference.
*/
void ipa_clock_get(struct ipa *ipa);
/**
* ipa_clock_get_additional() - Get an IPA clock reference if not first
* @ipa: IPA pointer
* Return: 0 if clock started, 1 if clock already running, or a negative
* error code
*
* This returns immediately, and only takes a reference if not the first
* This call blocks if this is the first reference. A reference is
* taken even if an error occurs starting the IPA clock.
*/
bool ipa_clock_get_additional(struct ipa *ipa);
int ipa_clock_get(struct ipa *ipa);
/**
* ipa_clock_put() - Drop an IPA clock reference
* @ipa: IPA pointer
*
* Return: 0 if successful, or a negative error code
*
* This drops a clock reference. If the last reference is being dropped,
* the clock is stopped and RX endpoints are suspended. This call will
* not block unless the last reference is dropped.
*/
void ipa_clock_put(struct ipa *ipa);
int ipa_clock_put(struct ipa *ipa);
#endif /* _IPA_CLOCK_H_ */
......@@ -83,8 +83,11 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
u32 pending;
u32 offset;
u32 mask;
int ret;
ipa_clock_get(ipa);
ret = ipa_clock_get(ipa);
if (WARN_ON(ret < 0))
goto out_clock_put;
/* The status register indicates which conditions are present,
* including conditions whose interrupt is not enabled. Handle
......@@ -112,8 +115,8 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
offset = ipa_reg_irq_clr_offset(ipa->version);
iowrite32(pending, ipa->reg_virt + offset);
}
ipa_clock_put(ipa);
out_clock_put:
(void)ipa_clock_put(ipa);
return IRQ_HANDLED;
}
......
......@@ -427,12 +427,6 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
{
int ret;
/* Get a clock reference to allow initialization. This reference
* is held after initialization completes, and won't get dropped
* unless/until a system suspend request arrives.
*/
ipa_clock_get(ipa);
ipa_hardware_config(ipa, data);
ret = ipa_mem_config(ipa);
......@@ -475,7 +469,6 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
ipa_mem_deconfig(ipa);
err_hardware_deconfig:
ipa_hardware_deconfig(ipa);
ipa_clock_put(ipa);
return ret;
}
......@@ -493,7 +486,6 @@ static void ipa_deconfig(struct ipa *ipa)
ipa->interrupt = NULL;
ipa_mem_deconfig(ipa);
ipa_hardware_deconfig(ipa);
ipa_clock_put(ipa);
}
static int ipa_firmware_load(struct device *dev)
......@@ -750,20 +742,22 @@ static int ipa_probe(struct platform_device *pdev)
goto err_table_exit;
/* The clock needs to be active for config and setup */
ipa_clock_get(ipa);
ret = ipa_clock_get(ipa);
if (WARN_ON(ret < 0))
goto err_clock_put;
ret = ipa_config(ipa, data);
if (ret)
goto err_clock_put; /* Error */
goto err_clock_put;
dev_info(dev, "IPA driver initialized");
/* If the modem is doing early initialization, it will trigger a
* call to ipa_setup() call when it has finished. In that case
* we're done here.
* call to ipa_setup() when it has finished. In that case we're
* done here.
*/
if (modem_init)
goto out_clock_put; /* Done; no error */
goto done;
/* Otherwise we need to load the firmware and have Trust Zone validate
* and install it. If that succeeds we can proceed with setup.
......@@ -775,16 +769,15 @@ static int ipa_probe(struct platform_device *pdev)
ret = ipa_setup(ipa);
if (ret)
goto err_deconfig;
out_clock_put:
ipa_clock_put(ipa);
done:
(void)ipa_clock_put(ipa);
return 0;
err_deconfig:
ipa_deconfig(ipa);
err_clock_put:
ipa_clock_put(ipa);
(void)ipa_clock_put(ipa);
ipa_modem_exit(ipa);
err_table_exit:
ipa_table_exit(ipa);
......@@ -810,7 +803,9 @@ static int ipa_remove(struct platform_device *pdev)
struct ipa_clock *clock = ipa->clock;
int ret;
ipa_clock_get(ipa);
ret = ipa_clock_get(ipa);
if (WARN_ON(ret < 0))
goto out_clock_put;
if (ipa->setup_complete) {
ret = ipa_modem_stop(ipa);
......@@ -826,8 +821,8 @@ static int ipa_remove(struct platform_device *pdev)
}
ipa_deconfig(ipa);
ipa_clock_put(ipa);
out_clock_put:
(void)ipa_clock_put(ipa);
ipa_modem_exit(ipa);
ipa_table_exit(ipa);
......
......@@ -45,7 +45,9 @@ static int ipa_open(struct net_device *netdev)
struct ipa *ipa = priv->ipa;
int ret;
ipa_clock_get(ipa);
ret = ipa_clock_get(ipa);
if (WARN_ON(ret < 0))
goto err_clock_put;
ret = ipa_endpoint_enable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
if (ret)
......@@ -62,7 +64,7 @@ static int ipa_open(struct net_device *netdev)
err_disable_tx:
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
err_clock_put:
ipa_clock_put(ipa);
(void)ipa_clock_put(ipa);
return ret;
}
......@@ -78,7 +80,7 @@ static int ipa_stop(struct net_device *netdev)
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]);
ipa_endpoint_disable_one(ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]);
ipa_clock_put(ipa);
(void)ipa_clock_put(ipa);
return 0;
}
......@@ -297,7 +299,9 @@ static void ipa_modem_crashed(struct ipa *ipa)
struct device *dev = &ipa->pdev->dev;
int ret;
ipa_clock_get(ipa);
ret = ipa_clock_get(ipa);
if (WARN_ON(ret < 0))
goto out_clock_put;
ipa_endpoint_modem_pause_all(ipa, true);
......@@ -324,7 +328,8 @@ static void ipa_modem_crashed(struct ipa *ipa)
if (ret)
dev_err(dev, "error %d zeroing modem memory regions\n", ret);
ipa_clock_put(ipa);
out_clock_put:
(void)ipa_clock_put(ipa);
}
static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
......
......@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/pm_runtime.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
......@@ -84,13 +85,15 @@ struct ipa_smp2p {
*/
static void ipa_smp2p_notify(struct ipa_smp2p *smp2p)
{
struct device *dev;
u32 value;
u32 mask;
if (smp2p->notified)
return;
smp2p->clock_on = ipa_clock_get_additional(smp2p->ipa);
dev = &smp2p->ipa->pdev->dev;
smp2p->clock_on = pm_runtime_get_if_active(dev, true) > 0;
/* Signal whether the clock is enabled */
mask = BIT(smp2p->enabled_bit);
......@@ -150,24 +153,26 @@ static void ipa_smp2p_panic_notifier_unregister(struct ipa_smp2p *smp2p)
static irqreturn_t ipa_smp2p_modem_setup_ready_isr(int irq, void *dev_id)
{
struct ipa_smp2p *smp2p = dev_id;
int ret;
mutex_lock(&smp2p->mutex);
if (!smp2p->disabled) {
int ret;
if (smp2p->disabled)
goto out_mutex_unlock;
smp2p->disabled = true; /* If any others arrive, ignore them */
/* The clock needs to be active for setup */
ipa_clock_get(smp2p->ipa);
ret = ipa_clock_get(smp2p->ipa);
if (WARN_ON(ret < 0))
goto out_clock_put;
/* An error here won't cause driver shutdown, so warn if one occurs */
ret = ipa_setup(smp2p->ipa);
if (ret)
dev_err(&smp2p->ipa->pdev->dev,
"error %d from ipa_setup()\n", ret);
smp2p->disabled = true;
ipa_clock_put(smp2p->ipa);
}
WARN(ret != 0, "error %d from ipa_setup()\n", ret);
out_clock_put:
(void)ipa_clock_put(smp2p->ipa);
out_mutex_unlock:
mutex_unlock(&smp2p->mutex);
return IRQ_HANDLED;
......@@ -206,7 +211,7 @@ static void ipa_smp2p_clock_release(struct ipa *ipa)
if (!ipa->smp2p->clock_on)
return;
ipa_clock_put(ipa);
(void)ipa_clock_put(ipa);
ipa->smp2p->clock_on = false;
}
......
......@@ -154,7 +154,7 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
case IPA_UC_RESPONSE_INIT_COMPLETED:
if (ipa->uc_clocked) {
ipa->uc_loaded = true;
ipa_clock_put(ipa);
(void)ipa_clock_put(ipa);
ipa->uc_clocked = false;
} else {
dev_warn(dev, "unexpected init_completed response\n");
......@@ -182,21 +182,25 @@ void ipa_uc_deconfig(struct ipa *ipa)
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
if (ipa->uc_clocked)
ipa_clock_put(ipa);
(void)ipa_clock_put(ipa);
}
/* Take a proxy clock reference for the microcontroller */
void ipa_uc_clock(struct ipa *ipa)
{
static bool already;
int ret;
if (already)
return;
already = true; /* Only do this on first boot */
/* This clock reference dropped in ipa_uc_response_hdlr() above */
ipa_clock_get(ipa);
ipa->uc_clocked = true;
ret = ipa_clock_get(ipa);
if (WARN(ret < 0, "error %d getting proxy clock\n", ret))
(void)ipa_clock_put(ipa);
ipa->uc_clocked = ret >= 0;
}
/* Send a command to the microcontroller */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment