Commit 5e43df14 authored by David S. Miller's avatar David S. Miller

Merge branch 'net-ipa-wake-up-system-on-RX-available'

Alex Elder says:

====================
net: ipa: wake up system on RX available

This series arranges for the IPA driver to wake up a suspended
system if the IPA hardware has a packet to deliver to the AP.

Version 2 replaced the first patch from version 1 with three
patches, in response to David Miller's feedback.  And based on
Bjorn Andersson's feedback on version 2, this version reworks
the tracking of IPA clock references.  As a result, we no
longer need a flag to determine whether a "don't suspend" clock
reference is held (though a bit in a bitmask is still used for
a different purpose).

In summary:
    - A refcount_t is used to track IPA clock references where an
      atomic_t was previously used.  (This may go away soon as well,
      with upcoming work to implement runtime PM.)
    - We no longer track whether a special reference has been taken
      to avoid suspending IPA.
    - A bit in a bitmask is used to ensure we only trigger a system
      resume once per system suspend.
And from the original series:
    - Suspending endpoints only occurs when suspending the driver,
      not when dropping the last clock reference.  Resuming
      endpoints is also disconnected from starting the clock.
    - The IPA SUSPEND interrupt is now a wakeup interrupt.  If it
      fires, it schedules a system resume operation.
    - The GSI interrupt is no longer a wakeup interrupt.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ba4ee3c0 54f7e443
...@@ -1987,31 +1987,26 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch, ...@@ -1987,31 +1987,26 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
} }
gsi->irq = irq; gsi->irq = irq;
ret = enable_irq_wake(gsi->irq);
if (ret)
dev_warn(dev, "error %d enabling gsi wake irq\n", ret);
gsi->irq_wake_enabled = !ret;
/* Get GSI memory range and map it */ /* Get GSI memory range and map it */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi"); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
if (!res) { if (!res) {
dev_err(dev, "DT error getting \"gsi\" memory property\n"); dev_err(dev, "DT error getting \"gsi\" memory property\n");
ret = -ENODEV; ret = -ENODEV;
goto err_disable_irq_wake; goto err_free_irq;
} }
size = resource_size(res); size = resource_size(res);
if (res->start > U32_MAX || size > U32_MAX - res->start) { if (res->start > U32_MAX || size > U32_MAX - res->start) {
dev_err(dev, "DT memory resource \"gsi\" out of range\n"); dev_err(dev, "DT memory resource \"gsi\" out of range\n");
ret = -EINVAL; ret = -EINVAL;
goto err_disable_irq_wake; goto err_free_irq;
} }
gsi->virt = ioremap(res->start, size); gsi->virt = ioremap(res->start, size);
if (!gsi->virt) { if (!gsi->virt) {
dev_err(dev, "unable to remap \"gsi\" memory\n"); dev_err(dev, "unable to remap \"gsi\" memory\n");
ret = -ENOMEM; ret = -ENOMEM;
goto err_disable_irq_wake; goto err_free_irq;
} }
ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc); ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
...@@ -2025,9 +2020,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch, ...@@ -2025,9 +2020,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
err_iounmap: err_iounmap:
iounmap(gsi->virt); iounmap(gsi->virt);
err_disable_irq_wake: err_free_irq:
if (gsi->irq_wake_enabled)
(void)disable_irq_wake(gsi->irq);
free_irq(gsi->irq, gsi); free_irq(gsi->irq, gsi);
return ret; return ret;
...@@ -2038,8 +2031,6 @@ void gsi_exit(struct gsi *gsi) ...@@ -2038,8 +2031,6 @@ void gsi_exit(struct gsi *gsi)
{ {
mutex_destroy(&gsi->mutex); mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi); gsi_channel_exit(gsi);
if (gsi->irq_wake_enabled)
(void)disable_irq_wake(gsi->irq);
free_irq(gsi->irq, gsi); free_irq(gsi->irq, gsi);
iounmap(gsi->virt); iounmap(gsi->virt);
} }
......
...@@ -150,7 +150,6 @@ struct gsi { ...@@ -150,7 +150,6 @@ struct gsi {
struct net_device dummy_dev; /* needed for NAPI */ struct net_device dummy_dev; /* needed for NAPI */
void __iomem *virt; void __iomem *virt;
u32 irq; u32 irq;
bool irq_wake_enabled;
u32 channel_count; u32 channel_count;
u32 evt_ring_count; u32 evt_ring_count;
struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX]; struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
......
...@@ -27,15 +27,25 @@ struct ipa_clock; ...@@ -27,15 +27,25 @@ struct ipa_clock;
struct ipa_smp2p; struct ipa_smp2p;
struct ipa_interrupt; struct ipa_interrupt;
/**
* enum ipa_flag - IPA state flags
* @IPA_FLAG_RESUMED: Whether resume from suspend has been signaled
* @IPA_FLAG_COUNT: Number of defined IPA flags
*/
enum ipa_flag {
IPA_FLAG_RESUMED,
IPA_FLAG_COUNT, /* Last; not a flag */
};
/** /**
* struct ipa - IPA information * struct ipa - IPA information
* @gsi: Embedded GSI structure * @gsi: Embedded GSI structure
* @flags: Boolean state flags
* @version: IPA hardware version * @version: IPA hardware version
* @pdev: Platform device * @pdev: Platform device
* @modem_rproc: Remoteproc handle for modem subsystem * @modem_rproc: Remoteproc handle for modem subsystem
* @smp2p: SMP2P information * @smp2p: SMP2P information
* @clock: IPA clocking information * @clock: IPA clocking information
* @suspend_ref: Whether clock reference preventing suspend taken
* @table_addr: DMA address of filter/route table content * @table_addr: DMA address of filter/route table content
* @table_virt: Virtual address of filter/route table content * @table_virt: Virtual address of filter/route table content
* @interrupt: IPA Interrupt information * @interrupt: IPA Interrupt information
...@@ -70,6 +80,7 @@ struct ipa_interrupt; ...@@ -70,6 +80,7 @@ struct ipa_interrupt;
*/ */
struct ipa { struct ipa {
struct gsi gsi; struct gsi gsi;
DECLARE_BITMAP(flags, IPA_FLAG_COUNT);
enum ipa_version version; enum ipa_version version;
struct platform_device *pdev; struct platform_device *pdev;
struct rproc *modem_rproc; struct rproc *modem_rproc;
...@@ -77,7 +88,6 @@ struct ipa { ...@@ -77,7 +88,6 @@ struct ipa {
void *notifier; void *notifier;
struct ipa_smp2p *smp2p; struct ipa_smp2p *smp2p;
struct ipa_clock *clock; struct ipa_clock *clock;
atomic_t suspend_ref;
dma_addr_t table_addr; dma_addr_t table_addr;
__le64 *table_virt; __le64 *table_virt;
...@@ -104,8 +114,6 @@ struct ipa { ...@@ -104,8 +114,6 @@ struct ipa {
void *zero_virt; void *zero_virt;
size_t zero_size; size_t zero_size;
struct wakeup_source *wakeup_source;
/* Bit masks indicating endpoint state */ /* Bit masks indicating endpoint state */
u32 available; /* supported by hardware */ u32 available; /* supported by hardware */
u32 filter_map; u32 filter_map;
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
* Copyright (C) 2018-2020 Linaro Ltd. * Copyright (C) 2018-2020 Linaro Ltd.
*/ */
#include <linux/atomic.h> #include <linux/refcount.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/device.h> #include <linux/device.h>
...@@ -51,7 +51,7 @@ ...@@ -51,7 +51,7 @@
* @config_path: Configuration space interconnect * @config_path: Configuration space interconnect
*/ */
struct ipa_clock { struct ipa_clock {
atomic_t count; refcount_t count;
struct mutex mutex; /* protects clock enable/disable */ struct mutex mutex; /* protects clock enable/disable */
struct clk *core; struct clk *core;
struct icc_path *memory_path; struct icc_path *memory_path;
...@@ -195,14 +195,13 @@ static void ipa_clock_disable(struct ipa *ipa) ...@@ -195,14 +195,13 @@ static void ipa_clock_disable(struct ipa *ipa)
*/ */
bool ipa_clock_get_additional(struct ipa *ipa) bool ipa_clock_get_additional(struct ipa *ipa)
{ {
return !!atomic_inc_not_zero(&ipa->clock->count); return refcount_inc_not_zero(&ipa->clock->count);
} }
/* Get an IPA clock reference. If the reference count is non-zero, it is /* Get an IPA clock reference. If the reference count is non-zero, it is
* incremented and return is immediate. Otherwise it is checked again * incremented and return is immediate. Otherwise it is checked again
* under protection of the mutex, and if appropriate the clock (and * under protection of the mutex, and if appropriate the IPA clock
* interconnects) are enabled suspended endpoints (if any) are resumed * is enabled.
* before returning.
* *
* Incrementing the reference count is intentionally deferred until * Incrementing the reference count is intentionally deferred until
* after the clock is running and endpoints are resumed. * after the clock is running and endpoints are resumed.
...@@ -229,28 +228,23 @@ void ipa_clock_get(struct ipa *ipa) ...@@ -229,28 +228,23 @@ void ipa_clock_get(struct ipa *ipa)
goto out_mutex_unlock; goto out_mutex_unlock;
} }
ipa_endpoint_resume(ipa); refcount_set(&clock->count, 1);
atomic_inc(&clock->count);
out_mutex_unlock: out_mutex_unlock:
mutex_unlock(&clock->mutex); mutex_unlock(&clock->mutex);
} }
/* Attempt to remove an IPA clock reference. If this represents the last /* Attempt to remove an IPA clock reference. If this represents the
* reference, suspend endpoints and disable the clock (and interconnects) * last reference, disable the IPA clock under protection of the mutex.
* under protection of a mutex.
*/ */
void ipa_clock_put(struct ipa *ipa) void ipa_clock_put(struct ipa *ipa)
{ {
struct ipa_clock *clock = ipa->clock; struct ipa_clock *clock = ipa->clock;
/* If this is not the last reference there's nothing more to do */ /* If this is not the last reference there's nothing more to do */
if (!atomic_dec_and_mutex_lock(&clock->count, &clock->mutex)) if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex))
return; return;
ipa_endpoint_suspend(ipa);
ipa_clock_disable(ipa); ipa_clock_disable(ipa);
mutex_unlock(&clock->mutex); mutex_unlock(&clock->mutex);
...@@ -294,7 +288,7 @@ struct ipa_clock *ipa_clock_init(struct device *dev) ...@@ -294,7 +288,7 @@ struct ipa_clock *ipa_clock_init(struct device *dev)
goto err_kfree; goto err_kfree;
mutex_init(&clock->mutex); mutex_init(&clock->mutex);
atomic_set(&clock->count, 0); refcount_set(&clock->count, 0);
return clock; return clock;
...@@ -311,7 +305,7 @@ void ipa_clock_exit(struct ipa_clock *clock) ...@@ -311,7 +305,7 @@ void ipa_clock_exit(struct ipa_clock *clock)
{ {
struct clk *clk = clock->core; struct clk *clk = clock->core;
WARN_ON(atomic_read(&clock->count) != 0); WARN_ON(refcount_read(&clock->count) != 0);
mutex_destroy(&clock->mutex); mutex_destroy(&clock->mutex);
ipa_interconnect_exit(clock); ipa_interconnect_exit(clock);
kfree(clock); kfree(clock);
......
...@@ -237,8 +237,16 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa) ...@@ -237,8 +237,16 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
goto err_kfree; goto err_kfree;
} }
ret = enable_irq_wake(irq);
if (ret) {
dev_err(dev, "error %d enabling wakeup for \"ipa\" IRQ\n", ret);
goto err_free_irq;
}
return interrupt; return interrupt;
err_free_irq:
free_irq(interrupt->irq, interrupt);
err_kfree: err_kfree:
kfree(interrupt); kfree(interrupt);
...@@ -248,6 +256,12 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa) ...@@ -248,6 +256,12 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
/* Tear down the IPA interrupt framework */ /* Tear down the IPA interrupt framework */
void ipa_interrupt_teardown(struct ipa_interrupt *interrupt) void ipa_interrupt_teardown(struct ipa_interrupt *interrupt)
{ {
struct device *dev = &interrupt->ipa->pdev->dev;
int ret;
ret = disable_irq_wake(interrupt->irq);
if (ret)
dev_err(dev, "error %d disabling \"ipa\" IRQ wakeup\n", ret);
free_irq(interrupt->irq, interrupt); free_irq(interrupt->irq, interrupt);
kfree(interrupt); kfree(interrupt);
} }
...@@ -75,17 +75,19 @@ ...@@ -75,17 +75,19 @@
* @ipa: IPA pointer * @ipa: IPA pointer
* @irq_id: IPA interrupt type (unused) * @irq_id: IPA interrupt type (unused)
* *
* When in suspended state, the IPA can trigger a resume by sending a SUSPEND * If an RX endpoint is in suspend state, and the IPA has a packet
* IPA interrupt. * destined for that endpoint, the IPA generates a SUSPEND interrupt
* to inform the AP that it should resume the endpoint. If we get
* one of these interrupts we just resume everything.
*/ */
static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id) static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{ {
/* Take a a single clock reference to prevent suspend. All /* Just report the event, and let system resume handle the rest.
* endpoints will be resumed as a result. This reference will * More than one endpoint could signal this; if so, ignore
* be dropped when we get a power management suspend request. * all but the first.
*/ */
if (!atomic_xchg(&ipa->suspend_ref, 1)) if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags))
ipa_clock_get(ipa); pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
/* Acknowledge/clear the suspend interrupt on all endpoints */ /* Acknowledge/clear the suspend interrupt on all endpoints */
ipa_interrupt_suspend_clear_all(ipa->interrupt); ipa_interrupt_suspend_clear_all(ipa->interrupt);
...@@ -106,6 +108,7 @@ int ipa_setup(struct ipa *ipa) ...@@ -106,6 +108,7 @@ int ipa_setup(struct ipa *ipa)
{ {
struct ipa_endpoint *exception_endpoint; struct ipa_endpoint *exception_endpoint;
struct ipa_endpoint *command_endpoint; struct ipa_endpoint *command_endpoint;
struct device *dev = &ipa->pdev->dev;
int ret; int ret;
/* Setup for IPA v3.5.1 has some slight differences */ /* Setup for IPA v3.5.1 has some slight differences */
...@@ -123,6 +126,10 @@ int ipa_setup(struct ipa *ipa) ...@@ -123,6 +126,10 @@ int ipa_setup(struct ipa *ipa)
ipa_uc_setup(ipa); ipa_uc_setup(ipa);
ret = device_init_wakeup(dev, true);
if (ret)
goto err_uc_teardown;
ipa_endpoint_setup(ipa); ipa_endpoint_setup(ipa);
/* We need to use the AP command TX endpoint to perform other /* We need to use the AP command TX endpoint to perform other
...@@ -158,7 +165,7 @@ int ipa_setup(struct ipa *ipa) ...@@ -158,7 +165,7 @@ int ipa_setup(struct ipa *ipa)
ipa->setup_complete = true; ipa->setup_complete = true;
dev_info(&ipa->pdev->dev, "IPA driver setup completed successfully\n"); dev_info(dev, "IPA driver setup completed successfully\n");
return 0; return 0;
...@@ -173,6 +180,8 @@ int ipa_setup(struct ipa *ipa) ...@@ -173,6 +180,8 @@ int ipa_setup(struct ipa *ipa)
ipa_endpoint_disable_one(command_endpoint); ipa_endpoint_disable_one(command_endpoint);
err_endpoint_teardown: err_endpoint_teardown:
ipa_endpoint_teardown(ipa); ipa_endpoint_teardown(ipa);
(void)device_init_wakeup(dev, false);
err_uc_teardown:
ipa_uc_teardown(ipa); ipa_uc_teardown(ipa);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
ipa_interrupt_teardown(ipa->interrupt); ipa_interrupt_teardown(ipa->interrupt);
...@@ -200,6 +209,7 @@ static void ipa_teardown(struct ipa *ipa) ...@@ -200,6 +209,7 @@ static void ipa_teardown(struct ipa *ipa)
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]; command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
ipa_endpoint_disable_one(command_endpoint); ipa_endpoint_disable_one(command_endpoint);
ipa_endpoint_teardown(ipa); ipa_endpoint_teardown(ipa);
(void)device_init_wakeup(&ipa->pdev->dev, false);
ipa_uc_teardown(ipa); ipa_uc_teardown(ipa);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
ipa_interrupt_teardown(ipa->interrupt); ipa_interrupt_teardown(ipa->interrupt);
...@@ -508,7 +518,6 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data) ...@@ -508,7 +518,6 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
* is held after initialization completes, and won't get dropped * is held after initialization completes, and won't get dropped
* unless/until a system suspend request arrives. * unless/until a system suspend request arrives.
*/ */
atomic_set(&ipa->suspend_ref, 1);
ipa_clock_get(ipa); ipa_clock_get(ipa);
ipa_hardware_config(ipa); ipa_hardware_config(ipa);
...@@ -544,7 +553,6 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data) ...@@ -544,7 +553,6 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
err_hardware_deconfig: err_hardware_deconfig:
ipa_hardware_deconfig(ipa); ipa_hardware_deconfig(ipa);
ipa_clock_put(ipa); ipa_clock_put(ipa);
atomic_set(&ipa->suspend_ref, 0);
return ret; return ret;
} }
...@@ -562,7 +570,6 @@ static void ipa_deconfig(struct ipa *ipa) ...@@ -562,7 +570,6 @@ static void ipa_deconfig(struct ipa *ipa)
ipa_endpoint_deconfig(ipa); ipa_endpoint_deconfig(ipa);
ipa_hardware_deconfig(ipa); ipa_hardware_deconfig(ipa);
ipa_clock_put(ipa); ipa_clock_put(ipa);
atomic_set(&ipa->suspend_ref, 0);
} }
static int ipa_firmware_load(struct device *dev) static int ipa_firmware_load(struct device *dev)
...@@ -709,7 +716,6 @@ static void ipa_validate_build(void) ...@@ -709,7 +716,6 @@ static void ipa_validate_build(void)
*/ */
static int ipa_probe(struct platform_device *pdev) static int ipa_probe(struct platform_device *pdev)
{ {
struct wakeup_source *wakeup_source;
struct device *dev = &pdev->dev; struct device *dev = &pdev->dev;
const struct ipa_data *data; const struct ipa_data *data;
struct ipa_clock *clock; struct ipa_clock *clock;
...@@ -758,27 +764,17 @@ static int ipa_probe(struct platform_device *pdev) ...@@ -758,27 +764,17 @@ static int ipa_probe(struct platform_device *pdev)
goto err_clock_exit; goto err_clock_exit;
} }
/* Create a wakeup source. */
wakeup_source = wakeup_source_register(dev, "ipa");
if (!wakeup_source) {
/* The most likely reason for failure is memory exhaustion */
ret = -ENOMEM;
goto err_clock_exit;
}
/* Allocate and initialize the IPA structure */ /* Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL); ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
if (!ipa) { if (!ipa) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_wakeup_source_unregister; goto err_clock_exit;
} }
ipa->pdev = pdev; ipa->pdev = pdev;
dev_set_drvdata(dev, ipa); dev_set_drvdata(dev, ipa);
ipa->modem_rproc = rproc; ipa->modem_rproc = rproc;
ipa->clock = clock; ipa->clock = clock;
atomic_set(&ipa->suspend_ref, 0);
ipa->wakeup_source = wakeup_source;
ipa->version = data->version; ipa->version = data->version;
ret = ipa_reg_init(ipa); ret = ipa_reg_init(ipa);
...@@ -857,8 +853,6 @@ static int ipa_probe(struct platform_device *pdev) ...@@ -857,8 +853,6 @@ static int ipa_probe(struct platform_device *pdev)
ipa_reg_exit(ipa); ipa_reg_exit(ipa);
err_kfree_ipa: err_kfree_ipa:
kfree(ipa); kfree(ipa);
err_wakeup_source_unregister:
wakeup_source_unregister(wakeup_source);
err_clock_exit: err_clock_exit:
ipa_clock_exit(clock); ipa_clock_exit(clock);
err_rproc_put: err_rproc_put:
...@@ -872,11 +866,8 @@ static int ipa_remove(struct platform_device *pdev) ...@@ -872,11 +866,8 @@ static int ipa_remove(struct platform_device *pdev)
struct ipa *ipa = dev_get_drvdata(&pdev->dev); struct ipa *ipa = dev_get_drvdata(&pdev->dev);
struct rproc *rproc = ipa->modem_rproc; struct rproc *rproc = ipa->modem_rproc;
struct ipa_clock *clock = ipa->clock; struct ipa_clock *clock = ipa->clock;
struct wakeup_source *wakeup_source;
int ret; int ret;
wakeup_source = ipa->wakeup_source;
if (ipa->setup_complete) { if (ipa->setup_complete) {
ret = ipa_modem_stop(ipa); ret = ipa_modem_stop(ipa);
if (ret) if (ret)
...@@ -893,7 +884,6 @@ static int ipa_remove(struct platform_device *pdev) ...@@ -893,7 +884,6 @@ static int ipa_remove(struct platform_device *pdev)
ipa_mem_exit(ipa); ipa_mem_exit(ipa);
ipa_reg_exit(ipa); ipa_reg_exit(ipa);
kfree(ipa); kfree(ipa);
wakeup_source_unregister(wakeup_source);
ipa_clock_exit(clock); ipa_clock_exit(clock);
rproc_put(rproc); rproc_put(rproc);
...@@ -907,13 +897,22 @@ static int ipa_remove(struct platform_device *pdev) ...@@ -907,13 +897,22 @@ static int ipa_remove(struct platform_device *pdev)
* Return: Always returns zero * Return: Always returns zero
* *
* Called by the PM framework when a system suspend operation is invoked. * Called by the PM framework when a system suspend operation is invoked.
* Suspends endpoints and releases the clock reference held to keep
* the IPA clock running until this point.
*/ */
static int ipa_suspend(struct device *dev) static int ipa_suspend(struct device *dev)
{ {
struct ipa *ipa = dev_get_drvdata(dev); struct ipa *ipa = dev_get_drvdata(dev);
/* When a suspended RX endpoint has a packet ready to receive, we
* get an IPA SUSPEND interrupt. We trigger a system resume in
* that case, but only on the first such interrupt since suspend.
*/
__clear_bit(IPA_FLAG_RESUMED, ipa->flags);
ipa_endpoint_suspend(ipa);
ipa_clock_put(ipa); ipa_clock_put(ipa);
atomic_set(&ipa->suspend_ref, 0);
return 0; return 0;
} }
...@@ -925,6 +924,8 @@ static int ipa_suspend(struct device *dev) ...@@ -925,6 +924,8 @@ static int ipa_suspend(struct device *dev)
* Return: Always returns 0 * Return: Always returns 0
* *
* Called by the PM framework when a system resume operation is invoked. * Called by the PM framework when a system resume operation is invoked.
* Takes an IPA clock reference to keep the clock running until suspend,
* and resumes endpoints.
*/ */
static int ipa_resume(struct device *dev) static int ipa_resume(struct device *dev)
{ {
...@@ -933,9 +934,10 @@ static int ipa_resume(struct device *dev) ...@@ -933,9 +934,10 @@ static int ipa_resume(struct device *dev)
/* This clock reference will keep the IPA out of suspend /* This clock reference will keep the IPA out of suspend
* until we get a power management suspend request. * until we get a power management suspend request.
*/ */
atomic_set(&ipa->suspend_ref, 1);
ipa_clock_get(ipa); ipa_clock_get(ipa);
ipa_endpoint_resume(ipa);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment