Commit f86c24f4 authored by Boris Brezillon's avatar Boris Brezillon Committed by Mark Brown

spi: spi-mem: Split spi_mem_exec_op() code

The logic surrounding the ->exec_op() call applies to direct mapping
accessors. Move this code to separate functions to avoid duplicating
code.
Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
Reviewed-by: Miquel Raynal <miquel.raynal@bootlin.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
parent 0ebb261a
...@@ -213,6 +213,44 @@ bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op) ...@@ -213,6 +213,44 @@ bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
} }
EXPORT_SYMBOL_GPL(spi_mem_supports_op); EXPORT_SYMBOL_GPL(spi_mem_supports_op);
/*
 * Gain exclusive access to the SPI bus backing @mem before executing a
 * SPI memory operation.
 *
 * Flushes pending regular SPI messages, takes a runtime PM reference on
 * the controller's parent device (when auto_runtime_pm is set), then
 * acquires the bus and io mutexes, in that order. On success, the caller
 * must release everything with spi_mem_access_end().
 *
 * Returns 0 on success or a negative error code if powering up the
 * controller failed (in which case no locks are held on return).
 */
static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	/* Lock ordering: bus_lock_mutex first, then io_mutex. */
	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}
/*
 * Release the exclusive bus access taken by spi_mem_access_start():
 * drop the mutexes in reverse acquisition order (io_mutex, then
 * bus_lock_mutex) and put the runtime PM reference when
 * auto_runtime_pm is set.
 */
static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}
/** /**
* spi_mem_exec_op() - Execute a memory operation * spi_mem_exec_op() - Execute a memory operation
* @mem: the SPI memory * @mem: the SPI memory
...@@ -242,30 +280,13 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) ...@@ -242,30 +280,13 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
return -ENOTSUPP; return -ENOTSUPP;
if (ctlr->mem_ops) { if (ctlr->mem_ops) {
/* ret = spi_mem_access_start(mem);
* Flush the message queue before executing our SPI memory if (ret)
* operation to prevent preemption of regular SPI transfers. return ret;
*/
spi_flush_queue(ctlr);
if (ctlr->auto_runtime_pm) {
ret = pm_runtime_get_sync(ctlr->dev.parent);
if (ret < 0) {
dev_err(&ctlr->dev,
"Failed to power device: %d\n",
ret);
return ret;
}
}
mutex_lock(&ctlr->bus_lock_mutex);
mutex_lock(&ctlr->io_mutex);
ret = ctlr->mem_ops->exec_op(mem, op); ret = ctlr->mem_ops->exec_op(mem, op);
mutex_unlock(&ctlr->io_mutex);
mutex_unlock(&ctlr->bus_lock_mutex);
if (ctlr->auto_runtime_pm) spi_mem_access_end(mem);
pm_runtime_put(ctlr->dev.parent);
/* /*
* Some controllers only optimize specific paths (typically the * Some controllers only optimize specific paths (typically the
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment