Commit 21243314 authored by Douglas Anderson, committed by Bjorn Andersson

spi: spi-qcom-qspi: Avoid clock setting if not needed

As of the recent changes to spi-qcom-qspi, setting the clock calls into the
interconnect framework and the OPP API. Those are expensive operations, so
skip them when the requested speed is unchanged from the last transfer. This
has a big impact on getting transfer rates back to where they were (or
slightly better) before those patches landed.

Fixes: cff80645 ("spi: spi-qcom-qspi: Add interconnect support")
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Acked-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Rajendra Nayak <rnayak@codeaurora.org>
Tested-by: Rajendra Nayak <rnayak@codeaurora.org>
Reviewed-by: Mukesh Kumar Savaliya <msavaliy@codeaurora.org>
Reviewed-by: Akash Asthana <akashast@codeaurora.org>
Link: https://lore.kernel.org/r/20200709075113.v2.1.Ia7cb4f41ce93d37d0a764b47c8a453ce9e9c70ef@changeid
Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
parent f79a158d
--- a/drivers/spi/spi-qcom-qspi.c
+++ b/drivers/spi/spi-qcom-qspi.c
@@ -144,6 +144,7 @@ struct qcom_qspi {
 	struct icc_path *icc_path_cpu_to_qspi;
 	struct opp_table *opp_table;
 	bool has_opp_table;
+	unsigned long last_speed;
 	/* Lock to protect data accessed by IRQs */
 	spinlock_t lock;
 };
@@ -226,19 +227,13 @@ static void qcom_qspi_handle_err(struct spi_master *master,
 	spin_unlock_irqrestore(&ctrl->lock, flags);
 }
 
-static int qcom_qspi_transfer_one(struct spi_master *master,
-				  struct spi_device *slv,
-				  struct spi_transfer *xfer)
+static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
 {
-	struct qcom_qspi *ctrl = spi_master_get_devdata(master);
 	int ret;
-	unsigned long speed_hz;
-	unsigned long flags;
 	unsigned int avg_bw_cpu;
 
-	speed_hz = slv->max_speed_hz;
-	if (xfer->speed_hz)
-		speed_hz = xfer->speed_hz;
+	if (speed_hz == ctrl->last_speed)
+		return 0;
 
 	/* In regular operation (SBL_EN=1) core must be 4x transfer clock */
 	ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
@@ -259,6 +254,28 @@ static int qcom_qspi_transfer_one(struct spi_master *master,
 		return ret;
 	}
 
+	ctrl->last_speed = speed_hz;
+
+	return 0;
+}
+
+static int qcom_qspi_transfer_one(struct spi_master *master,
+				  struct spi_device *slv,
+				  struct spi_transfer *xfer)
+{
+	struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+	int ret;
+	unsigned long speed_hz;
+	unsigned long flags;
+
+	speed_hz = slv->max_speed_hz;
+	if (xfer->speed_hz)
+		speed_hz = xfer->speed_hz;
+
+	ret = qcom_qspi_set_speed(ctrl, speed_hz);
+	if (ret)
+		return ret;
+
 	spin_lock_irqsave(&ctrl->lock, flags);
 
 	/* We are half duplex, so either rx or tx will be set */
@@ -602,7 +619,11 @@ static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
 		return ret;
 	}
 
-	return clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
+	ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
+	if (ret)
+		return ret;
+
+	return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
 }
 
 static int __maybe_unused qcom_qspi_suspend(struct device *dev)
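For reference, below is a minimal standalone sketch of the caching pattern the patch applies: remember the last speed that was actually programmed, return early from the expensive OPP/interconnect path when the requested speed matches it, and reapply the cached speed after runtime resume since the hardware rate was lost. This is not the driver code itself; "demo_ctrl", demo_apply_rate() and the other names are hypothetical placeholders standing in for the qcom_qspi structures and the dev_pm_opp_set_rate()/interconnect calls.

/*
 * Minimal sketch (hypothetical names) of the speed-caching pattern used by
 * the patch above.
 */

struct demo_ctrl {
	unsigned long last_speed;	/* last speed actually programmed */
};

/* Stand-in for the expensive OPP/interconnect reconfiguration. */
static int demo_apply_rate(struct demo_ctrl *ctrl, unsigned long hz)
{
	/* ... program clocks and bus bandwidth ... */
	return 0;
}

static int demo_set_speed(struct demo_ctrl *ctrl, unsigned long speed_hz)
{
	int ret;

	if (speed_hz == ctrl->last_speed)
		return 0;	/* nothing changed: skip the expensive calls */

	ret = demo_apply_rate(ctrl, speed_hz * 4);	/* core runs at 4x */
	if (ret)
		return ret;

	ctrl->last_speed = speed_hz;	/* cache only after success */
	return 0;
}

/* After runtime resume the hardware lost its rate, so reapply the cache. */
static int demo_runtime_resume(struct demo_ctrl *ctrl)
{
	return demo_apply_rate(ctrl, ctrl->last_speed * 4);
}

Caching the speed only after the rate change succeeds keeps the cached value consistent with the hardware, so a failed attempt is simply retried on the next transfer.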