Commit 9aa51408 authored by Seungwon Jeon, committed by Chris Ball

mmc: dw_mmc: Add support for pre_req and post_req

This patch implements pre_req and post_req in dw_mmc to support
asynchronous mmc requests.
Signed-off-by: Seungwon Jeon <tgih.jun@samsung.com>
Acked-by: Jaehoon Chung <jh80.chung@samsung.com>
Acked-by: Will Newton <will.newton@imgtec.com>
Signed-off-by: Chris Ball <cjb@laptop.org>
parent 3e44a1a7
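
For readers unfamiliar with the asynchronous request interface: the mmc core calls a host's pre_req hook so the driver can DMA-map the next request's scatterlist while the current transfer is still running, and calls post_req afterwards so the driver can unmap it. The sketch below only illustrates that calling order; issue_and_prepare_next() and wait_for_completion_of() are hypothetical stand-ins for the core's request machinery and are not part of this patch, while the hook signatures match the ops added in the diff.

/*
 * Illustrative sketch of the calling order the new hooks rely on.
 * issue_and_prepare_next() and wait_for_completion_of() are hypothetical
 * stand-ins for the mmc core's request handling, not code from this patch.
 */
static void issue_and_prepare_next(struct mmc_host *host,
				   struct mmc_request *cur,
				   struct mmc_request *next)
{
	/* Map next->data->sg up front, while 'cur' is still in flight. */
	if (next && host->ops->pre_req)
		host->ops->pre_req(host, next, !cur);

	wait_for_completion_of(cur);	/* current transfer finishes */

	/* Unmap cur->data->sg and clear cur->data->host_cookie. */
	if (cur && host->ops->post_req)
		host->ops->post_req(host, cur, 0);
}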
@@ -296,14 +296,24 @@ static void dw_mci_stop_dma(struct dw_mci *host)
 }
 
 #ifdef CONFIG_MMC_DW_IDMAC
+static int dw_mci_get_dma_dir(struct mmc_data *data)
+{
+	if (data->flags & MMC_DATA_WRITE)
+		return DMA_TO_DEVICE;
+	else
+		return DMA_FROM_DEVICE;
+}
+
 static void dw_mci_dma_cleanup(struct dw_mci *host)
 {
 	struct mmc_data *data = host->data;
 
 	if (data)
-		dma_unmap_sg(&host->dev, data->sg, data->sg_len,
-			     ((data->flags & MMC_DATA_WRITE)
-			      ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+		if (!data->host_cookie)
+			dma_unmap_sg(&host->dev,
+				     data->sg,
+				     data->sg_len,
+				     dw_mci_get_dma_dir(data));
 }
 
 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
@@ -419,26 +429,15 @@ static int dw_mci_idmac_init(struct dw_mci *host)
 	return 0;
 }
 
-static struct dw_mci_dma_ops dw_mci_idmac_ops = {
-	.init = dw_mci_idmac_init,
-	.start = dw_mci_idmac_start_dma,
-	.stop = dw_mci_idmac_stop_dma,
-	.complete = dw_mci_idmac_complete_dma,
-	.cleanup = dw_mci_dma_cleanup,
-};
-#endif /* CONFIG_MMC_DW_IDMAC */
-
-static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
+static int dw_mci_pre_dma_transfer(struct dw_mci *host,
+				   struct mmc_data *data,
+				   bool next)
 {
 	struct scatterlist *sg;
-	unsigned int i, direction, sg_len;
-	u32 temp;
-
-	host->using_dma = 0;
+	unsigned int i, sg_len;
 
-	/* If we don't have a channel, we can't do DMA */
-	if (!host->use_dma)
-		return -ENODEV;
+	if (!next && data->host_cookie)
+		return data->host_cookie;
 
 	/*
 	 * We don't do DMA on "complex" transfers, i.e. with
@@ -447,6 +446,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
 	 */
 	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
 		return -EINVAL;
+
 	if (data->blksz & 3)
 		return -EINVAL;
@@ -455,15 +455,88 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
 		return -EINVAL;
 	}
 
-	host->using_dma = 1;
-
-	if (data->flags & MMC_DATA_READ)
-		direction = DMA_FROM_DEVICE;
-	else
-		direction = DMA_TO_DEVICE;
-
-	sg_len = dma_map_sg(&host->dev, data->sg, data->sg_len,
-			    direction);
+	sg_len = dma_map_sg(&host->dev,
+			    data->sg,
+			    data->sg_len,
+			    dw_mci_get_dma_dir(data));
+	if (sg_len == 0)
+		return -EINVAL;
+
+	if (next)
+		data->host_cookie = sg_len;
+
+	return sg_len;
+}
+
+static struct dw_mci_dma_ops dw_mci_idmac_ops = {
+	.init = dw_mci_idmac_init,
+	.start = dw_mci_idmac_start_dma,
+	.stop = dw_mci_idmac_stop_dma,
+	.complete = dw_mci_idmac_complete_dma,
+	.cleanup = dw_mci_dma_cleanup,
+};
+#else
+static int dw_mci_pre_dma_transfer(struct dw_mci *host,
+				   struct mmc_data *data,
+				   bool next)
+{
+	return -ENOSYS;
+}
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+static void dw_mci_pre_req(struct mmc_host *mmc,
+			   struct mmc_request *mrq,
+			   bool is_first_req)
+{
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;
+
+	if (!slot->host->use_dma || !data)
+		return;
+
+	if (data->host_cookie) {
+		data->host_cookie = 0;
+		return;
+	}
+
+	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
+		data->host_cookie = 0;
+}
+
+static void dw_mci_post_req(struct mmc_host *mmc,
+			    struct mmc_request *mrq,
+			    int err)
+{
+	struct dw_mci_slot *slot = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;
+
+	if (!slot->host->use_dma || !data)
+		return;
+
+	if (data->host_cookie)
+		dma_unmap_sg(&slot->host->dev,
+			     data->sg,
+			     data->sg_len,
+			     dw_mci_get_dma_dir(data));
+	data->host_cookie = 0;
+}
+
+static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
+{
+	int sg_len;
+	u32 temp;
+
+	host->using_dma = 0;
+
+	/* If we don't have a channel, we can't do DMA */
+	if (!host->use_dma)
+		return -ENODEV;
+
+	sg_len = dw_mci_pre_dma_transfer(host, data, 0);
+	if (sg_len < 0)
+		return sg_len;
+
+	host->using_dma = 1;
 
 	dev_vdbg(&host->dev,
 		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
@@ -800,6 +873,8 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
 static const struct mmc_host_ops dw_mci_ops = {
 	.request	= dw_mci_request,
+	.pre_req	= dw_mci_pre_req,
+	.post_req	= dw_mci_post_req,
 	.set_ios	= dw_mci_set_ios,
 	.get_ro		= dw_mci_get_ro,
 	.get_cd		= dw_mci_get_cd,
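
Taken together, the hooks and the submit path hand off the DMA mapping through data->host_cookie. A condensed sketch of that lifecycle, using only the driver-internal names from the hunks above:

/*
 * host_cookie lifecycle (as implemented above):
 *
 *   dw_mci_pre_req()          cookie == 0 -> dma_map_sg(), cookie = sg_len
 *   dw_mci_submit_data_dma()  cookie != 0 -> reuse the cached sg_len;
 *                             cookie == 0 -> map on the spot
 *   dw_mci_post_req()         cookie != 0 -> dma_unmap_sg(); cookie = 0
 *   dw_mci_dma_cleanup()      unmaps only when cookie == 0, i.e. when the
 *                             mapping was made by the submit path itself
 */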