Commit edb2877f authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cjb/mmc: (39 commits)
  mmc: davinci: add support for SDIO irq handling
  mmc: fix division by zero in MMC core
  mmc: tmio_mmc: fix CMD irq handling
  mmc: tmio_mmc: handle missing HW interrupts
  mfd: sh_mobile_sdhi: activate SDIO IRQ for tmio_mmc
  mmc: tmio_mmc: implement SDIO IRQ support
  mfd: sdhi: require the tmio-mmc driver to bounce unaligned buffers
  mmc: tmio_mmc: silence compiler warnings
  mmc: tmio_mmc: implement a bounce buffer for unaligned DMA
  mmc: tmio_mmc: merge the private header into the driver
  mmc: tmio_mmc: fix PIO fallback on DMA descriptor allocation failure
  mmc: tmio_mmc: allow multi-element scatter-gather lists
  mmc: Register debugfs dir before calling card probe function.
  mmc: MMC_BLOCK_MINORS should depend on MMC_BLOCK.
  mmc: Explain why we make adjacent mmc_bus_{put,get} calls during rescan.
  mmc: Fix sd/sdio/mmc initialization frequency retries
  mmc: fix mmc_set_bus_width_ddr() call without bus-width-test cap
  mmc: dw_mmc: Add Synopsys DesignWare mmc host driver.
  mmc: add sdhci-tegra driver for Tegra SoCs
  mmc: sdhci: add quirk for max len ADMA descriptors
  ...
parents 5b2eef96 f9db92cb
@@ -770,7 +770,7 @@ static struct resource dove_sdio0_resources[] = {
};
static struct platform_device dove_sdio0 = {
-.name = "sdhci-mv",
+.name = "sdhci-dove",
.id = 0,
.dev = {
.dma_mask = &sdio_dmamask,
@@ -798,7 +798,7 @@ static struct resource dove_sdio1_resources[] = {
};
static struct platform_device dove_sdio1 = {
-.name = "sdhci-mv",
+.name = "sdhci-dove",
.id = 1,
.dev = {
.dma_mask = &sdio_dmamask,
...
/*
* include/asm-arm/arch-tegra/include/mach/sdhci.h
*
* Copyright (C) 2009 Palm, Inc.
* Author: Yvonne Yip <y@palm.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ASM_ARM_ARCH_TEGRA_SDHCI_H
#define __ASM_ARM_ARCH_TEGRA_SDHCI_H
#include <linux/mmc/host.h>
struct tegra_sdhci_platform_data {
int cd_gpio;
int wp_gpio;
int power_gpio;
int is_8bit;
};
#endif
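As an aside (editorial illustration, not part of the patch): a Tegra board file would typically fill this structure in and hang it off the sdhci platform device. The GPIO numbers below are purely hypothetical.

static struct tegra_sdhci_platform_data board_sdhci_pdata = {
        .cd_gpio = 69,          /* hypothetical card-detect GPIO */
        .wp_gpio = 57,          /* hypothetical write-protect GPIO */
        .power_gpio = -1,       /* no power GPIO wired on this board */
        .is_8bit = 0,
};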
@@ -131,11 +131,17 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
*/
mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES;
+/*
+ * All SDHI blocks support SDIO IRQ signalling.
+ */
+mmc_data->flags |= TMIO_MMC_SDIO_IRQ;
if (p && p->dma_slave_tx >= 0 && p->dma_slave_rx >= 0) {
priv->param_tx.slave_id = p->dma_slave_tx;
priv->param_rx.slave_id = p->dma_slave_rx;
priv->dma_priv.chan_priv_tx = &priv->param_tx;
priv->dma_priv.chan_priv_rx = &priv->param_rx;
+priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */
mmc_data->dma = &priv->dma_priv;
}
...
@@ -16,6 +16,7 @@ config MMC_BLOCK
config MMC_BLOCK_MINORS
int "Number of minors per block device"
+depends on MMC_BLOCK
range 4 256
default 8
help
...
@@ -16,3 +16,14 @@ config MMC_UNSAFE_RESUME
This option sets a default which can be overridden by the
module parameter "removable=0" or "removable=1".
config MMC_CLKGATE
bool "MMC host clock gating (EXPERIMENTAL)"
depends on EXPERIMENTAL
help
This will attempt to aggressively gate the clock to the MMC card.
This is done to save power due to gating off the logic and bus
noise when the MMC card is not in use. Your host driver has to
support handling this in order for it to be of any use.
If unsure, say N.
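For orientation only (this note and sketch are editorial, not part of the merge): with MMC_CLKGATE enabled the core signals gating purely through set_ios(), so a host driver that wants to benefit only needs to honour ios->clock == 0 and restore the clock when a non-zero frequency is requested again. A minimal, hypothetical sketch (the foo_* names are invented):

static void foo_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct foo_host *host = mmc_priv(mmc);

        if (ios->clock == 0 && host->clk_enabled) {
                /* the core gated the card clock; stop the controller clock */
                clk_disable(host->clk);
                host->clk_enabled = false;
        } else if (ios->clock > 0) {
                if (!host->clk_enabled) {
                        clk_enable(host->clk);
                        host->clk_enabled = true;
                }
                /* program the divider for the requested card clock */
                foo_set_card_clock(host, ios->clock);
        }
}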
@@ -303,14 +303,14 @@ int mmc_add_card(struct mmc_card *card)
type, card->rca);
}
-ret = device_add(&card->dev);
-if (ret)
-return ret;
#ifdef CONFIG_DEBUG_FS
mmc_add_card_debugfs(card);
#endif
+ret = device_add(&card->dev);
+if (ret)
+return ret;
mmc_card_set_present(card);
return 0;
...
@@ -22,6 +22,7 @@
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -130,6 +131,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
if (mrq->done)
mrq->done(mrq);
+mmc_host_clk_gate(host);
}
}
@@ -190,6 +193,7 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
mrq->stop->mrq = mrq;
}
}
+mmc_host_clk_ungate(host);
host->ops->request(host, mrq);
}
@@ -295,8 +299,9 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
unsigned int timeout_us, limit_us;
timeout_us = data->timeout_ns / 1000;
+if (mmc_host_clk_rate(card->host))
timeout_us += data->timeout_clks * 1000 /
-(card->host->ios.clock / 1000);
+(mmc_host_clk_rate(card->host) / 1000);
if (data->flags & MMC_DATA_WRITE)
/*
@@ -614,6 +619,8 @@ static inline void mmc_set_ios(struct mmc_host *host)
ios->power_mode, ios->chip_select, ios->vdd,
ios->bus_width, ios->timing);
+if (ios->clock > 0)
+mmc_set_ungated(host);
host->ops->set_ios(host, ios);
}
@@ -641,6 +648,61 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
mmc_set_ios(host);
}
#ifdef CONFIG_MMC_CLKGATE
/*
* This gates the clock by setting it to 0 Hz.
*/
void mmc_gate_clock(struct mmc_host *host)
{
unsigned long flags;
spin_lock_irqsave(&host->clk_lock, flags);
host->clk_old = host->ios.clock;
host->ios.clock = 0;
host->clk_gated = true;
spin_unlock_irqrestore(&host->clk_lock, flags);
mmc_set_ios(host);
}
/*
* This restores the clock from gating by using the cached
* clock value.
*/
void mmc_ungate_clock(struct mmc_host *host)
{
/*
* We should previously have gated the clock, so the clock shall
* be 0 here! The clock may however be 0 during initialization,
* when some request operations are performed before setting
* the frequency. When ungate is requested in that situation
* we just ignore the call.
*/
if (host->clk_old) {
BUG_ON(host->ios.clock);
/* This call will also set host->clk_gated to false */
mmc_set_clock(host, host->clk_old);
}
}
void mmc_set_ungated(struct mmc_host *host)
{
unsigned long flags;
/*
* We've been given a new frequency while the clock is gated,
* so make sure we regard this as ungating it.
*/
spin_lock_irqsave(&host->clk_lock, flags);
host->clk_gated = false;
spin_unlock_irqrestore(&host->clk_lock, flags);
}
#else
void mmc_set_ungated(struct mmc_host *host)
{
}
#endif
/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
@@ -1424,35 +1486,57 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
}
EXPORT_SYMBOL(mmc_set_blocklen);
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
host->f_init = freq;
#ifdef CONFIG_MMC_DEBUG
pr_info("%s: %s: trying to init card at %u Hz\n",
mmc_hostname(host), __func__, host->f_init);
#endif
mmc_power_up(host);
sdio_reset(host);
mmc_go_idle(host);
mmc_send_if_cond(host, host->ocr_avail);
/* Order's important: probe SDIO, then SD, then MMC */
if (!mmc_attach_sdio(host))
return 0;
if (!mmc_attach_sd(host))
return 0;
if (!mmc_attach_mmc(host))
return 0;
mmc_power_off(host);
return -EIO;
}
void mmc_rescan(struct work_struct *work)
{
+static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
-u32 ocr;
-int err;
-unsigned long flags;
int i;
-const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
-spin_lock_irqsave(&host->lock, flags);
-if (host->rescan_disable) {
-spin_unlock_irqrestore(&host->lock, flags);
+if (host->rescan_disable)
return;
-}
-spin_unlock_irqrestore(&host->lock, flags);
mmc_bus_get(host);
-/* if there is a card registered, check whether it is still present */
-if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead)
+/*
+ * if there is a _removable_ card registered, check whether it is
+ * still present
+ */
+if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
+&& mmc_card_is_removable(host))
host->bus_ops->detect(host);
+/*
+ * Let mmc_bus_put() free the bus/bus_ops if we've found that
+ * the card is no longer present.
+ */
mmc_bus_put(host);
mmc_bus_get(host);
/* if there still is a card present, stop here */
@@ -1461,8 +1545,6 @@ void mmc_rescan(struct work_struct *work)
goto out;
}
-/* detect a newly inserted card */
/*
* Only we can add a new handler, so it's safe to
* release the lock here.
@@ -1472,72 +1554,16 @@
if (host->ops->get_cd && host->ops->get_cd(host) == 0)
goto out;
-for (i = 0; i < ARRAY_SIZE(freqs); i++) {
mmc_claim_host(host);
+for (i = 0; i < ARRAY_SIZE(freqs); i++) {
-if (freqs[i] >= host->f_min)
-host->f_init = freqs[i];
-else if (!i || freqs[i-1] > host->f_min)
-host->f_init = host->f_min;
-else {
-mmc_release_host(host);
-goto out;
-}
+if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+break;
+if (freqs[i] < host->f_min)
+break;
-#ifdef CONFIG_MMC_DEBUG
-pr_info("%s: %s: trying to init card at %u Hz\n",
-mmc_hostname(host), __func__, host->f_init);
-#endif
-mmc_power_up(host);
-sdio_reset(host);
-mmc_go_idle(host);
-mmc_send_if_cond(host, host->ocr_avail);
-/*
- * First we search for SDIO...
- */
-err = mmc_send_io_op_cond(host, 0, &ocr);
-if (!err) {
-if (mmc_attach_sdio(host, ocr)) {
-mmc_claim_host(host);
-/*
- * Try SDMEM (but not MMC) even if SDIO
- * is broken.
- */
-if (mmc_send_app_op_cond(host, 0, &ocr))
-goto out_fail;
-if (mmc_attach_sd(host, ocr))
-mmc_power_off(host);
-}
-goto out;
-}
-/*
- * ...then normal SD...
- */
-err = mmc_send_app_op_cond(host, 0, &ocr);
-if (!err) {
-if (mmc_attach_sd(host, ocr))
-mmc_power_off(host);
-goto out;
-}
-/*
- * ...and finally MMC.
- */
-err = mmc_send_op_cond(host, 0, &ocr);
-if (!err) {
-if (mmc_attach_mmc(host, ocr))
-mmc_power_off(host);
-goto out;
-}
}
-out_fail:
mmc_release_host(host);
-mmc_power_off(host);
-}
out:
if (host->caps & MMC_CAP_NEEDS_POLL)
mmc_schedule_delayed_work(&host->detect, HZ);
}
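Worked example of the new retry loop (illustrative numbers, not from the patch): the loop stops as soon as one frequency initializes a card; otherwise, with host->f_min = 250 kHz it calls mmc_rescan_try_freq() at 400 kHz, then 300 kHz, then max(200000, 250000) = 250 kHz, and gives up there because freqs[2] = 200 kHz is already below f_min.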
@@ -1721,6 +1747,18 @@ int mmc_resume_host(struct mmc_host *host)
if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
mmc_power_up(host);
mmc_select_voltage(host, host->ocr);
+/*
+ * Tell runtime PM core we just powered up the card,
+ * since it still believes the card is powered off.
+ * Note that currently runtime PM is only enabled
+ * for SDIO cards that are MMC_CAP_POWER_OFF_CARD
+ */
+if (mmc_card_sdio(host->card) &&
+(host->caps & MMC_CAP_POWER_OFF_CARD)) {
+pm_runtime_disable(&host->card->dev);
+pm_runtime_set_active(&host->card->dev);
+pm_runtime_enable(&host->card->dev);
+}
}
BUG_ON(!host->bus_ops->resume);
err = host->bus_ops->resume(host);
...
@@ -33,6 +33,9 @@ void mmc_init_erase(struct mmc_card *card);
void mmc_set_chip_select(struct mmc_host *host, int mode);
void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
+void mmc_set_ungated(struct mmc_host *host);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,
@@ -54,9 +57,9 @@ void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
-int mmc_attach_mmc(struct mmc_host *host, u32 ocr);
-int mmc_attach_sd(struct mmc_host *host, u32 ocr);
-int mmc_attach_sdio(struct mmc_host *host, u32 ocr);
+int mmc_attach_mmc(struct mmc_host *host);
+int mmc_attach_sd(struct mmc_host *host);
+int mmc_attach_sdio(struct mmc_host *host);
/* Module parameters */
extern int use_spi_crc;
...
@@ -183,6 +183,11 @@ void mmc_add_host_debugfs(struct mmc_host *host)
&mmc_clock_fops))
goto err_node;
+#ifdef CONFIG_MMC_CLKGATE
+if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
+root, &host->clk_delay))
+goto err_node;
+#endif
return;
err_node:
...
@@ -3,6 +3,7 @@
*
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright (C) 2007-2008 Pierre Ossman
+ * Copyright (C) 2010 Linus Walleij
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -20,6 +21,7 @@
#include <linux/suspend.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
#include "core.h"
#include "host.h"
@@ -50,6 +52,205 @@ void mmc_unregister_host_class(void)
static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);
#ifdef CONFIG_MMC_CLKGATE
/*
* Enabling clock gating will make the core call out to the host
* once up and once down when it performs a request or card operation
* intermingled in any fashion. The driver will see this through
* set_ios() operations with ios.clock field set to 0 to gate (disable)
* the block clock, and to the old frequency to enable it again.
*/
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
{
unsigned long tick_ns;
unsigned long freq = host->ios.clock;
unsigned long flags;
if (!freq) {
pr_debug("%s: frequency set to 0 in disable function, "
"this means the clock is already disabled.\n",
mmc_hostname(host));
return;
}
/*
* New requests may have appeared while we were scheduling,
* then there is no reason to delay the check before
* clk_disable().
*/
spin_lock_irqsave(&host->clk_lock, flags);
/*
* Delay n bus cycles (at least 8 from MMC spec) before attempting
* to disable the MCI block clock. The reference count may have
* gone up again after this delay due to rescheduling!
*/
if (!host->clk_requests) {
spin_unlock_irqrestore(&host->clk_lock, flags);
tick_ns = DIV_ROUND_UP(1000000000, freq);
ndelay(host->clk_delay * tick_ns);
} else {
/* New users appeared while waiting for this work */
spin_unlock_irqrestore(&host->clk_lock, flags);
return;
}
mutex_lock(&host->clk_gate_mutex);
spin_lock_irqsave(&host->clk_lock, flags);
if (!host->clk_requests) {
spin_unlock_irqrestore(&host->clk_lock, flags);
/* This will set host->ios.clock to 0 */
mmc_gate_clock(host);
spin_lock_irqsave(&host->clk_lock, flags);
pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
}
spin_unlock_irqrestore(&host->clk_lock, flags);
mutex_unlock(&host->clk_gate_mutex);
}
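For a sense of scale (illustrative arithmetic, not from the patch): with the default clk_delay of 8 and a card clock of 400 kHz, tick_ns = DIV_ROUND_UP(1000000000, 400000) = 2500 ns, so the ndelay() above waits roughly 8 * 2500 ns = 20 us before the clock is actually gated; at 50 MHz the wait is only about 160 ns.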
/*
* Internal work. Work to disable the clock at some later point.
*/
static void mmc_host_clk_gate_work(struct work_struct *work)
{
struct mmc_host *host = container_of(work, struct mmc_host,
clk_gate_work);
mmc_host_clk_gate_delayed(host);
}
/**
* mmc_host_clk_ungate - ungate hardware MCI clocks
* @host: host to ungate.
*
* Makes sure the host ios.clock is restored to a non-zero value
* past this call. Increase clock reference count and ungate clock
* if we're the first user.
*/
void mmc_host_clk_ungate(struct mmc_host *host)
{
unsigned long flags;
mutex_lock(&host->clk_gate_mutex);
spin_lock_irqsave(&host->clk_lock, flags);
if (host->clk_gated) {
spin_unlock_irqrestore(&host->clk_lock, flags);
mmc_ungate_clock(host);
spin_lock_irqsave(&host->clk_lock, flags);
pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
}
host->clk_requests++;
spin_unlock_irqrestore(&host->clk_lock, flags);
mutex_unlock(&host->clk_gate_mutex);
}
/**
* mmc_host_may_gate_card - check if this card may be gated
* @card: card to check.
*/
static bool mmc_host_may_gate_card(struct mmc_card *card)
{
/* If there is no card we may gate it */
if (!card)
return true;
/*
* Don't gate SDIO cards! These need to be clocked at all times
* since they may be independent systems generating interrupts
* and other events. The clock requests counter from the core will
* go down to zero since the core does not need it, but we will not
* gate the clock, because there is somebody out there that may still
* be using it.
*/
if (mmc_card_sdio(card))
return false;
return true;
}
/**
* mmc_host_clk_gate - gate off hardware MCI clocks
* @host: host to gate.
*
* Calls the host driver with ios.clock set to zero as often as possible
* in order to gate off hardware MCI clocks. Decrease clock reference
* count and schedule disabling of clock.
*/
void mmc_host_clk_gate(struct mmc_host *host)
{
unsigned long flags;
spin_lock_irqsave(&host->clk_lock, flags);
host->clk_requests--;
if (mmc_host_may_gate_card(host->card) &&
!host->clk_requests)
schedule_work(&host->clk_gate_work);
spin_unlock_irqrestore(&host->clk_lock, flags);
}
/**
* mmc_host_clk_rate - get current clock frequency setting
* @host: host to get the clock frequency for.
*
* Returns current clock frequency regardless of gating.
*/
unsigned int mmc_host_clk_rate(struct mmc_host *host)
{
unsigned long freq;
unsigned long flags;
spin_lock_irqsave(&host->clk_lock, flags);
if (host->clk_gated)
freq = host->clk_old;
else
freq = host->ios.clock;
spin_unlock_irqrestore(&host->clk_lock, flags);
return freq;
}
/**
* mmc_host_clk_init - set up clock gating code
* @host: host with potential clock to control
*/
static inline void mmc_host_clk_init(struct mmc_host *host)
{
host->clk_requests = 0;
/* Hold MCI clock for 8 cycles by default */
host->clk_delay = 8;
host->clk_gated = false;
INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
spin_lock_init(&host->clk_lock);
mutex_init(&host->clk_gate_mutex);
}
/**
* mmc_host_clk_exit - shut down clock gating code
* @host: host with potential clock to control
*/
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
/*
* Wait for any outstanding gate and then make sure we're
* ungated before exiting.
*/
if (cancel_work_sync(&host->clk_gate_work))
mmc_host_clk_gate_delayed(host);
if (host->clk_gated)
mmc_host_clk_ungate(host);
/* There should be only one user now */
WARN_ON(host->clk_requests > 1);
}
#else
static inline void mmc_host_clk_init(struct mmc_host *host)
{
}
static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}
#endif
/**
 * mmc_alloc_host - initialise the per-host structure.
 * @extra: sizeof private data structure
@@ -82,6 +283,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
host->class_dev.class = &mmc_host_class;
device_initialize(&host->class_dev);
+mmc_host_clk_init(host);
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -163,6 +366,8 @@ void mmc_remove_host(struct mmc_host *host)
device_del(&host->class_dev);
led_trigger_unregister_simple(host->led);
+mmc_host_clk_exit(host);
}
EXPORT_SYMBOL(mmc_remove_host);
@@ -183,4 +388,3 @@ void mmc_free_host(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_free_host);
@@ -10,10 +10,31 @@
*/
#ifndef _MMC_CORE_HOST_H
#define _MMC_CORE_HOST_H
+#include <linux/mmc/host.h>
int mmc_register_host_class(void);
void mmc_unregister_host_class(void);
#ifdef CONFIG_MMC_CLKGATE
void mmc_host_clk_ungate(struct mmc_host *host);
void mmc_host_clk_gate(struct mmc_host *host);
unsigned int mmc_host_clk_rate(struct mmc_host *host);
#else
static inline void mmc_host_clk_ungate(struct mmc_host *host)
{
}
static inline void mmc_host_clk_gate(struct mmc_host *host)
{
}
static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
{
return host->ios.clock;
}
#endif
void mmc_host_deeper_disable(struct work_struct *work);
#endif
...
@@ -534,39 +534,57 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
-unsigned ext_csd_bit, bus_width;
-if (host->caps & MMC_CAP_8_BIT_DATA) {
-if (ddr)
-ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8;
-else
-ext_csd_bit = EXT_CSD_BUS_WIDTH_8;
-bus_width = MMC_BUS_WIDTH_8;
-} else {
-if (ddr)
-ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_4;
-else
-ext_csd_bit = EXT_CSD_BUS_WIDTH_4;
-bus_width = MMC_BUS_WIDTH_4;
-}
+static unsigned ext_csd_bits[][2] = {
+{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
+{ EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 },
+{ EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 },
+};
+static unsigned bus_widths[] = {
+MMC_BUS_WIDTH_8,
+MMC_BUS_WIDTH_4,
+MMC_BUS_WIDTH_1
+};
+unsigned idx, bus_width = 0;
+if (host->caps & MMC_CAP_8_BIT_DATA)
+idx = 0;
+else
+idx = 1;
+for (; idx < ARRAY_SIZE(bus_widths); idx++) {
+bus_width = bus_widths[idx];
+if (bus_width == MMC_BUS_WIDTH_1)
+ddr = 0; /* no DDR for 1-bit width */
+err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+EXT_CSD_BUS_WIDTH,
+ext_csd_bits[idx][0]);
+if (!err) {
+mmc_set_bus_width_ddr(card->host,
+bus_width, MMC_SDR_MODE);
+/*
+ * If controller can't handle bus width test,
+ * use the highest bus width to maintain
+ * compatibility with previous MMC behavior.
+ */
+if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
+break;
+err = mmc_bus_test(card, bus_width);
+if (!err)
+break;
+}
+}
-err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-EXT_CSD_BUS_WIDTH, ext_csd_bit);
-if (err && err != -EBADMSG)
-goto free_card;
+if (!err && ddr) {
+err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+EXT_CSD_BUS_WIDTH,
+ext_csd_bits[idx][1]);
+}
if (err) {
printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
"failed\n", mmc_hostname(card->host),
1 << bus_width, ddr);
-err = 0;
-} else {
-if (ddr)
+goto free_card;
+} else if (ddr) {
mmc_card_set_ddr_mode(card);
-else
-ddr = MMC_SDR_MODE;
mmc_set_bus_width_ddr(card->host, bus_width, ddr);
}
}
@@ -737,14 +755,21 @@ static void mmc_attach_bus_ops(struct mmc_host *host)
/*
* Starting point for MMC card init.
*/
-int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
+int mmc_attach_mmc(struct mmc_host *host)
{
int err;
+u32 ocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
+err = mmc_send_op_cond(host, 0, &ocr);
+if (err)
+return err;
mmc_attach_bus_ops(host);
+if (host->ocr_avail_mmc)
+host->ocr_avail = host->ocr_avail_mmc;
/*
* We need to get OCR a different way for SPI.
@@ -784,20 +809,20 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr)
goto err;
mmc_release_host(host);
err = mmc_add_card(host->card);
+mmc_claim_host(host);
if (err)
goto remove_card;
return 0;
remove_card:
+mmc_release_host(host);
mmc_remove_card(host->card);
-host->card = NULL;
mmc_claim_host(host);
+host->card = NULL;
err:
mmc_detach_bus(host);
-mmc_release_host(host);
printk(KERN_ERR "%s: error %d whilst initialising MMC card\n",
mmc_hostname(host), err);
...
@@ -462,3 +462,104 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
return 0;
}
static int
mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
u8 len)
{
struct mmc_request mrq;
struct mmc_command cmd;
struct mmc_data data;
struct scatterlist sg;
u8 *data_buf;
u8 *test_buf;
int i, err;
static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 };
static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 };
/* dma onto stack is unsafe/nonportable, but callers to this
* routine normally provide temporary on-stack buffers ...
*/
data_buf = kmalloc(len, GFP_KERNEL);
if (!data_buf)
return -ENOMEM;
if (len == 8)
test_buf = testdata_8bit;
else if (len == 4)
test_buf = testdata_4bit;
else {
printk(KERN_ERR "%s: Invalid bus_width %d\n",
mmc_hostname(host), len);
kfree(data_buf);
return -EINVAL;
}
if (opcode == MMC_BUS_TEST_W)
memcpy(data_buf, test_buf, len);
memset(&mrq, 0, sizeof(struct mmc_request));
memset(&cmd, 0, sizeof(struct mmc_command));
memset(&data, 0, sizeof(struct mmc_data));
mrq.cmd = &cmd;
mrq.data = &data;
cmd.opcode = opcode;
cmd.arg = 0;
/* NOTE HACK: the MMC_RSP_SPI_R1 is always correct here, but we
* rely on callers to never use this with "native" calls for reading
* CSD or CID. Native versions of those commands use the R2 type,
* not R1 plus a data block.
*/
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
data.blksz = len;
data.blocks = 1;
if (opcode == MMC_BUS_TEST_R)
data.flags = MMC_DATA_READ;
else
data.flags = MMC_DATA_WRITE;
data.sg = &sg;
data.sg_len = 1;
sg_init_one(&sg, data_buf, len);
mmc_wait_for_req(host, &mrq);
err = 0;
if (opcode == MMC_BUS_TEST_R) {
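/*
 * Per the MMC bus-testing procedure, BUS_TEST_R returns the logical
 * inverse of the pattern written by BUS_TEST_W, so on a healthy bus
 * each compared byte XORed with the sent pattern yields 0xff.
 */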
for (i = 0; i < len / 4; i++)
if ((test_buf[i] ^ data_buf[i]) != 0xff) {
err = -EIO;
break;
}
}
kfree(data_buf);
if (cmd.error)
return cmd.error;
if (data.error)
return data.error;
return err;
}
int mmc_bus_test(struct mmc_card *card, u8 bus_width)
{
int err, width;
if (bus_width == MMC_BUS_WIDTH_8)
width = 8;
else if (bus_width == MMC_BUS_WIDTH_4)
width = 4;
else if (bus_width == MMC_BUS_WIDTH_1)
return 0; /* no need for test */
else
return -EINVAL;
/*
* Ignore errors from BUS_TEST_W. BUS_TEST_R will fail if there
* is a problem. This improves chances that the test will work.
*/
mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
return err;
}
@@ -26,6 +26,7 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_card_sleepawake(struct mmc_host *host, int sleep);
+int mmc_bus_test(struct mmc_card *card, u8 bus_width);
#endif
@@ -764,14 +764,21 @@ static void mmc_sd_attach_bus_ops(struct mmc_host *host)
/*
* Starting point for SD card init.
*/
-int mmc_attach_sd(struct mmc_host *host, u32 ocr)
+int mmc_attach_sd(struct mmc_host *host)
{
int err;
+u32 ocr;
BUG_ON(!host);
WARN_ON(!host->claimed);
+err = mmc_send_app_op_cond(host, 0, &ocr);
+if (err)
+return err;
mmc_sd_attach_bus_ops(host);
+if (host->ocr_avail_sd)
+host->ocr_avail = host->ocr_avail_sd;
/*
* We need to get OCR a different way for SPI.
@@ -795,7 +802,8 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
ocr &= ~0x7F;
}
-if (ocr & MMC_VDD_165_195) {
+if ((ocr & MMC_VDD_165_195) &&
+!(host->ocr_avail_sd & MMC_VDD_165_195)) {
printk(KERN_WARNING "%s: SD card claims to support the "
"incompletely defined 'low voltage range'. This "
"will be ignored.\n", mmc_hostname(host));
@@ -820,20 +828,20 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)
goto err;
mmc_release_host(host);
err = mmc_add_card(host->card);
+mmc_claim_host(host);
if (err)
goto remove_card;
return 0;
remove_card:
+mmc_release_host(host);
mmc_remove_card(host->card);
host->card = NULL;
mmc_claim_host(host);
err:
mmc_detach_bus(host);
-mmc_release_host(host);
printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
mmc_hostname(host), err);
...
@@ -627,15 +627,27 @@ static int mmc_sdio_suspend(struct mmc_host *host)
static int mmc_sdio_resume(struct mmc_host *host)
{
-int i, err;
+int i, err = 0;
BUG_ON(!host);
BUG_ON(!host->card);
/* Basic card reinitialization. */
mmc_claim_host(host);
+/* No need to reinitialize powered-resumed nonremovable cards */
+if (mmc_card_is_removable(host) || !mmc_card_is_powered_resumed(host))
err = mmc_sdio_init_card(host, host->ocr, host->card,
(host->pm_flags & MMC_PM_KEEP_POWER));
+else if (mmc_card_is_powered_resumed(host)) {
+/* We may have switched to 1-bit mode during suspend */
+err = sdio_enable_4bit_bus(host->card);
+if (err > 0) {
+mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+err = 0;
+}
+}
if (!err && host->sdio_irqs)
mmc_signal_sdio_irq(host);
mmc_release_host(host);
@@ -690,16 +702,22 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
/*
* Starting point for SDIO card init.
*/
-int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
+int mmc_attach_sdio(struct mmc_host *host)
{
-int err;
-int i, funcs;
+int err, i, funcs;
+u32 ocr;
struct mmc_card *card;
BUG_ON(!host);
WARN_ON(!host->claimed);
+err = mmc_send_io_op_cond(host, 0, &ocr);
+if (err)
+return err;
mmc_attach_bus(host, &mmc_sdio_ops);
+if (host->ocr_avail_sdio)
+host->ocr_avail = host->ocr_avail_sdio;
/*
* Sanity check the voltages that the card claims to
@@ -769,12 +787,12 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
pm_runtime_enable(&card->sdio_func[i]->dev);
}
-mmc_release_host(host);
/*
* First add the card to the driver model...
*/
+mmc_release_host(host);
err = mmc_add_card(host->card);
+mmc_claim_host(host);
if (err)
goto remove_added;
@@ -792,15 +810,17 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
remove_added:
/* Remove without lock if the device has been added. */
+mmc_release_host(host);
mmc_sdio_remove(host);
mmc_claim_host(host);
remove:
/* And with lock if it hasn't been added. */
+mmc_release_host(host);
if (host->card)
mmc_sdio_remove(host);
+mmc_claim_host(host);
err:
mmc_detach_bus(host);
-mmc_release_host(host);
printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n",
mmc_hostname(host), err);
...
@@ -197,44 +197,12 @@ static int sdio_bus_remove(struct device *dev)
#ifdef CONFIG_PM_RUNTIME
static int sdio_bus_pm_prepare(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
/*
* Resume an SDIO device which was suspended at run time at this
* point, in order to allow standard SDIO suspend/resume paths
* to keep working as usual.
*
* Ultimately, the SDIO driver itself will decide (in its
* suspend handler, or lack thereof) whether the card should be
* removed or kept, and if kept, at what power state.
*
* At this point, PM core have increased our use count, so it's
* safe to directly resume the device. After system is resumed
* again, PM core will drop back its runtime PM use count, and if
* needed device will be suspended again.
*
* The end result is guaranteed to be a power state that is
* coherent with the device's runtime PM use count.
*
* The return value of pm_runtime_resume is deliberately unchecked
* since there is little point in failing system suspend if a
* device can't be resumed.
*/
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_resume(dev);
return 0;
}
static const struct dev_pm_ops sdio_bus_pm_ops = {
SET_RUNTIME_PM_OPS(
pm_generic_runtime_suspend,
pm_generic_runtime_resume,
pm_generic_runtime_idle
)
-.prepare = sdio_bus_pm_prepare,
};
#define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops)
...
@@ -142,6 +142,27 @@ config MMC_SDHCI_ESDHC_IMX
If unsure, say N.
config MMC_SDHCI_DOVE
bool "SDHCI support on Marvell's Dove SoC"
depends on ARCH_DOVE
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
help
This selects the Secure Digital Host Controller Interface in
Marvell's Dove SoC.
If unsure, say N.
config MMC_SDHCI_TEGRA
tristate "SDHCI platform support for the Tegra SD/MMC Controller"
depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
select MMC_SDHCI_IO_ACCESSORS
help
This selects the Tegra SD/MMC controller. If you have a Tegra
platform with SD or MMC devices, say Y or M here.
If unsure, say N.
config MMC_SDHCI_S3C
tristate "SDHCI support on Samsung S3C SoC"
depends on MMC_SDHCI && PLAT_SAMSUNG
@@ -460,6 +481,22 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND
help
If you say yes here SD-Cards may work on the EZkit.
config MMC_DW
tristate "Synopsys DesignWare Memory Card Interface"
depends on ARM
help
This selects support for the Synopsys DesignWare Mobile Storage IP
block, this provides host support for SD and MMC interfaces, in both
PIO and external DMA modes.
config MMC_DW_IDMAC
bool "Internal DMAC interface"
depends on MMC_DW
help
This selects support for the internal DMAC block within the Synopsys
Designware Mobile Storage IP block. This disables the external DMA
interface.
config MMC_SH_MMCIF
tristate "SuperH Internal MMCIF support"
depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
...
@@ -31,6 +31,7 @@ obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
+obj-$(CONFIG_MMC_DW) += dw_mmc.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_USHC) += ushc.o
@@ -39,6 +40,8 @@ obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o
sdhci-platform-y := sdhci-pltfm.o
sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
+sdhci-platform-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
sdhci-of-y := sdhci-of-core.o
...
@@ -66,8 +66,8 @@
#define DAVINCI_MMCBLNC 0x60
#define DAVINCI_SDIOCTL 0x64
#define DAVINCI_SDIOST0 0x68
-#define DAVINCI_SDIOEN 0x6C
-#define DAVINCI_SDIOST 0x70
+#define DAVINCI_SDIOIEN 0x6C
+#define DAVINCI_SDIOIST 0x70
#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */
/* DAVINCI_MMCCTL definitions */
@@ -131,6 +131,14 @@
#define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */
#define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */
/* DAVINCI_SDIOST0 definitions */
#define SDIOST0_DAT1_HI BIT(0)
/* DAVINCI_SDIOIEN definitions */
#define SDIOIEN_IOINTEN BIT(0)
/* DAVINCI_SDIOIST definitions */
#define SDIOIST_IOINT BIT(0)
/* MMCSD Init clock in Hz in opendrain mode */
#define MMCSD_INIT_CLOCK 200000
@@ -164,7 +172,7 @@ struct mmc_davinci_host {
unsigned int mmc_input_clk;
void __iomem *base;
struct resource *mem_res;
-int irq;
+int mmc_irq, sdio_irq;
unsigned char bus_mode;
#define DAVINCI_MMC_DATADIR_NONE 0
@@ -184,6 +192,7 @@ struct mmc_davinci_host {
u32 rxdma, txdma;
bool use_dma;
bool do_dma;
+bool sdio_int;
/* Scatterlist DMA uses one or more parameter RAM entries:
* the main one (associated with rxdma or txdma) plus zero or
@@ -480,7 +489,7 @@ static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
struct scatterlist *sg;
unsigned sg_len;
unsigned bytes_left = host->bytes_left;
-const unsigned shift = ffs(rw_threshold) - 1;;
+const unsigned shift = ffs(rw_threshold) - 1;
if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
template = &host->tx_template;
@@ -866,6 +875,19 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
{
host->data = NULL;
if (host->mmc->caps & MMC_CAP_SDIO_IRQ) {
/*
* SDIO Interrupt Detection work-around as suggested by
* Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata
* 2.1.6): Signal SDIO interrupt only if it is enabled by core
*/
if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) &
SDIOST0_DAT1_HI)) {
writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
mmc_signal_sdio_irq(host->mmc);
}
}
if (host->do_dma) {
davinci_abort_dma(host);
@@ -932,6 +954,21 @@ davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
mmc_davinci_reset_ctrl(host, 0);
}
static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
{
struct mmc_davinci_host *host = dev_id;
unsigned int status;
status = readl(host->base + DAVINCI_SDIOIST);
if (status & SDIOIST_IOINT) {
dev_dbg(mmc_dev(host->mmc),
"SDIO interrupt status %x\n", status);
writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
mmc_signal_sdio_irq(host->mmc);
}
return IRQ_HANDLED;
}
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
{
struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id;
@@ -1076,11 +1113,32 @@ static int mmc_davinci_get_ro(struct mmc_host *mmc)
return config->get_ro(pdev->id);
}
static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct mmc_davinci_host *host = mmc_priv(mmc);
if (enable) {
if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
mmc_signal_sdio_irq(host->mmc);
} else {
host->sdio_int = true;
writel(readl(host->base + DAVINCI_SDIOIEN) |
SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
}
} else {
host->sdio_int = false;
writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
host->base + DAVINCI_SDIOIEN);
}
}
static struct mmc_host_ops mmc_davinci_ops = {
.request = mmc_davinci_request,
.set_ios = mmc_davinci_set_ios,
.get_cd = mmc_davinci_get_cd,
.get_ro = mmc_davinci_get_ro,
+.enable_sdio_irq = mmc_davinci_enable_sdio_irq,
};
/*----------------------------------------------------------------------*/
@@ -1209,7 +1267,8 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
host->nr_sg = MAX_NR_SG;
host->use_dma = use_dma;
-host->irq = irq;
+host->mmc_irq = irq;
+host->sdio_irq = platform_get_irq(pdev, 1);
if (host->use_dma && davinci_acquire_dma_channels(host) != 0)
host->use_dma = 0;
@@ -1270,6 +1329,13 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
if (ret)
goto out;
+if (host->sdio_irq >= 0) {
+ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0,
+mmc_hostname(mmc), host);
+if (!ret)
+mmc->caps |= MMC_CAP_SDIO_IRQ;
+}
rename_region(mem, mmc_hostname(mmc));
dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n",
@@ -1313,7 +1379,9 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)
mmc_davinci_cpufreq_deregister(host);
mmc_remove_host(host->mmc);
-free_irq(host->irq, host);
+free_irq(host->mmc_irq, host);
+if (host->mmc->caps & MMC_CAP_SDIO_IRQ)
+free_irq(host->sdio_irq, host);
davinci_release_dma_channels(host);
...
/*
* Synopsys DesignWare Multimedia Card Interface driver
* (Based on NXP driver for lpc 31xx)
*
* Copyright (C) 2009 NXP Semiconductors
* Copyright (C) 2009, 2010 Imagination Technologies Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include "dw_mmc.h"
/* Common flag combinations */
#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \
SDMMC_INT_HTO | SDMMC_INT_SBE | \
SDMMC_INT_EBE)
#define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
SDMMC_INT_RESP_ERR)
#define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS 1
#define DW_MCI_RECV_STATUS 2
#define DW_MCI_DMA_THRESHOLD 16
#ifdef CONFIG_MMC_DW_IDMAC
struct idmac_desc {
u32 des0; /* Control Descriptor */
#define IDMAC_DES0_DIC BIT(1)
#define IDMAC_DES0_LD BIT(2)
#define IDMAC_DES0_FD BIT(3)
#define IDMAC_DES0_CH BIT(4)
#define IDMAC_DES0_ER BIT(5)
#define IDMAC_DES0_CES BIT(30)
#define IDMAC_DES0_OWN BIT(31)
u32 des1; /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
u32 des2; /* buffer 1 physical address */
u32 des3; /* buffer 2 physical address */
};
#endif /* CONFIG_MMC_DW_IDMAC */
/**
* struct dw_mci_slot - MMC slot state
* @mmc: The mmc_host representing this slot.
* @host: The MMC controller this slot is using.
* @ctype: Card type for this slot.
* @mrq: mmc_request currently being processed or waiting to be
* processed, or NULL when the slot is idle.
* @queue_node: List node for placing this node in the @queue list of
* &struct dw_mci.
* @clock: Clock rate configured by set_ios(). Protected by host->lock.
* @flags: Random state bits associated with the slot.
* @id: Number of this slot.
* @last_detect_state: Most recently observed card detect state.
*/
struct dw_mci_slot {
struct mmc_host *mmc;
struct dw_mci *host;
u32 ctype;
struct mmc_request *mrq;
struct list_head queue_node;
unsigned int clock;
unsigned long flags;
#define DW_MMC_CARD_PRESENT 0
#define DW_MMC_CARD_NEED_INIT 1
int id;
int last_detect_state;
};
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
struct dw_mci_slot *slot = s->private;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_command *stop;
struct mmc_data *data;
/* Make sure we get a consistent snapshot */
spin_lock_bh(&slot->host->lock);
mrq = slot->mrq;
if (mrq) {
cmd = mrq->cmd;
data = mrq->data;
stop = mrq->stop;
if (cmd)
seq_printf(s,
"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
cmd->opcode, cmd->arg, cmd->flags,
cmd->resp[0], cmd->resp[1], cmd->resp[2],
cmd->resp[2], cmd->error);
if (data)
seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
data->bytes_xfered, data->blocks,
data->blksz, data->flags, data->error);
if (stop)
seq_printf(s,
"CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
stop->opcode, stop->arg, stop->flags,
stop->resp[0], stop->resp[1], stop->resp[2],
stop->resp[2], stop->error);
}
spin_unlock_bh(&slot->host->lock);
return 0;
}
static int dw_mci_req_open(struct inode *inode, struct file *file)
{
return single_open(file, dw_mci_req_show, inode->i_private);
}
static const struct file_operations dw_mci_req_fops = {
.owner = THIS_MODULE,
.open = dw_mci_req_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int dw_mci_regs_show(struct seq_file *s, void *v)
{
seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS);
seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS);
seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD);
seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL);
seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK);
seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA);
return 0;
}
static int dw_mci_regs_open(struct inode *inode, struct file *file)
{
return single_open(file, dw_mci_regs_show, inode->i_private);
}
static const struct file_operations dw_mci_regs_fops = {
.owner = THIS_MODULE,
.open = dw_mci_regs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
{
struct mmc_host *mmc = slot->mmc;
struct dw_mci *host = slot->host;
struct dentry *root;
struct dentry *node;
root = mmc->debugfs_root;
if (!root)
return;
node = debugfs_create_file("regs", S_IRUSR, root, host,
&dw_mci_regs_fops);
if (!node)
goto err;
node = debugfs_create_file("req", S_IRUSR, root, slot,
&dw_mci_req_fops);
if (!node)
goto err;
node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state);
if (!node)
goto err;
node = debugfs_create_x32("pending_events", S_IRUSR, root,
(u32 *)&host->pending_events);
if (!node)
goto err;
node = debugfs_create_x32("completed_events", S_IRUSR, root,
(u32 *)&host->completed_events);
if (!node)
goto err;
return;
err:
dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
}
#endif /* defined(CONFIG_DEBUG_FS) */
static void dw_mci_set_timeout(struct dw_mci *host)
{
/* timeout (maximum) */
mci_writel(host, TMOUT, 0xffffffff);
}
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
struct mmc_data *data;
u32 cmdr;
cmd->error = -EINPROGRESS;
cmdr = cmd->opcode;
if (cmdr == MMC_STOP_TRANSMISSION)
cmdr |= SDMMC_CMD_STOP;
else
cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
if (cmd->flags & MMC_RSP_PRESENT) {
/* We expect a response, so set this bit */
cmdr |= SDMMC_CMD_RESP_EXP;
if (cmd->flags & MMC_RSP_136)
cmdr |= SDMMC_CMD_RESP_LONG;
}
if (cmd->flags & MMC_RSP_CRC)
cmdr |= SDMMC_CMD_RESP_CRC;
data = cmd->data;
if (data) {
cmdr |= SDMMC_CMD_DAT_EXP;
if (data->flags & MMC_DATA_STREAM)
cmdr |= SDMMC_CMD_STRM_MODE;
if (data->flags & MMC_DATA_WRITE)
cmdr |= SDMMC_CMD_DAT_WR;
}
return cmdr;
}
static void dw_mci_start_command(struct dw_mci *host,
struct mmc_command *cmd, u32 cmd_flags)
{
host->cmd = cmd;
dev_vdbg(&host->pdev->dev,
"start command: ARGR=0x%08x CMDR=0x%08x\n",
cmd->arg, cmd_flags);
mci_writel(host, CMDARG, cmd->arg);
wmb();
mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
{
dw_mci_start_command(host, data->stop, host->stop_cmdr);
}
/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
if (host->use_dma) {
host->dma_ops->stop(host);
host->dma_ops->cleanup(host);
} else {
/* Data transfer was stopped by the interrupt handler */
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
}
#ifdef CONFIG_MMC_DW_IDMAC
static void dw_mci_dma_cleanup(struct dw_mci *host)
{
struct mmc_data *data = host->data;
if (data)
dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
((data->flags & MMC_DATA_WRITE)
? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
{
u32 temp;
/* Disable and reset the IDMAC interface */
temp = mci_readl(host, CTRL);
temp &= ~SDMMC_CTRL_USE_IDMAC;
temp |= SDMMC_CTRL_DMA_RESET;
mci_writel(host, CTRL, temp);
/* Stop the IDMAC running */
temp = mci_readl(host, BMOD);
temp &= ~SDMMC_IDMAC_ENABLE;
mci_writel(host, BMOD, temp);
}
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
{
struct mmc_data *data = host->data;
dev_vdbg(&host->pdev->dev, "DMA complete\n");
host->dma_ops->cleanup(host);
/*
* If the card was removed, data will be NULL. No point in trying to
* send the stop command or waiting for NBUSY in this case.
*/
if (data) {
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
}
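/*
* Fill the IDMAC descriptor ring from the DMA-mapped scatterlist: one
* descriptor per segment, with the first/last-descriptor flags set below.
*/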
static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
unsigned int sg_len)
{
int i;
struct idmac_desc *desc = host->sg_cpu;
for (i = 0; i < sg_len; i++, desc++) {
unsigned int length = sg_dma_len(&data->sg[i]);
u32 mem_addr = sg_dma_address(&data->sg[i]);
/* Set the OWN bit and disable interrupts for this descriptor */
desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH;
/* Buffer length */
IDMAC_SET_BUFFER1_SIZE(desc, length);
/* Physical address to DMA to/from */
desc->des2 = mem_addr;
}
/* Set first descriptor */
desc = host->sg_cpu;
desc->des0 |= IDMAC_DES0_FD;
/* Set last descriptor */
desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc);
desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
desc->des0 |= IDMAC_DES0_LD;
wmb();
}
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
{
u32 temp;
dw_mci_translate_sglist(host, host->data, sg_len);
/* Select IDMAC interface */
temp = mci_readl(host, CTRL);
temp |= SDMMC_CTRL_USE_IDMAC;
mci_writel(host, CTRL, temp);
wmb();
/* Enable the IDMAC */
temp = mci_readl(host, BMOD);
temp |= SDMMC_IDMAC_ENABLE;
mci_writel(host, BMOD, temp);
/* Start it running */
mci_writel(host, PLDMND, 1);
}
static int dw_mci_idmac_init(struct dw_mci *host)
{
struct idmac_desc *p;
int i;
/* Number of descriptors in the ring buffer */
host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
/* Forward link the descriptor list */
for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1));
/* Set the last descriptor as the end-of-ring descriptor */
p->des3 = host->sg_dma;
p->des0 = IDMAC_DES0_ER;
/* Mask out interrupts - get Tx & Rx complete only */
mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
SDMMC_IDMAC_INT_TI);
/* Set the descriptor base address */
mci_writel(host, DBADDR, host->sg_dma);
return 0;
}
static struct dw_mci_dma_ops dw_mci_idmac_ops = {
.init = dw_mci_idmac_init,
.start = dw_mci_idmac_start_dma,
.stop = dw_mci_idmac_stop_dma,
.complete = dw_mci_idmac_complete_dma,
.cleanup = dw_mci_dma_cleanup,
};
#endif /* CONFIG_MMC_DW_IDMAC */
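/*
* Try to set up a DMA transfer for the request; a non-zero return tells
* the caller to fall back to PIO.
*/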
static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
{
struct scatterlist *sg;
unsigned int i, direction, sg_len;
u32 temp;
/* If we don't have a channel, we can't do DMA */
if (!host->use_dma)
return -ENODEV;
/*
* We don't do DMA on "complex" transfers, i.e. with
* non-word-aligned buffers or lengths. Also, we don't bother
* with all the DMA setup overhead for short transfers.
*/
if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
return -EINVAL;
if (data->blksz & 3)
return -EINVAL;
for_each_sg(data->sg, sg, data->sg_len, i) {
if (sg->offset & 3 || sg->length & 3)
return -EINVAL;
}
if (data->flags & MMC_DATA_READ)
direction = DMA_FROM_DEVICE;
else
direction = DMA_TO_DEVICE;
sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
direction);
dev_vdbg(&host->pdev->dev,
"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
(unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
sg_len);
/* Enable the DMA interface */
temp = mci_readl(host, CTRL);
temp |= SDMMC_CTRL_DMA_ENABLE;
mci_writel(host, CTRL, temp);
/* Disable RX/TX IRQs, let DMA handle it */
temp = mci_readl(host, INTMASK);
temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
mci_writel(host, INTMASK, temp);
host->dma_ops->start(host, sg_len);
return 0;
}
static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
{
u32 temp;
data->error = -EINPROGRESS;
WARN_ON(host->data);
host->sg = NULL;
host->data = data;
if (dw_mci_submit_data_dma(host, data)) {
host->sg = data->sg;
host->pio_offset = 0;
if (data->flags & MMC_DATA_READ)
host->dir_status = DW_MCI_RECV_STATUS;
else
host->dir_status = DW_MCI_SEND_STATUS;
temp = mci_readl(host, INTMASK);
temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
mci_writel(host, INTMASK, temp);
temp = mci_readl(host, CTRL);
temp &= ~SDMMC_CTRL_DMA_ENABLE;
mci_writel(host, CTRL, temp);
}
}
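/*
* Issue a command directly to the CIU (used for clock updates) and
* busy-wait for the controller to accept it.
*/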
static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
{
struct dw_mci *host = slot->host;
unsigned long timeout = jiffies + msecs_to_jiffies(500);
unsigned int cmd_status = 0;
mci_writel(host, CMDARG, arg);
wmb();
mci_writel(host, CMD, SDMMC_CMD_START | cmd);
while (time_before(jiffies, timeout)) {
cmd_status = mci_readl(host, CMD);
if (!(cmd_status & SDMMC_CMD_START))
return;
}
dev_err(&slot->mmc->class_dev,
"Timeout sending command (cmd %#x arg %#x status %#x)\n",
cmd, arg, cmd_status);
}
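/*
* Program the card clock divider for the slot's requested rate and set
* the slot's bus width.
*/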
static void dw_mci_setup_bus(struct dw_mci_slot *slot)
{
struct dw_mci *host = slot->host;
u32 div;
if (slot->clock != host->current_speed) {
if (host->bus_hz % slot->clock)
/*
* move the + 1 after the divide to prevent
* over-clocking the card.
*/
div = ((host->bus_hz / slot->clock) >> 1) + 1;
else
div = (host->bus_hz / slot->clock) >> 1;
dev_info(&slot->mmc->class_dev,
"Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
" div = %d)\n", slot->id, host->bus_hz, slot->clock,
div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div);
/* disable clock */
mci_writel(host, CLKENA, 0);
mci_writel(host, CLKSRC, 0);
/* inform CIU */
mci_send_cmd(slot,
SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
/* set clock to desired speed */
mci_writel(host, CLKDIV, div);
/* inform CIU */
mci_send_cmd(slot,
SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
/* enable clock */
mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE);
/* inform CIU */
mci_send_cmd(slot,
SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
host->current_speed = slot->clock;
}
/* Set the current slot bus width */
mci_writel(host, CTYPE, slot->ctype);
}
static void dw_mci_start_request(struct dw_mci *host,
struct dw_mci_slot *slot)
{
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
u32 cmdflags;
mrq = slot->mrq;
if (host->pdata->select_slot)
host->pdata->select_slot(slot->id);
/* Slot specific timing and width adjustment */
dw_mci_setup_bus(slot);
host->cur_slot = slot;
host->mrq = mrq;
host->pending_events = 0;
host->completed_events = 0;
host->data_status = 0;
data = mrq->data;
if (data) {
dw_mci_set_timeout(host);
mci_writel(host, BYTCNT, data->blksz*data->blocks);
mci_writel(host, BLKSIZ, data->blksz);
}
cmd = mrq->cmd;
cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
/* this is the first command, send the initialization clock */
if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
cmdflags |= SDMMC_CMD_INIT;
if (data) {
dw_mci_submit_data(host, data);
wmb();
}
dw_mci_start_command(host, cmd, cmdflags);
if (mrq->stop)
host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
struct mmc_request *mrq)
{
dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
host->state);
spin_lock_bh(&host->lock);
slot->mrq = mrq;
if (host->state == STATE_IDLE) {
host->state = STATE_SENDING_CMD;
dw_mci_start_request(host, slot);
} else {
list_add_tail(&slot->queue_node, &host->queue);
}
spin_unlock_bh(&host->lock);
}
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
WARN_ON(slot->mrq);
if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq);
return;
}
/* We don't support multiple blocks of weird lengths. */
dw_mci_queue_request(host, slot, mrq);
}
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
/* set default 1 bit mode */
slot->ctype = SDMMC_CTYPE_1BIT;
switch (ios->bus_width) {
case MMC_BUS_WIDTH_1:
slot->ctype = SDMMC_CTYPE_1BIT;
break;
case MMC_BUS_WIDTH_4:
slot->ctype = SDMMC_CTYPE_4BIT;
break;
}
if (ios->clock) {
/*
* Use mirror of ios->clock to prevent race with mmc
* core ios update when finding the minimum.
*/
slot->clock = ios->clock;
}
switch (ios->power_mode) {
case MMC_POWER_UP:
set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
break;
default:
break;
}
}
static int dw_mci_get_ro(struct mmc_host *mmc)
{
int read_only;
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci_board *brd = slot->host->pdata;
/* Use platform get_ro function, else try on board write protect */
if (brd->get_ro)
read_only = brd->get_ro(slot->id);
else
read_only =
mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
dev_dbg(&mmc->class_dev, "card is %s\n",
read_only ? "read-only" : "read-write");
return read_only;
}
static int dw_mci_get_cd(struct mmc_host *mmc)
{
int present;
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci_board *brd = slot->host->pdata;
/* Use platform get_cd function, else try onboard card detect */
if (brd->get_cd)
present = !brd->get_cd(slot->id);
else
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
== 0 ? 1 : 0;
if (present)
dev_dbg(&mmc->class_dev, "card is present\n");
else
dev_dbg(&mmc->class_dev, "card is not present\n");
return present;
}
static const struct mmc_host_ops dw_mci_ops = {
.request = dw_mci_request,
.set_ios = dw_mci_set_ios,
.get_ro = dw_mci_get_ro,
.get_cd = dw_mci_get_cd,
};
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
__releases(&host->lock)
__acquires(&host->lock)
{
struct dw_mci_slot *slot;
struct mmc_host *prev_mmc = host->cur_slot->mmc;
WARN_ON(host->cmd || host->data);
host->cur_slot->mrq = NULL;
host->mrq = NULL;
if (!list_empty(&host->queue)) {
slot = list_entry(host->queue.next,
struct dw_mci_slot, queue_node);
list_del(&slot->queue_node);
dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n",
mmc_hostname(slot->mmc));
host->state = STATE_SENDING_CMD;
dw_mci_start_request(host, slot);
} else {
dev_vdbg(&host->pdev->dev, "list empty\n");
host->state = STATE_IDLE;
}
spin_unlock(&host->lock);
mmc_request_done(prev_mmc, mrq);
spin_lock(&host->lock);
}
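/*
* Copy the response registers into the mmc_command and translate the raw
* interrupt status into an error code.
*/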
static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
u32 status = host->cmd_status;
host->cmd_status = 0;
/* Read the response from the card (up to 16 bytes) */
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
cmd->resp[3] = mci_readl(host, RESP0);
cmd->resp[2] = mci_readl(host, RESP1);
cmd->resp[1] = mci_readl(host, RESP2);
cmd->resp[0] = mci_readl(host, RESP3);
} else {
cmd->resp[0] = mci_readl(host, RESP0);
cmd->resp[1] = 0;
cmd->resp[2] = 0;
cmd->resp[3] = 0;
}
}
if (status & SDMMC_INT_RTO)
cmd->error = -ETIMEDOUT;
else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
cmd->error = -EILSEQ;
else if (status & SDMMC_INT_RESP_ERR)
cmd->error = -EIO;
else
cmd->error = 0;
if (cmd->error) {
/* newer ip versions need a delay between retries */
if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
mdelay(20);
if (cmd->data) {
host->data = NULL;
dw_mci_stop_dma(host);
}
}
}
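/*
* Request state machine, run from tasklet context: walks each request
* through the command, data, busy and stop phases as events are posted
* by the interrupt handler.
*/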
static void dw_mci_tasklet_func(unsigned long priv)
{
struct dw_mci *host = (struct dw_mci *)priv;
struct mmc_data *data;
struct mmc_command *cmd;
enum dw_mci_state state;
enum dw_mci_state prev_state;
u32 status;
spin_lock(&host->lock);
state = host->state;
data = host->data;
do {
prev_state = state;
switch (state) {
case STATE_IDLE:
break;
case STATE_SENDING_CMD:
if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
&host->pending_events))
break;
cmd = host->cmd;
host->cmd = NULL;
set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
dw_mci_command_complete(host, host->mrq->cmd);
if (!host->mrq->data || cmd->error) {
dw_mci_request_end(host, host->mrq);
goto unlock;
}
prev_state = state = STATE_SENDING_DATA;
/* fall through */
case STATE_SENDING_DATA:
if (test_and_clear_bit(EVENT_DATA_ERROR,
&host->pending_events)) {
dw_mci_stop_dma(host);
if (data->stop)
send_stop_cmd(host, data);
state = STATE_DATA_ERROR;
break;
}
if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
&host->pending_events))
break;
set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
prev_state = state = STATE_DATA_BUSY;
/* fall through */
case STATE_DATA_BUSY:
if (!test_and_clear_bit(EVENT_DATA_COMPLETE,
&host->pending_events))
break;
host->data = NULL;
set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
status = host->data_status;
if (status & DW_MCI_DATA_ERROR_FLAGS) {
if (status & SDMMC_INT_DTO) {
dev_err(&host->pdev->dev,
"data timeout error\n");
data->error = -ETIMEDOUT;
} else if (status & SDMMC_INT_DCRC) {
dev_err(&host->pdev->dev,
"data CRC error\n");
data->error = -EILSEQ;
} else {
dev_err(&host->pdev->dev,
"data FIFO error "
"(status=%08x)\n",
status);
data->error = -EIO;
}
} else {
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
}
if (!data->stop) {
dw_mci_request_end(host, host->mrq);
goto unlock;
}
prev_state = state = STATE_SENDING_STOP;
if (!data->error)
send_stop_cmd(host, data);
/* fall through */
case STATE_SENDING_STOP:
if (!test_and_clear_bit(EVENT_CMD_COMPLETE,
&host->pending_events))
break;
host->cmd = NULL;
dw_mci_command_complete(host, host->mrq->stop);
dw_mci_request_end(host, host->mrq);
goto unlock;
case STATE_DATA_ERROR:
if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
&host->pending_events))
break;
state = STATE_DATA_BUSY;
break;
}
} while (state != prev_state);
host->state = state;
unlock:
spin_unlock(&host->lock);
}
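/*
* PIO FIFO accessors for 16-, 32- and 64-bit host data widths; the
* variant used is chosen at probe time from HCON.
*/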
static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
{
u16 *pdata = (u16 *)buf;
WARN_ON(cnt % 2 != 0);
cnt = cnt >> 1;
while (cnt > 0) {
mci_writew(host, DATA, *pdata++);
cnt--;
}
}
static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
{
u16 *pdata = (u16 *)buf;
WARN_ON(cnt % 2 != 0);
cnt = cnt >> 1;
while (cnt > 0) {
*pdata++ = mci_readw(host, DATA);
cnt--;
}
}
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
u32 *pdata = (u32 *)buf;
WARN_ON(cnt % 4 != 0);
WARN_ON((unsigned long)pdata & 0x3);
cnt = cnt >> 2;
while (cnt > 0) {
mci_writel(host, DATA, *pdata++);
cnt--;
}
}
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
u32 *pdata = (u32 *)buf;
WARN_ON(cnt % 4 != 0);
WARN_ON((unsigned long)pdata & 0x3);
cnt = cnt >> 2;
while (cnt > 0) {
*pdata++ = mci_readl(host, DATA);
cnt--;
}
}
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
u64 *pdata = (u64 *)buf;
WARN_ON(cnt % 8 != 0);
cnt = cnt >> 3;
while (cnt > 0) {
mci_writeq(host, DATA, *pdata++);
cnt--;
}
}
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
u64 *pdata = (u64 *)buf;
WARN_ON(cnt % 8 != 0);
cnt = cnt >> 3;
while (cnt > 0) {
*pdata++ = mci_readq(host, DATA);
cnt--;
}
}
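/*
* Drain the data FIFO into the current scatterlist segment(s) while the
* controller keeps reporting received data.
*/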
static void dw_mci_read_data_pio(struct dw_mci *host)
{
struct scatterlist *sg = host->sg;
void *buf = sg_virt(sg);
unsigned int offset = host->pio_offset;
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
unsigned int nbytes = 0, len, old_len, count = 0;
do {
len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
if (count == 0)
old_len = len;
if (offset + len <= sg->length) {
host->pull_data(host, (void *)(buf + offset), len);
offset += len;
nbytes += len;
if (offset == sg->length) {
flush_dcache_page(sg_page(sg));
host->sg = sg = sg_next(sg);
if (!sg)
goto done;
offset = 0;
buf = sg_virt(sg);
}
} else {
unsigned int remaining = sg->length - offset;
host->pull_data(host, (void *)(buf + offset),
remaining);
nbytes += remaining;
flush_dcache_page(sg_page(sg));
host->sg = sg = sg_next(sg);
if (!sg)
goto done;
offset = len - remaining;
buf = sg_virt(sg);
host->pull_data(host, buf, offset);
nbytes += offset;
}
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
if (status & DW_MCI_DATA_ERROR_FLAGS) {
host->data_status = status;
data->bytes_xfered += nbytes;
smp_wmb();
set_bit(EVENT_DATA_ERROR, &host->pending_events);
tasklet_schedule(&host->tasklet);
return;
}
count++;
} while (status & SDMMC_INT_RXDR); /* if the RXDR is ready, read again */
len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
host->pio_offset = offset;
data->bytes_xfered += nbytes;
return;
done:
data->bytes_xfered += nbytes;
smp_wmb();
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_write_data_pio(struct dw_mci *host)
{
struct scatterlist *sg = host->sg;
void *buf = sg_virt(sg);
unsigned int offset = host->pio_offset;
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
unsigned int nbytes = 0, len;
do {
len = SDMMC_FIFO_SZ -
(SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
if (offset + len <= sg->length) {
host->push_data(host, (void *)(buf + offset), len);
offset += len;
nbytes += len;
if (offset == sg->length) {
host->sg = sg = sg_next(sg);
if (!sg)
goto done;
offset = 0;
buf = sg_virt(sg);
}
} else {
unsigned int remaining = sg->length - offset;
host->push_data(host, (void *)(buf + offset),
remaining);
nbytes += remaining;
host->sg = sg = sg_next(sg);
if (!sg)
goto done;
offset = len - remaining;
buf = sg_virt(sg);
host->push_data(host, (void *)buf, offset);
nbytes += offset;
}
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
if (status & DW_MCI_DATA_ERROR_FLAGS) {
host->data_status = status;
data->bytes_xfered += nbytes;
smp_wmb();
set_bit(EVENT_DATA_ERROR, &host->pending_events);
tasklet_schedule(&host->tasklet);
return;
}
} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
host->pio_offset = offset;
data->bytes_xfered += nbytes;
return;
done:
data->bytes_xfered += nbytes;
smp_wmb();
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
{
if (!host->cmd_status)
host->cmd_status = status;
smp_wmb();
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
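/*
* Top-level interrupt handler: acknowledge pending events, service PIO
* transfers and schedule the tasklets that complete the request.
*/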
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
struct dw_mci *host = dev_id;
u32 status, pending;
unsigned int pass_count = 0;
do {
status = mci_readl(host, RINTSTS);
pending = mci_readl(host, MINTSTS); /* read-only mask reg */
/*
* DTO fix - version 2.10a and below, and only if internal DMA
* is configured.
*/
if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) {
if (!pending &&
((mci_readl(host, STATUS) >> 17) & 0x1fff))
pending |= SDMMC_INT_DATA_OVER;
}
if (!pending)
break;
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
host->cmd_status = status;
smp_wmb();
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
/* if there is an error report DATA_ERROR */
mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
host->data_status = status;
smp_wmb();
set_bit(EVENT_DATA_ERROR, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
if (pending & SDMMC_INT_DATA_OVER) {
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
if (!host->data_status)
host->data_status = status;
smp_wmb();
if (host->dir_status == DW_MCI_RECV_STATUS) {
if (host->sg != NULL)
dw_mci_read_data_pio(host);
}
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
if (pending & SDMMC_INT_RXDR) {
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
if (host->sg)
dw_mci_read_data_pio(host);
}
if (pending & SDMMC_INT_TXDR) {
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
if (host->sg)
dw_mci_write_data_pio(host);
}
if (pending & SDMMC_INT_CMD_DONE) {
mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
dw_mci_cmd_interrupt(host, status);
}
if (pending & SDMMC_INT_CD) {
mci_writel(host, RINTSTS, SDMMC_INT_CD);
tasklet_schedule(&host->card_tasklet);
}
} while (pass_count++ < 5);
#ifdef CONFIG_MMC_DW_IDMAC
/* Handle DMA interrupts */
pending = mci_readl(host, IDSTS);
if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
host->dma_ops->complete(host);
}
#endif
return IRQ_HANDLED;
}
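/*
* Card detect tasklet: handle insertion/removal, power the slot up or
* down and fail any request that was in flight when the card went away.
*/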
static void dw_mci_tasklet_card(unsigned long data)
{
struct dw_mci *host = (struct dw_mci *)data;
int i;
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
struct mmc_host *mmc = slot->mmc;
struct mmc_request *mrq;
int present;
u32 ctrl;
present = dw_mci_get_cd(mmc);
while (present != slot->last_detect_state) {
spin_lock(&host->lock);
dev_dbg(&slot->mmc->class_dev, "card %s\n",
present ? "inserted" : "removed");
/* Card change detected */
slot->last_detect_state = present;
/* Power up slot */
if (present != 0) {
if (host->pdata->setpower)
host->pdata->setpower(slot->id,
mmc->ocr_avail);
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
}
/* Clean up queue if present */
mrq = slot->mrq;
if (mrq) {
if (mrq == host->mrq) {
host->data = NULL;
host->cmd = NULL;
switch (host->state) {
case STATE_IDLE:
break;
case STATE_SENDING_CMD:
mrq->cmd->error = -ENOMEDIUM;
if (!mrq->data)
break;
/* fall through */
case STATE_SENDING_DATA:
mrq->data->error = -ENOMEDIUM;
dw_mci_stop_dma(host);
break;
case STATE_DATA_BUSY:
case STATE_DATA_ERROR:
if (mrq->data->error == -EINPROGRESS)
mrq->data->error = -ENOMEDIUM;
if (!mrq->stop)
break;
/* fall through */
case STATE_SENDING_STOP:
mrq->stop->error = -ENOMEDIUM;
break;
}
dw_mci_request_end(host, mrq);
} else {
list_del(&slot->queue_node);
mrq->cmd->error = -ENOMEDIUM;
if (mrq->data)
mrq->data->error = -ENOMEDIUM;
if (mrq->stop)
mrq->stop->error = -ENOMEDIUM;
spin_unlock(&host->lock);
mmc_request_done(slot->mmc, mrq);
spin_lock(&host->lock);
}
}
/* Power down slot */
if (present == 0) {
if (host->pdata->setpower)
host->pdata->setpower(slot->id, 0);
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
/*
* Clear down the FIFO - doing so generates a
* block interrupt, which is why the
* scatter-gather pointer is set to NULL first.
*/
host->sg = NULL;
ctrl = mci_readl(host, CTRL);
ctrl |= SDMMC_CTRL_FIFO_RESET;
mci_writel(host, CTRL, ctrl);
#ifdef CONFIG_MMC_DW_IDMAC
ctrl = mci_readl(host, BMOD);
ctrl |= 0x01; /* Software reset of DMA */
mci_writel(host, BMOD, ctrl);
#endif
}
spin_unlock(&host->lock);
present = dw_mci_get_cd(mmc);
}
mmc_detect_change(slot->mmc,
msecs_to_jiffies(host->pdata->detect_delay_ms));
}
}
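/*
* Allocate and register one mmc_host per slot, applying the
* platform-supplied OCR, bus width and block-size limits.
*/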
static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
struct mmc_host *mmc;
struct dw_mci_slot *slot;
mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev);
if (!mmc)
return -ENOMEM;
slot = mmc_priv(mmc);
slot->id = id;
slot->mmc = mmc;
slot->host = host;
mmc->ops = &dw_mci_ops;
mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510);
mmc->f_max = host->bus_hz;
if (host->pdata->get_ocr)
mmc->ocr_avail = host->pdata->get_ocr(id);
else
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
/*
* Start with slot power disabled, it will be enabled when a card
* is detected.
*/
if (host->pdata->setpower)
host->pdata->setpower(id, 0);
mmc->caps = 0;
if (host->pdata->get_bus_wd)
if (host->pdata->get_bus_wd(slot->id) >= 4)
mmc->caps |= MMC_CAP_4_BIT_DATA;
if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
mmc->caps |= MMC_CAP_SD_HIGHSPEED;
#ifdef CONFIG_MMC_DW_IDMAC
mmc->max_segs = host->ring_size;
mmc->max_blk_size = 65536;
mmc->max_blk_count = host->ring_size;
mmc->max_seg_size = 0x1000;
mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
#else
if (host->pdata->blk_settings) {
mmc->max_segs = host->pdata->blk_settings->max_segs;
mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
mmc->max_blk_count = host->pdata->blk_settings->max_blk_count;
mmc->max_req_size = host->pdata->blk_settings->max_req_size;
mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
} else {
/* Useful defaults if platform data is unset. */
mmc->max_segs = 64;
mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
mmc->max_blk_count = 512;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
}
#endif /* CONFIG_MMC_DW_IDMAC */
if (dw_mci_get_cd(mmc))
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
else
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
host->slot[id] = slot;
mmc_add_host(mmc);
#if defined(CONFIG_DEBUG_FS)
dw_mci_init_debugfs(slot);
#endif
/* Card initially undetected */
slot->last_detect_state = 0;
return 0;
}
static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
{
/* Shutdown detect IRQ */
if (slot->host->pdata->exit)
slot->host->pdata->exit(id);
/* Debugfs stuff is cleaned up by mmc core */
mmc_remove_host(slot->mmc);
slot->host->slot[id] = NULL;
mmc_free_host(slot->mmc);
}
static void dw_mci_init_dma(struct dw_mci *host)
{
/* Alloc memory for sg translation */
host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE,
&host->sg_dma, GFP_KERNEL);
if (!host->sg_cpu) {
dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
__func__);
goto no_dma;
}
/* Determine which DMA interface to use */
#ifdef CONFIG_MMC_DW_IDMAC
host->dma_ops = &dw_mci_idmac_ops;
dev_info(&host->pdev->dev, "Using internal DMA controller.\n");
#endif
if (!host->dma_ops)
goto no_dma;
if (host->dma_ops->init) {
if (host->dma_ops->init(host)) {
dev_err(&host->pdev->dev, "%s: Unable to initialize "
"DMA Controller.\n", __func__);
goto no_dma;
}
} else {
dev_err(&host->pdev->dev, "DMA initialization not found.\n");
goto no_dma;
}
host->use_dma = 1;
return;
no_dma:
dev_info(&host->pdev->dev, "Using PIO mode.\n");
host->use_dma = 0;
return;
}
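/*
* Assert the controller, FIFO and DMA resets and wait (up to 500ms) for
* the hardware to clear them.
*/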
static bool mci_wait_reset(struct device *dev, struct dw_mci *host)
{
unsigned long timeout = jiffies + msecs_to_jiffies(500);
unsigned int ctrl;
mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
SDMMC_CTRL_DMA_RESET));
/* wait till resets clear */
do {
ctrl = mci_readl(host, CTRL);
if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET |
SDMMC_CTRL_DMA_RESET)))
return true;
} while (time_before(jiffies, timeout));
dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl);
return false;
}
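/*
* Probe: map registers, set up DMA or PIO, reset the controller, hook
* the interrupt and register each slot.
*/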
static int dw_mci_probe(struct platform_device *pdev)
{
struct dw_mci *host;
struct resource *regs;
struct dw_mci_board *pdata;
int irq, ret, i, width;
u32 fifo_size;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return -ENXIO;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->pdev = pdev;
host->pdata = pdata = pdev->dev.platform_data;
if (!pdata || !pdata->init) {
dev_err(&pdev->dev,
"Platform data must supply init function\n");
ret = -ENODEV;
goto err_freehost;
}
if (!pdata->select_slot && pdata->num_slots > 1) {
dev_err(&pdev->dev,
"Platform data must supply select_slot function\n");
ret = -ENODEV;
goto err_freehost;
}
if (!pdata->bus_hz) {
dev_err(&pdev->dev,
"Platform data must supply bus speed\n");
ret = -ENODEV;
goto err_freehost;
}
host->bus_hz = pdata->bus_hz;
host->quirks = pdata->quirks;
spin_lock_init(&host->lock);
INIT_LIST_HEAD(&host->queue);
ret = -ENOMEM;
host->regs = ioremap(regs->start, regs->end - regs->start + 1);
if (!host->regs)
goto err_freehost;
host->dma_ops = pdata->dma_ops;
dw_mci_init_dma(host);
/*
* Get the host data width - this assumes that HCON has been set with
* the correct values.
*/
i = (mci_readl(host, HCON) >> 7) & 0x7;
if (!i) {
host->push_data = dw_mci_push_data16;
host->pull_data = dw_mci_pull_data16;
width = 16;
host->data_shift = 1;
} else if (i == 2) {
host->push_data = dw_mci_push_data64;
host->pull_data = dw_mci_pull_data64;
width = 64;
host->data_shift = 3;
} else {
/* Check for a reserved value, and warn if it is */
WARN((i != 1),
"HCON reports a reserved host data width!\n"
"Defaulting to 32-bit access.\n");
host->push_data = dw_mci_push_data32;
host->pull_data = dw_mci_pull_data32;
width = 32;
host->data_shift = 2;
}
/* Reset all blocks */
if (!mci_wait_reset(&pdev->dev, host)) {
ret = -ENODEV;
goto err_dmaunmap;
}
/* Clear the interrupts for the host controller */
mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
/* Put in max timeout */
mci_writel(host, TMOUT, 0xFFFFFFFF);
/*
* FIFO threshold settings RxMark = fifo_size / 2 - 1,
* Tx Mark = fifo_size / 2 DMA Size = 8
*/
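/* For example, a reported FIFO depth of 32 gives FIFOTH = (0x2 << 28) | (15 << 16) | 16. */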
fifo_size = mci_readl(host, FIFOTH);
fifo_size = (fifo_size >> 16) & 0x7ff;
mci_writel(host, FIFOTH, ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
((fifo_size/2) << 0)));
/* disable clock to CIU */
mci_writel(host, CLKENA, 0);
mci_writel(host, CLKSRC, 0);
tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
tasklet_init(&host->card_tasklet,
dw_mci_tasklet_card, (unsigned long)host);
ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
if (ret)
goto err_dmaunmap;
platform_set_drvdata(pdev, host);
if (host->pdata->num_slots)
host->num_slots = host->pdata->num_slots;
else
host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
/* We need at least one slot to succeed */
for (i = 0; i < host->num_slots; i++) {
ret = dw_mci_init_slot(host, i);
if (ret) {
ret = -ENODEV;
goto err_init_slot;
}
}
/*
* Enable interrupts for command done, data over, data empty, card det,
* receive ready and error such as transmit, receive timeout, crc error
*/
mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
SDMMC_INT_TXDR | SDMMC_INT_RXDR |
DW_MCI_ERROR_FLAGS | SDMMC_INT_CD);
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
dev_info(&pdev->dev, "DW MMC controller at irq %d, "
"%d bit host data width\n", irq, width);
if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
return 0;
err_init_slot:
/* De-init any initialized slots */
while (i > 0) {
i--;
if (host->slot[i])
dw_mci_cleanup_slot(host->slot[i], i);
}
free_irq(irq, host);
err_dmaunmap:
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
dma_free_coherent(&host->pdev->dev, PAGE_SIZE,
host->sg_cpu, host->sg_dma);
iounmap(host->regs);
err_freehost:
kfree(host);
return ret;
}
static int __exit dw_mci_remove(struct platform_device *pdev)
{
struct dw_mci *host = platform_get_drvdata(pdev);
int i;
mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
platform_set_drvdata(pdev, NULL);
for (i = 0; i < host->num_slots; i++) {
dev_dbg(&pdev->dev, "remove slot %d\n", i);
if (host->slot[i])
dw_mci_cleanup_slot(host->slot[i], i);
}
/* disable clock to CIU */
mci_writel(host, CLKENA, 0);
mci_writel(host, CLKSRC, 0);
free_irq(platform_get_irq(pdev, 0), host);
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
iounmap(host->regs);
kfree(host);
return 0;
}
#ifdef CONFIG_PM
/*
* TODO: we should probably disable the clock to the card in the suspend path.
*/
static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
{
int i, ret;
struct dw_mci *host = platform_get_drvdata(pdev);
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
if (!slot)
continue;
ret = mmc_suspend_host(slot->mmc);
if (ret < 0) {
while (--i >= 0) {
slot = host->slot[i];
if (slot)
mmc_resume_host(host->slot[i]->mmc);
}
return ret;
}
}
return 0;
}
static int dw_mci_resume(struct platform_device *pdev)
{
int i, ret;
struct dw_mci *host = platform_get_drvdata(pdev);
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
if (!slot)
continue;
ret = mmc_resume_host(host->slot[i]->mmc);
if (ret < 0)
return ret;
}
return 0;
}
#else
#define dw_mci_suspend NULL
#define dw_mci_resume NULL
#endif /* CONFIG_PM */
static struct platform_driver dw_mci_driver = {
.remove = __exit_p(dw_mci_remove),
.suspend = dw_mci_suspend,
.resume = dw_mci_resume,
.driver = {
.name = "dw_mmc",
},
};
static int __init dw_mci_init(void)
{
return platform_driver_probe(&dw_mci_driver, dw_mci_probe);
}
static void __exit dw_mci_exit(void)
{
platform_driver_unregister(&dw_mci_driver);
}
module_init(dw_mci_init);
module_exit(dw_mci_exit);
MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
MODULE_AUTHOR("NXP Semiconductor VietNam");
MODULE_AUTHOR("Imagination Technologies Ltd");
MODULE_LICENSE("GPL v2");
/*
* Synopsys DesignWare Multimedia Card Interface driver
* (Based on NXP driver for lpc 31xx)
*
* Copyright (C) 2009 NXP Semiconductors
* Copyright (C) 2009, 2010 Imagination Technologies Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef _DW_MMC_H_
#define _DW_MMC_H_
#define SDMMC_CTRL 0x000
#define SDMMC_PWREN 0x004
#define SDMMC_CLKDIV 0x008
#define SDMMC_CLKSRC 0x00c
#define SDMMC_CLKENA 0x010
#define SDMMC_TMOUT 0x014
#define SDMMC_CTYPE 0x018
#define SDMMC_BLKSIZ 0x01c
#define SDMMC_BYTCNT 0x020
#define SDMMC_INTMASK 0x024
#define SDMMC_CMDARG 0x028
#define SDMMC_CMD 0x02c
#define SDMMC_RESP0 0x030
#define SDMMC_RESP1 0x034
#define SDMMC_RESP2 0x038
#define SDMMC_RESP3 0x03c
#define SDMMC_MINTSTS 0x040
#define SDMMC_RINTSTS 0x044
#define SDMMC_STATUS 0x048
#define SDMMC_FIFOTH 0x04c
#define SDMMC_CDETECT 0x050
#define SDMMC_WRTPRT 0x054
#define SDMMC_GPIO 0x058
#define SDMMC_TCBCNT 0x05c
#define SDMMC_TBBCNT 0x060
#define SDMMC_DEBNCE 0x064
#define SDMMC_USRID 0x068
#define SDMMC_VERID 0x06c
#define SDMMC_HCON 0x070
#define SDMMC_BMOD 0x080
#define SDMMC_PLDMND 0x084
#define SDMMC_DBADDR 0x088
#define SDMMC_IDSTS 0x08c
#define SDMMC_IDINTEN 0x090
#define SDMMC_DSCADDR 0x094
#define SDMMC_BUFADDR 0x098
#define SDMMC_DATA 0x100
#define SDMMC_DATA_ADR 0x100
/* shift bit field */
#define _SBF(f, v) ((v) << (f))
/* Control register defines */
#define SDMMC_CTRL_USE_IDMAC BIT(25)
#define SDMMC_CTRL_CEATA_INT_EN BIT(11)
#define SDMMC_CTRL_SEND_AS_CCSD BIT(10)
#define SDMMC_CTRL_SEND_CCSD BIT(9)
#define SDMMC_CTRL_ABRT_READ_DATA BIT(8)
#define SDMMC_CTRL_SEND_IRQ_RESP BIT(7)
#define SDMMC_CTRL_READ_WAIT BIT(6)
#define SDMMC_CTRL_DMA_ENABLE BIT(5)
#define SDMMC_CTRL_INT_ENABLE BIT(4)
#define SDMMC_CTRL_DMA_RESET BIT(2)
#define SDMMC_CTRL_FIFO_RESET BIT(1)
#define SDMMC_CTRL_RESET BIT(0)
/* Clock Enable register defines */
#define SDMMC_CLKEN_LOW_PWR BIT(16)
#define SDMMC_CLKEN_ENABLE BIT(0)
/* time-out register defines */
#define SDMMC_TMOUT_DATA(n) _SBF(8, (n))
#define SDMMC_TMOUT_DATA_MSK 0xFFFFFF00
#define SDMMC_TMOUT_RESP(n) ((n) & 0xFF)
#define SDMMC_TMOUT_RESP_MSK 0xFF
/* card-type register defines */
#define SDMMC_CTYPE_8BIT BIT(16)
#define SDMMC_CTYPE_4BIT BIT(0)
#define SDMMC_CTYPE_1BIT 0
/* Interrupt status & mask register defines */
#define SDMMC_INT_SDIO BIT(16)
#define SDMMC_INT_EBE BIT(15)
#define SDMMC_INT_ACD BIT(14)
#define SDMMC_INT_SBE BIT(13)
#define SDMMC_INT_HLE BIT(12)
#define SDMMC_INT_FRUN BIT(11)
#define SDMMC_INT_HTO BIT(10)
#define SDMMC_INT_DTO BIT(9)
#define SDMMC_INT_RTO BIT(8)
#define SDMMC_INT_DCRC BIT(7)
#define SDMMC_INT_RCRC BIT(6)
#define SDMMC_INT_RXDR BIT(5)
#define SDMMC_INT_TXDR BIT(4)
#define SDMMC_INT_DATA_OVER BIT(3)
#define SDMMC_INT_CMD_DONE BIT(2)
#define SDMMC_INT_RESP_ERR BIT(1)
#define SDMMC_INT_CD BIT(0)
#define SDMMC_INT_ERROR 0xbfc2
/* Command register defines */
#define SDMMC_CMD_START BIT(31)
#define SDMMC_CMD_CCS_EXP BIT(23)
#define SDMMC_CMD_CEATA_RD BIT(22)
#define SDMMC_CMD_UPD_CLK BIT(21)
#define SDMMC_CMD_INIT BIT(15)
#define SDMMC_CMD_STOP BIT(14)
#define SDMMC_CMD_PRV_DAT_WAIT BIT(13)
#define SDMMC_CMD_SEND_STOP BIT(12)
#define SDMMC_CMD_STRM_MODE BIT(11)
#define SDMMC_CMD_DAT_WR BIT(10)
#define SDMMC_CMD_DAT_EXP BIT(9)
#define SDMMC_CMD_RESP_CRC BIT(8)
#define SDMMC_CMD_RESP_LONG BIT(7)
#define SDMMC_CMD_RESP_EXP BIT(6)
#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
/* Status register defines */
#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF)
#define SDMMC_FIFO_SZ 32
/* Internal DMAC interrupt defines */
#define SDMMC_IDMAC_INT_AI BIT(9)
#define SDMMC_IDMAC_INT_NI BIT(8)
#define SDMMC_IDMAC_INT_CES BIT(5)
#define SDMMC_IDMAC_INT_DU BIT(4)
#define SDMMC_IDMAC_INT_FBE BIT(2)
#define SDMMC_IDMAC_INT_RI BIT(1)
#define SDMMC_IDMAC_INT_TI BIT(0)
/* Internal DMAC bus mode bits */
#define SDMMC_IDMAC_ENABLE BIT(7)
#define SDMMC_IDMAC_FB BIT(1)
#define SDMMC_IDMAC_SWRESET BIT(0)
/* Register access macros */
#define mci_readl(dev, reg) \
__raw_readl(dev->regs + SDMMC_##reg)
#define mci_writel(dev, reg, value) \
__raw_writel((value), dev->regs + SDMMC_##reg)
/* 16-bit FIFO access macros */
#define mci_readw(dev, reg) \
__raw_readw(dev->regs + SDMMC_##reg)
#define mci_writew(dev, reg, value) \
__raw_writew((value), dev->regs + SDMMC_##reg)
/* 64-bit FIFO access macros */
#ifdef readq
#define mci_readq(dev, reg) \
__raw_readq(dev->regs + SDMMC_##reg)
#define mci_writeq(dev, reg, value) \
__raw_writeq((value), dev->regs + SDMMC_##reg)
#else
/*
* Dummy readq implementation for architectures that don't define it.
*
* We assume that none of these architectures configures the IP block
* with a 64-bit FIFO width, so this code should never be executed on
* those machines. Defining these macros here keeps the
* rest of the code free from ifdefs.
*/
#define mci_readq(dev, reg) \
(*(volatile u64 __force *)(dev->regs + SDMMC_##reg))
#define mci_writeq(dev, reg, value) \
(*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value)
#endif
#endif /* _DW_MMC_H_ */
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <linux/clk.h> #include <linux/clk.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/gpio.h> #include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <asm/dma.h> #include <asm/dma.h>
#include <asm/irq.h> #include <asm/irq.h>
...@@ -141,10 +142,49 @@ struct mxcmci_host { ...@@ -141,10 +142,49 @@ struct mxcmci_host {
struct work_struct datawork; struct work_struct datawork;
spinlock_t lock; spinlock_t lock;
struct regulator *vcc;
}; };
static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios);
static inline void mxcmci_init_ocr(struct mxcmci_host *host)
{
host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
if (IS_ERR(host->vcc)) {
host->vcc = NULL;
} else {
host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc);
if (host->pdata && host->pdata->ocr_avail)
dev_warn(mmc_dev(host->mmc),
"pdata->ocr_avail will not be used\n");
}
if (host->vcc == NULL) {
/* fall-back to platform data */
if (host->pdata && host->pdata->ocr_avail)
host->mmc->ocr_avail = host->pdata->ocr_avail;
else
host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
}
}
static inline void mxcmci_set_power(struct mxcmci_host *host,
unsigned char power_mode,
unsigned int vdd)
{
if (host->vcc) {
if (power_mode == MMC_POWER_UP)
mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);
else if (power_mode == MMC_POWER_OFF)
mmc_regulator_set_ocr(host->mmc, host->vcc, 0);
}
if (host->pdata && host->pdata->setpower)
host->pdata->setpower(mmc_dev(host->mmc), vdd);
}
static inline int mxcmci_use_dma(struct mxcmci_host *host) static inline int mxcmci_use_dma(struct mxcmci_host *host)
{ {
return host->do_dma; return host->do_dma;
...@@ -680,9 +720,9 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) ...@@ -680,9 +720,9 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4; host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;
if (host->power_mode != ios->power_mode) { if (host->power_mode != ios->power_mode) {
if (host->pdata && host->pdata->setpower) mxcmci_set_power(host, ios->power_mode, ios->vdd);
host->pdata->setpower(mmc_dev(mmc), ios->vdd);
host->power_mode = ios->power_mode; host->power_mode = ios->power_mode;
if (ios->power_mode == MMC_POWER_ON) if (ios->power_mode == MMC_POWER_ON)
host->cmdat |= CMD_DAT_CONT_INIT; host->cmdat |= CMD_DAT_CONT_INIT;
} }
...@@ -807,10 +847,7 @@ static int mxcmci_probe(struct platform_device *pdev) ...@@ -807,10 +847,7 @@ static int mxcmci_probe(struct platform_device *pdev)
host->pdata = pdev->dev.platform_data; host->pdata = pdev->dev.platform_data;
spin_lock_init(&host->lock); spin_lock_init(&host->lock);
if (host->pdata && host->pdata->ocr_avail) mxcmci_init_ocr(host);
mmc->ocr_avail = host->pdata->ocr_avail;
else
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
if (host->pdata && host->pdata->dat3_card_detect) if (host->pdata && host->pdata->dat3_card_detect)
host->default_irq_mask = host->default_irq_mask =
...@@ -915,6 +952,9 @@ static int mxcmci_remove(struct platform_device *pdev) ...@@ -915,6 +952,9 @@ static int mxcmci_remove(struct platform_device *pdev)
mmc_remove_host(mmc); mmc_remove_host(mmc);
if (host->vcc)
regulator_put(host->vcc);
if (host->pdata && host->pdata->exit) if (host->pdata && host->pdata->exit)
host->pdata->exit(&pdev->dev, mmc); host->pdata->exit(&pdev->dev, mmc);
...@@ -927,7 +967,6 @@ static int mxcmci_remove(struct platform_device *pdev) ...@@ -927,7 +967,6 @@ static int mxcmci_remove(struct platform_device *pdev)
clk_put(host->clk); clk_put(host->clk);
release_mem_region(host->res->start, resource_size(host->res)); release_mem_region(host->res->start, resource_size(host->res));
release_resource(host->res);
mmc_free_host(mmc); mmc_free_host(mmc);
......
/*
* sdhci-dove.c Support for SDHCI on Marvell's Dove SoC
*
* Author: Saeed Bishara <saeed@marvell.com>
* Mike Rapoport <mike@compulab.co.il>
* Based on sdhci-cns3xxx.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/io.h>
#include <linux/mmc/host.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
{
u16 ret;
switch (reg) {
case SDHCI_HOST_VERSION:
case SDHCI_SLOT_INT_STATUS:
/* those registers don't exist */
return 0;
default:
ret = readw(host->ioaddr + reg);
}
return ret;
}
static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
{
u32 ret;
switch (reg) {
case SDHCI_CAPABILITIES:
ret = readl(host->ioaddr + reg);
/* Mask the support for 3.0V */
ret &= ~SDHCI_CAN_VDD_300;
break;
default:
ret = readl(host->ioaddr + reg);
}
return ret;
}
static struct sdhci_ops sdhci_dove_ops = {
.read_w = sdhci_dove_readw,
.read_l = sdhci_dove_readl,
};
struct sdhci_pltfm_data sdhci_dove_pdata = {
.ops = &sdhci_dove_ops,
.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
SDHCI_QUIRK_NO_BUSY_IRQ |
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_FORCE_DMA,
};
...@@ -176,6 +176,74 @@ static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = { ...@@ -176,6 +176,74 @@ static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
}; };
/* O2Micro extra registers */
#define O2_SD_LOCK_WP 0xD3
#define O2_SD_MULTI_VCC3V 0xEE
#define O2_SD_CLKREQ 0xEC
#define O2_SD_CAPS 0xE0
#define O2_SD_ADMA1 0xE2
#define O2_SD_ADMA2 0xE7
#define O2_SD_INF_MOD 0xF1
static int o2_probe(struct sdhci_pci_chip *chip)
{
int ret;
u8 scratch;
switch (chip->pdev->device) {
case PCI_DEVICE_ID_O2_8220:
case PCI_DEVICE_ID_O2_8221:
case PCI_DEVICE_ID_O2_8320:
case PCI_DEVICE_ID_O2_8321:
/* This extra setup is required due to broken ADMA. */
ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
scratch &= 0x7f;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
/* Set Multi 3 to VCC3V# */
pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08);
/* Disable CLK_REQ# support after media DET */
ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch);
if (ret)
return ret;
scratch |= 0x20;
pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch);
/* Choose capabilities, enable SDMA. We have to write 0x01
* to the capabilities register first to unlock it.
*/
ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch);
if (ret)
return ret;
scratch |= 0x01;
pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch);
pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73);
/* Disable ADMA1/2 */
pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39);
pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08);
/* Disable the infinite transfer mode */
ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch);
if (ret)
return ret;
scratch |= 0x08;
pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch);
/* Lock WP */
ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch);
if (ret)
return ret;
scratch |= 0x80;
pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch);
}
return 0;
}
static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
{ {
u8 scratch; u8 scratch;
...@@ -204,6 +272,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) ...@@ -204,6 +272,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
static int jmicron_probe(struct sdhci_pci_chip *chip) static int jmicron_probe(struct sdhci_pci_chip *chip)
{ {
int ret; int ret;
u16 mmcdev = 0;
if (chip->pdev->revision == 0) { if (chip->pdev->revision == 0) {
chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR | chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR |
...@@ -225,12 +294,17 @@ static int jmicron_probe(struct sdhci_pci_chip *chip) ...@@ -225,12 +294,17 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)
* 2. The MMC interface has a lower subfunction number * 2. The MMC interface has a lower subfunction number
* than the SD interface. * than the SD interface.
*/ */
if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) { if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD)
mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC;
else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD)
mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD;
if (mmcdev) {
struct pci_dev *sd_dev; struct pci_dev *sd_dev;
sd_dev = NULL; sd_dev = NULL;
while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON, while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON,
PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) { mmcdev, sd_dev)) != NULL) {
if ((PCI_SLOT(chip->pdev->devfn) == if ((PCI_SLOT(chip->pdev->devfn) ==
PCI_SLOT(sd_dev->devfn)) && PCI_SLOT(sd_dev->devfn)) &&
(chip->pdev->bus == sd_dev->bus)) (chip->pdev->bus == sd_dev->bus))
...@@ -290,13 +364,25 @@ static int jmicron_probe_slot(struct sdhci_pci_slot *slot) ...@@ -290,13 +364,25 @@ static int jmicron_probe_slot(struct sdhci_pci_slot *slot)
slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
} }
/* JM388 MMC doesn't support 1.8V while SD supports it */
if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 |
MMC_VDD_29_30 | MMC_VDD_30_31 |
MMC_VDD_165_195; /* allow 1.8V */
slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 |
MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */
}
/* /*
* The secondary interface requires a bit set to get the * The secondary interface requires a bit set to get the
* interrupts. * interrupts.
*/ */
if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
jmicron_enable_mmc(slot->host, 1); jmicron_enable_mmc(slot->host, 1);
slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST;
return 0; return 0;
} }
...@@ -305,7 +391,8 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead) ...@@ -305,7 +391,8 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
if (dead) if (dead)
return; return;
if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)
jmicron_enable_mmc(slot->host, 0); jmicron_enable_mmc(slot->host, 0);
} }
...@@ -313,7 +400,8 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state) ...@@ -313,7 +400,8 @@ static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
{ {
int i; int i;
if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
for (i = 0;i < chip->num_slots;i++) for (i = 0;i < chip->num_slots;i++)
jmicron_enable_mmc(chip->slots[i]->host, 0); jmicron_enable_mmc(chip->slots[i]->host, 0);
} }
...@@ -325,7 +413,8 @@ static int jmicron_resume(struct sdhci_pci_chip *chip) ...@@ -325,7 +413,8 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
{ {
int ret, i; int ret, i;
if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC ||
chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) {
for (i = 0;i < chip->num_slots;i++) for (i = 0;i < chip->num_slots;i++)
jmicron_enable_mmc(chip->slots[i]->host, 1); jmicron_enable_mmc(chip->slots[i]->host, 1);
} }
...@@ -339,6 +428,10 @@ static int jmicron_resume(struct sdhci_pci_chip *chip) ...@@ -339,6 +428,10 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)
return 0; return 0;
} }
static const struct sdhci_pci_fixes sdhci_o2 = {
.probe = o2_probe,
};
static const struct sdhci_pci_fixes sdhci_jmicron = { static const struct sdhci_pci_fixes sdhci_jmicron = {
.probe = jmicron_probe, .probe = jmicron_probe,
...@@ -509,6 +602,22 @@ static const struct pci_device_id pci_ids[] __devinitdata = { ...@@ -509,6 +602,22 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.driver_data = (kernel_ulong_t)&sdhci_jmicron, .driver_data = (kernel_ulong_t)&sdhci_jmicron,
}, },
{
.vendor = PCI_VENDOR_ID_JMICRON,
.device = PCI_DEVICE_ID_JMICRON_JMB388_SD,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_jmicron,
},
{
.vendor = PCI_VENDOR_ID_JMICRON,
.device = PCI_DEVICE_ID_JMICRON_JMB388_ESD,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_jmicron,
},
{ {
.vendor = PCI_VENDOR_ID_SYSKONNECT, .vendor = PCI_VENDOR_ID_SYSKONNECT,
.device = 0x8000, .device = 0x8000,
...@@ -589,6 +698,46 @@ static const struct pci_device_id pci_ids[] __devinitdata = { ...@@ -589,6 +698,46 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
}, },
{
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8120,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_o2,
},
{
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8220,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_o2,
},
{
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8221,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_o2,
},
{
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8320,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_o2,
},
{
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8321,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_o2,
},
{ /* Generic SD host controller */ { /* Generic SD host controller */
PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
}, },
......
...@@ -169,6 +169,12 @@ static const struct platform_device_id sdhci_pltfm_ids[] = { ...@@ -169,6 +169,12 @@ static const struct platform_device_id sdhci_pltfm_ids[] = {
#endif #endif
#ifdef CONFIG_MMC_SDHCI_ESDHC_IMX #ifdef CONFIG_MMC_SDHCI_ESDHC_IMX
{ "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata }, { "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata },
#endif
#ifdef CONFIG_MMC_SDHCI_DOVE
{ "sdhci-dove", (kernel_ulong_t)&sdhci_dove_pdata },
#endif
#ifdef CONFIG_MMC_SDHCI_TEGRA
{ "sdhci-tegra", (kernel_ulong_t)&sdhci_tegra_pdata },
#endif #endif
{ }, { },
}; };
......
...@@ -22,5 +22,7 @@ struct sdhci_pltfm_host { ...@@ -22,5 +22,7 @@ struct sdhci_pltfm_host {
extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata; extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata;
extern struct sdhci_pltfm_data sdhci_dove_pdata;
extern struct sdhci_pltfm_data sdhci_tegra_pdata;
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
...@@ -130,6 +130,15 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost, ...@@ -130,6 +130,15 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
if (!clksrc) if (!clksrc)
return UINT_MAX; return UINT_MAX;
/*
* When 'clk_type' is S3C_SDHCI_CLK_DIV_EXTERNAL the clock is divided
* outside the host controller, so ask the clock framework how close
* it can get to the wanted rate and report the deviation.
*/
if (ourhost->pdata->clk_type) {
rate = clk_round_rate(clksrc, wanted);
return wanted - rate;
}
rate = clk_get_rate(clksrc); rate = clk_get_rate(clksrc);
for (div = 1; div < 256; div *= 2) { for (div = 1; div < 256; div *= 2) {
...@@ -232,6 +241,42 @@ static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host) ...@@ -232,6 +241,42 @@ static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host)
return min; return min;
} }
/* sdhci_cmu_get_max_clock - callback to get maximum clock frequency. */
static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], UINT_MAX);
}
/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */
static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host)
{
struct sdhci_s3c *ourhost = to_s3c(host);
/*
* The initialization clock must lie in the 100 kHz - 400 kHz range,
* so use 400 kHz as the upper bound here.
*/
return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], 400000);
}
/* sdhci_cmu_set_clock - callback on clock change. */
static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
{
struct sdhci_s3c *ourhost = to_s3c(host);
/* don't bother if the clock is going off */
if (clock == 0)
return;
sdhci_s3c_set_clock(host, clock);
clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock);
host->clock = clock;
}
static struct sdhci_ops sdhci_s3c_ops = { static struct sdhci_ops sdhci_s3c_ops = {
.get_max_clock = sdhci_s3c_get_max_clk, .get_max_clock = sdhci_s3c_get_max_clk,
.set_clock = sdhci_s3c_set_clock, .set_clock = sdhci_s3c_set_clock,
...@@ -361,6 +406,13 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) ...@@ -361,6 +406,13 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
clks++; clks++;
sc->clk_bus[ptr] = clk; sc->clk_bus[ptr] = clk;
/*
* save current clock index to know which clock bus
* is used later in overriding functions.
*/
sc->cur_clk = ptr;
clk_enable(clk); clk_enable(clk);
dev_info(dev, "clock source %d: %s (%ld Hz)\n", dev_info(dev, "clock source %d: %s (%ld Hz)\n",
...@@ -427,6 +479,20 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) ...@@ -427,6 +479,20 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */ /* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
/*
* If controller does not have internal clock divider,
* we can use overriding functions instead of default.
*/
if (pdata->clk_type) {
sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
}
/* Pass on any additional host capabilities supplied in platform data */
if (pdata->host_caps)
host->mmc->caps |= pdata->host_caps;
ret = sdhci_add_host(host); ret = sdhci_add_host(host);
if (ret) { if (ret) {
dev_err(dev, "sdhci_add_host() failed\n"); dev_err(dev, "sdhci_add_host() failed\n");
......
/*
* Copyright (C) 2010 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <mach/gpio.h>
#include <mach/sdhci.h>
#include "sdhci.h"
#include "sdhci-pltfm.h"
static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
{
u32 val;
if (unlikely(reg == SDHCI_PRESENT_STATE)) {
/* Use wp_gpio here instead? */
val = readl(host->ioaddr + reg);
return val | SDHCI_WRITE_PROTECT;
}
return readl(host->ioaddr + reg);
}
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
{
if (unlikely(reg == SDHCI_HOST_VERSION)) {
/* Erratum: Version register is invalid in HW. */
return SDHCI_SPEC_200;
}
return readw(host->ioaddr + reg);
}
static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
{
/* Seems like we're getting spurious timeout and crc errors, so
* disable signalling of them. In case of real errors software
* timers should take care of eventually detecting them.
*/
if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
writel(val, host->ioaddr + reg);
if (unlikely(reg == SDHCI_INT_ENABLE)) {
/* Erratum: Must enable block gap interrupt detection */
u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
if (val & SDHCI_INT_CARD_INT)
gap_ctrl |= 0x8;
else
gap_ctrl &= ~0x8;
writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
}
}
static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
{
struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
struct tegra_sdhci_platform_data *plat;
plat = pdev->dev.platform_data;
if (!gpio_is_valid(plat->wp_gpio))
return -1;
return gpio_get_value(plat->wp_gpio);
}
static irqreturn_t carddetect_irq(int irq, void *data)
{
struct sdhci_host *sdhost = (struct sdhci_host *)data;
tasklet_schedule(&sdhost->card_tasklet);
return IRQ_HANDLED;
};
static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
{
struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
struct tegra_sdhci_platform_data *plat;
u32 ctrl;
plat = pdev->dev.platform_data;
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
ctrl &= ~SDHCI_CTRL_4BITBUS;
ctrl |= SDHCI_CTRL_8BITBUS;
} else {
ctrl &= ~SDHCI_CTRL_8BITBUS;
if (bus_width == MMC_BUS_WIDTH_4)
ctrl |= SDHCI_CTRL_4BITBUS;
else
ctrl &= ~SDHCI_CTRL_4BITBUS;
}
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
return 0;
}
static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
struct sdhci_pltfm_data *pdata)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
struct tegra_sdhci_platform_data *plat;
struct clk *clk;
int rc;
plat = pdev->dev.platform_data;
if (plat == NULL) {
dev_err(mmc_dev(host->mmc), "missing platform data\n");
return -ENXIO;
}
if (gpio_is_valid(plat->power_gpio)) {
rc = gpio_request(plat->power_gpio, "sdhci_power");
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate power gpio\n");
goto out;
}
tegra_gpio_enable(plat->power_gpio);
gpio_direction_output(plat->power_gpio, 1);
}
if (gpio_is_valid(plat->cd_gpio)) {
rc = gpio_request(plat->cd_gpio, "sdhci_cd");
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate cd gpio\n");
goto out_power;
}
tegra_gpio_enable(plat->cd_gpio);
gpio_direction_input(plat->cd_gpio);
rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
mmc_hostname(host->mmc), host);
if (rc) {
dev_err(mmc_dev(host->mmc), "request irq error\n");
goto out_cd;
}
}
if (gpio_is_valid(plat->wp_gpio)) {
rc = gpio_request(plat->wp_gpio, "sdhci_wp");
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate wp gpio\n");
goto out_cd;
}
tegra_gpio_enable(plat->wp_gpio);
gpio_direction_input(plat->wp_gpio);
}
clk = clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(clk)) {
dev_err(mmc_dev(host->mmc), "clk err\n");
rc = PTR_ERR(clk);
goto out_wp;
}
clk_enable(clk);
pltfm_host->clk = clk;
if (plat->is_8bit)
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
return 0;
out_wp:
if (gpio_is_valid(plat->wp_gpio)) {
tegra_gpio_disable(plat->wp_gpio);
gpio_free(plat->wp_gpio);
}
out_cd:
if (gpio_is_valid(plat->cd_gpio)) {
tegra_gpio_disable(plat->cd_gpio);
gpio_free(plat->cd_gpio);
}
out_power:
if (gpio_is_valid(plat->power_gpio)) {
tegra_gpio_disable(plat->power_gpio);
gpio_free(plat->power_gpio);
}
out:
return rc;
}
static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
{
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
struct tegra_sdhci_platform_data *plat;
plat = pdev->dev.platform_data;
if (gpio_is_valid(plat->wp_gpio)) {
tegra_gpio_disable(plat->wp_gpio);
gpio_free(plat->wp_gpio);
}
if (gpio_is_valid(plat->cd_gpio)) {
tegra_gpio_disable(plat->cd_gpio);
gpio_free(plat->cd_gpio);
}
if (gpio_is_valid(plat->power_gpio)) {
tegra_gpio_disable(plat->power_gpio);
gpio_free(plat->power_gpio);
}
clk_disable(pltfm_host->clk);
clk_put(pltfm_host->clk);
}
static struct sdhci_ops tegra_sdhci_ops = {
.get_ro = tegra_sdhci_get_ro,
.read_l = tegra_sdhci_readl,
.read_w = tegra_sdhci_readw,
.write_l = tegra_sdhci_writel,
.platform_8bit_width = tegra_sdhci_8bit,
};
struct sdhci_pltfm_data sdhci_tegra_pdata = {
.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_SINGLE_POWER_WRITE |
SDHCI_QUIRK_NO_HISPD_BIT |
SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
.ops = &tegra_sdhci_ops,
.init = tegra_sdhci_pltfm_init,
.exit = tegra_sdhci_pltfm_exit,
};
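As a usage illustration (not part of this patch), board code passes the GPIO numbers consumed by tegra_sdhci_pltfm_init() through platform data; the GPIO numbers and the variable name below are invented, only the field names come from the code above.
static struct tegra_sdhci_platform_data example_tegra_sdhci_pdata = {
	.cd_gpio	= 69,	/* hypothetical card-detect GPIO, serviced by carddetect_irq() */
	.wp_gpio	= 57,	/* hypothetical write-protect GPIO, read by tegra_sdhci_get_ro() */
	.power_gpio	= 70,	/* hypothetical slot power enable, driven high at init */
	.is_8bit	= 0,	/* set to 1 to advertise MMC_CAP_8_BIT_DATA */
};
/* installed as pdev->dev.platform_data of the matching SDHCI platform device */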
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include <linux/leds.h> #include <linux/leds.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h> #include <linux/mmc/host.h>
#include "sdhci.h" #include "sdhci.h"
...@@ -77,8 +78,11 @@ static void sdhci_dumpregs(struct sdhci_host *host) ...@@ -77,8 +78,11 @@ static void sdhci_dumpregs(struct sdhci_host *host)
printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
sdhci_readw(host, SDHCI_ACMD12_ERR), sdhci_readw(host, SDHCI_ACMD12_ERR),
sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Max curr: 0x%08x\n", printk(KERN_DEBUG DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
sdhci_readl(host, SDHCI_CAPABILITIES), sdhci_readl(host, SDHCI_CAPABILITIES),
sdhci_readl(host, SDHCI_CAPABILITIES_1));
printk(KERN_DEBUG DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
sdhci_readw(host, SDHCI_COMMAND),
sdhci_readl(host, SDHCI_MAX_CURRENT)); sdhci_readl(host, SDHCI_MAX_CURRENT));
if (host->flags & SDHCI_USE_ADMA) if (host->flags & SDHCI_USE_ADMA)
...@@ -1518,7 +1522,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) ...@@ -1518,7 +1522,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
if (intmask & SDHCI_INT_DATA_TIMEOUT) if (intmask & SDHCI_INT_DATA_TIMEOUT)
host->data->error = -ETIMEDOUT; host->data->error = -ETIMEDOUT;
else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) else if (intmask & SDHCI_INT_DATA_END_BIT)
host->data->error = -EILSEQ;
else if ((intmask & SDHCI_INT_DATA_CRC) &&
SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
!= MMC_BUS_TEST_R)
host->data->error = -EILSEQ; host->data->error = -EILSEQ;
else if (intmask & SDHCI_INT_ADMA_ERROR) { else if (intmask & SDHCI_INT_ADMA_ERROR) {
printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc)); printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
...@@ -1736,7 +1744,7 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host); ...@@ -1736,7 +1744,7 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host);
int sdhci_add_host(struct sdhci_host *host) int sdhci_add_host(struct sdhci_host *host)
{ {
struct mmc_host *mmc; struct mmc_host *mmc;
unsigned int caps; unsigned int caps, ocr_avail;
int ret; int ret;
WARN_ON(host == NULL); WARN_ON(host == NULL);
...@@ -1890,13 +1898,26 @@ int sdhci_add_host(struct sdhci_host *host) ...@@ -1890,13 +1898,26 @@ int sdhci_add_host(struct sdhci_host *host)
mmc_card_is_removable(mmc)) mmc_card_is_removable(mmc))
mmc->caps |= MMC_CAP_NEEDS_POLL; mmc->caps |= MMC_CAP_NEEDS_POLL;
mmc->ocr_avail = 0; ocr_avail = 0;
if (caps & SDHCI_CAN_VDD_330) if (caps & SDHCI_CAN_VDD_330)
mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
if (caps & SDHCI_CAN_VDD_300) if (caps & SDHCI_CAN_VDD_300)
mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
if (caps & SDHCI_CAN_VDD_180) if (caps & SDHCI_CAN_VDD_180)
mmc->ocr_avail |= MMC_VDD_165_195; ocr_avail |= MMC_VDD_165_195;
mmc->ocr_avail = ocr_avail;
mmc->ocr_avail_sdio = ocr_avail;
if (host->ocr_avail_sdio)
mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
mmc->ocr_avail_sd = ocr_avail;
if (host->ocr_avail_sd)
mmc->ocr_avail_sd &= host->ocr_avail_sd;
else /* normal SD controllers don't support 1.8V */
mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
mmc->ocr_avail_mmc = ocr_avail;
if (host->ocr_avail_mmc)
mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
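/*
 * Worked example (editorial note, not part of the original patch): a
 * controller whose caps advertise SDHCI_CAN_VDD_330 and SDHCI_CAN_VDD_180
 * ends up with ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195.
 * Unless the host driver pre-set host->ocr_avail_sd, the SD mask then loses
 * MMC_VDD_165_195 (plain SD controllers cannot run the bus at 1.8V), while
 * the SDIO and MMC masks keep it.
 */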
if (mmc->ocr_avail == 0) { if (mmc->ocr_avail == 0) {
printk(KERN_ERR "%s: Hardware doesn't report any " printk(KERN_ERR "%s: Hardware doesn't report any "
...@@ -1928,10 +1949,14 @@ int sdhci_add_host(struct sdhci_host *host) ...@@ -1928,10 +1949,14 @@ int sdhci_add_host(struct sdhci_host *host)
* of bytes. When doing hardware scatter/gather, each entry cannot * of bytes. When doing hardware scatter/gather, each entry cannot
* be larger than 64 KiB though. * be larger than 64 KiB though.
*/ */
if (host->flags & SDHCI_USE_ADMA) if (host->flags & SDHCI_USE_ADMA) {
mmc->max_seg_size = 65536; if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
mmc->max_seg_size = 65535;
else else
mmc->max_seg_size = 65536;
} else {
mmc->max_seg_size = mmc->max_req_size; mmc->max_seg_size = mmc->max_req_size;
}
/* /*
* Maximum block size. This varies from controller to controller and * Maximum block size. This varies from controller to controller and
......
...@@ -52,6 +52,7 @@ ...@@ -52,6 +52,7 @@
#define SDHCI_CMD_RESP_SHORT_BUSY 0x03 #define SDHCI_CMD_RESP_SHORT_BUSY 0x03
#define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff)) #define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff))
#define SDHCI_GET_CMD(c) ((c>>8) & 0x3f)
#define SDHCI_RESPONSE 0x10 #define SDHCI_RESPONSE 0x10
...@@ -165,7 +166,7 @@ ...@@ -165,7 +166,7 @@
#define SDHCI_CAN_VDD_180 0x04000000 #define SDHCI_CAN_VDD_180 0x04000000
#define SDHCI_CAN_64BIT 0x10000000 #define SDHCI_CAN_64BIT 0x10000000
/* 44-47 reserved for more caps */ #define SDHCI_CAPABILITIES_1 0x44
#define SDHCI_MAX_CURRENT 0x48 #define SDHCI_MAX_CURRENT 0x48
......
...@@ -25,16 +25,261 @@ ...@@ -25,16 +25,261 @@
* double buffer support * double buffer support
* *
*/ */
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
#include <linux/mmc/host.h> #include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/core.h> #include <linux/mfd/core.h>
#include <linux/mfd/tmio.h> #include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
#define CTL_STOP_INTERNAL_ACTION 0x08
#define CTL_XFER_BLK_COUNT 0xa
#define CTL_RESPONSE 0x0c
#define CTL_STATUS 0x1c
#define CTL_IRQ_MASK 0x20
#define CTL_SD_CARD_CLK_CTL 0x24
#define CTL_SD_XFER_LEN 0x26
#define CTL_SD_MEM_CARD_OPT 0x28
#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
#define CTL_SD_DATA_PORT 0x30
#define CTL_TRANSACTION_CTL 0x34
#define CTL_SDIO_STATUS 0x36
#define CTL_SDIO_IRQ_MASK 0x38
#define CTL_RESET_SD 0xe0
#define CTL_SDIO_REGS 0x100
#define CTL_CLK_AND_WAIT_CTL 0x138
#define CTL_RESET_SDIO 0x1e0
/* Definitions for values the CTRL_STATUS register can take. */
#define TMIO_STAT_CMDRESPEND 0x00000001
#define TMIO_STAT_DATAEND 0x00000004
#define TMIO_STAT_CARD_REMOVE 0x00000008
#define TMIO_STAT_CARD_INSERT 0x00000010
#define TMIO_STAT_SIGSTATE 0x00000020
#define TMIO_STAT_WRPROTECT 0x00000080
#define TMIO_STAT_CARD_REMOVE_A 0x00000100
#define TMIO_STAT_CARD_INSERT_A 0x00000200
#define TMIO_STAT_SIGSTATE_A 0x00000400
#define TMIO_STAT_CMD_IDX_ERR 0x00010000
#define TMIO_STAT_CRCFAIL 0x00020000
#define TMIO_STAT_STOPBIT_ERR 0x00040000
#define TMIO_STAT_DATATIMEOUT 0x00080000
#define TMIO_STAT_RXOVERFLOW 0x00100000
#define TMIO_STAT_TXUNDERRUN 0x00200000
#define TMIO_STAT_CMDTIMEOUT 0x00400000
#define TMIO_STAT_RXRDY 0x01000000
#define TMIO_STAT_TXRQ 0x02000000
#define TMIO_STAT_ILL_FUNC 0x20000000
#define TMIO_STAT_CMD_BUSY 0x40000000
#define TMIO_STAT_ILL_ACCESS 0x80000000
/* Definitions for values the CTRL_SDIO_STATUS register can take. */
#define TMIO_SDIO_STAT_IOIRQ 0x0001
#define TMIO_SDIO_STAT_EXPUB52 0x4000
#define TMIO_SDIO_STAT_EXWT 0x8000
#define TMIO_SDIO_MASK_ALL 0xc007
/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL 0x837f031d
#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
#define enable_mmc_irqs(host, i) \
do { \
u32 mask;\
mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
mask &= ~((i) & TMIO_MASK_IRQ); \
sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
} while (0)
#define disable_mmc_irqs(host, i) \
do { \
u32 mask;\
mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
mask |= ((i) & TMIO_MASK_IRQ); \
sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
} while (0)
#define ack_mmc_irqs(host, i) \
do { \
sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
} while (0)
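/*
 * Editorial usage note: the interrupt handler below drives these helpers,
 * e.g. ack_mmc_irqs(host, TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT) once
 * a command interrupt has been consumed, and
 * enable_mmc_irqs(host, TMIO_MASK_READOP) when a PIO read is about to start.
 */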
/* This is arbitrary; no one has needed any higher alignment yet */
#define MAX_ALIGN 4
struct tmio_mmc_host {
void __iomem *ctl;
unsigned long bus_shift;
struct mmc_command *cmd;
struct mmc_request *mrq;
struct mmc_data *data;
struct mmc_host *mmc;
int irq;
unsigned int sdio_irq_enabled;
/* Callbacks for clock / power control */
void (*set_pwr)(struct platform_device *host, int state);
void (*set_clk_div)(struct platform_device *host, int state);
/* pio related stuff */
struct scatterlist *sg_ptr;
struct scatterlist *sg_orig;
unsigned int sg_len;
unsigned int sg_off;
struct platform_device *pdev;
/* DMA support */
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
struct tasklet_struct dma_complete;
struct tasklet_struct dma_issue;
#ifdef CONFIG_TMIO_MMC_DMA
unsigned int dma_sglen;
u8 bounce_buf[PAGE_CACHE_SIZE] __attribute__((aligned(MAX_ALIGN)));
struct scatterlist bounce_sg;
#endif
/* Track lost interrupts */
struct delayed_work delayed_reset_work;
spinlock_t lock;
unsigned long last_req_ts;
};
static void tmio_check_bounce_buffer(struct tmio_mmc_host *host);
static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
return readw(host->ctl + (addr << host->bus_shift));
}
static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
readsw(host->ctl + (addr << host->bus_shift), buf, count);
}
static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
return readw(host->ctl + (addr << host->bus_shift)) |
readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
{
writew(val, host->ctl + (addr << host->bus_shift));
}
static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
writesw(host->ctl + (addr << host->bus_shift), buf, count);
}
static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
{
writew(val, host->ctl + (addr << host->bus_shift));
writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
host->sg_len = data->sg_len;
host->sg_ptr = data->sg;
host->sg_orig = data->sg;
host->sg_off = 0;
}
static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
host->sg_ptr = sg_next(host->sg_ptr);
host->sg_off = 0;
return --host->sg_len;
}
static char *tmio_mmc_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
local_irq_save(*flags);
return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
#include "tmio_mmc.h" static void tmio_mmc_kunmap_atomic(void *virt, unsigned long *flags)
{
kunmap_atomic(virt, KM_BIO_SRC_IRQ);
local_irq_restore(*flags);
}
#ifdef CONFIG_MMC_DEBUG
#define STATUS_TO_TEXT(a) \
do { \
if (status & TMIO_STAT_##a) \
printk(#a); \
} while (0)
void pr_debug_status(u32 status)
{
printk(KERN_DEBUG "status: %08x = ", status);
STATUS_TO_TEXT(CARD_REMOVE);
STATUS_TO_TEXT(CARD_INSERT);
STATUS_TO_TEXT(SIGSTATE);
STATUS_TO_TEXT(WRPROTECT);
STATUS_TO_TEXT(CARD_REMOVE_A);
STATUS_TO_TEXT(CARD_INSERT_A);
STATUS_TO_TEXT(SIGSTATE_A);
STATUS_TO_TEXT(CMD_IDX_ERR);
STATUS_TO_TEXT(STOPBIT_ERR);
STATUS_TO_TEXT(ILL_FUNC);
STATUS_TO_TEXT(CMD_BUSY);
STATUS_TO_TEXT(CMDRESPEND);
STATUS_TO_TEXT(DATAEND);
STATUS_TO_TEXT(CRCFAIL);
STATUS_TO_TEXT(DATATIMEOUT);
STATUS_TO_TEXT(CMDTIMEOUT);
STATUS_TO_TEXT(RXOVERFLOW);
STATUS_TO_TEXT(TXUNDERRUN);
STATUS_TO_TEXT(RXRDY);
STATUS_TO_TEXT(TXRQ);
STATUS_TO_TEXT(ILL_ACCESS);
printk("\n");
}
#else
#define pr_debug_status(s) do { } while (0)
#endif
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct tmio_mmc_host *host = mmc_priv(mmc);
if (enable) {
host->sdio_irq_enabled = 1;
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK,
(TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ));
} else {
sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, TMIO_SDIO_MASK_ALL);
sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
host->sdio_irq_enabled = 0;
}
}
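/*
 * Editorial note: this routine is both the .enable_sdio_irq host operation
 * (see tmio_mmc_ops below) and the helper used by the clock start/stop paths
 * to restore the SDIO mask after CTL_CLK_AND_WAIT_CTL writes, which unmask
 * SDIO IRQs on some implementations.
 */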
static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
{ {
...@@ -55,8 +300,23 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) ...@@ -55,8 +300,23 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{ {
struct mfd_cell *cell = host->pdev->dev.platform_data;
struct tmio_mmc_data *pdata = cell->driver_data;
/*
* Testing on sh-mobile showed that SDIO IRQs are unmasked when
* CTL_CLK_AND_WAIT_CTL gets written, so we have to disable the
* device IRQ here and restore the SDIO IRQ mask before
* re-enabling the device IRQ.
*/
if (pdata->flags & TMIO_MMC_SDIO_IRQ)
disable_irq(host->irq);
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
msleep(10); msleep(10);
if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
enable_irq(host->irq);
}
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
msleep(10); msleep(10);
...@@ -64,11 +324,21 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) ...@@ -64,11 +324,21 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
static void tmio_mmc_clk_start(struct tmio_mmc_host *host) static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{ {
struct mfd_cell *cell = host->pdev->dev.platform_data;
struct tmio_mmc_data *pdata = cell->driver_data;
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
msleep(10); msleep(10);
/* see comment in tmio_mmc_clk_stop above */
if (pdata->flags & TMIO_MMC_SDIO_IRQ)
disable_irq(host->irq);
sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
msleep(10); msleep(10);
if (pdata->flags & TMIO_MMC_SDIO_IRQ) {
tmio_mmc_enable_sdio_irq(host->mmc, host->sdio_irq_enabled);
enable_irq(host->irq);
}
} }
static void reset(struct tmio_mmc_host *host) static void reset(struct tmio_mmc_host *host)
...@@ -82,15 +352,60 @@ static void reset(struct tmio_mmc_host *host) ...@@ -82,15 +352,60 @@ static void reset(struct tmio_mmc_host *host)
msleep(10); msleep(10);
} }
static void tmio_mmc_reset_work(struct work_struct *work)
{
struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
delayed_reset_work.work);
struct mmc_request *mrq;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
mrq = host->mrq;
/* request already finished */
if (!mrq
|| time_is_after_jiffies(host->last_req_ts +
msecs_to_jiffies(2000))) {
spin_unlock_irqrestore(&host->lock, flags);
return;
}
dev_warn(&host->pdev->dev,
"timeout waiting for hardware interrupt (CMD%u)\n",
mrq->cmd->opcode);
if (host->data)
host->data->error = -ETIMEDOUT;
else if (host->cmd)
host->cmd->error = -ETIMEDOUT;
else
mrq->cmd->error = -ETIMEDOUT;
host->cmd = NULL;
host->data = NULL;
host->mrq = NULL;
spin_unlock_irqrestore(&host->lock, flags);
reset(host);
mmc_request_done(host->mmc, mrq);
}
static void static void
tmio_mmc_finish_request(struct tmio_mmc_host *host) tmio_mmc_finish_request(struct tmio_mmc_host *host)
{ {
struct mmc_request *mrq = host->mrq; struct mmc_request *mrq = host->mrq;
if (!mrq)
return;
host->mrq = NULL; host->mrq = NULL;
host->cmd = NULL; host->cmd = NULL;
host->data = NULL; host->data = NULL;
cancel_delayed_work(&host->delayed_reset_work);
mmc_request_done(host->mmc, mrq); mmc_request_done(host->mmc, mrq);
} }
...@@ -200,6 +515,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) ...@@ -200,6 +515,7 @@ static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
return; return;
} }
/* needs to be called with host->lock held */
static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{ {
struct mmc_data *data = host->data; struct mmc_data *data = host->data;
...@@ -233,6 +549,8 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) ...@@ -233,6 +549,8 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
if (data->flags & MMC_DATA_READ) { if (data->flags & MMC_DATA_READ) {
if (!host->chan_rx) if (!host->chan_rx)
disable_mmc_irqs(host, TMIO_MASK_READOP); disable_mmc_irqs(host, TMIO_MASK_READOP);
else
tmio_check_bounce_buffer(host);
dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
host->mrq); host->mrq);
} else { } else {
...@@ -254,10 +572,12 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) ...@@ -254,10 +572,12 @@ static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
static void tmio_mmc_data_irq(struct tmio_mmc_host *host) static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
{ {
struct mmc_data *data = host->data; struct mmc_data *data;
spin_lock(&host->lock);
data = host->data;
if (!data) if (!data)
return; goto out;
if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) { if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
/* /*
...@@ -278,6 +598,8 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host) ...@@ -278,6 +598,8 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
} else { } else {
tmio_mmc_do_data_irq(host); tmio_mmc_do_data_irq(host);
} }
out:
spin_unlock(&host->lock);
} }
static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
...@@ -286,9 +608,11 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, ...@@ -286,9 +608,11 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
struct mmc_command *cmd = host->cmd; struct mmc_command *cmd = host->cmd;
int i, addr; int i, addr;
spin_lock(&host->lock);
if (!host->cmd) { if (!host->cmd) {
pr_debug("Spurious CMD irq\n"); pr_debug("Spurious CMD irq\n");
return; goto out;
} }
host->cmd = NULL; host->cmd = NULL;
...@@ -324,8 +648,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, ...@@ -324,8 +648,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
if (!host->chan_rx) if (!host->chan_rx)
enable_mmc_irqs(host, TMIO_MASK_READOP); enable_mmc_irqs(host, TMIO_MASK_READOP);
} else { } else {
struct dma_chan *chan = host->chan_tx; if (!host->chan_tx)
if (!chan)
enable_mmc_irqs(host, TMIO_MASK_WRITEOP); enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
else else
tasklet_schedule(&host->dma_issue); tasklet_schedule(&host->dma_issue);
...@@ -334,13 +657,19 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, ...@@ -334,13 +657,19 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
tmio_mmc_finish_request(host); tmio_mmc_finish_request(host);
} }
out:
spin_unlock(&host->lock);
return; return;
} }
static irqreturn_t tmio_mmc_irq(int irq, void *devid) static irqreturn_t tmio_mmc_irq(int irq, void *devid)
{ {
struct tmio_mmc_host *host = devid; struct tmio_mmc_host *host = devid;
struct mfd_cell *cell = host->pdev->dev.platform_data;
struct tmio_mmc_data *pdata = cell->driver_data;
unsigned int ireg, irq_mask, status; unsigned int ireg, irq_mask, status;
unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
pr_debug("MMC IRQ begin\n"); pr_debug("MMC IRQ begin\n");
...@@ -348,6 +677,29 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid) ...@@ -348,6 +677,29 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
ireg = status & TMIO_MASK_IRQ & ~irq_mask; ireg = status & TMIO_MASK_IRQ & ~irq_mask;
sdio_ireg = 0;
if (!ireg && pdata->flags & TMIO_MMC_SDIO_IRQ) {
sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
sdio_irq_mask = sd_ctrl_read16(host, CTL_SDIO_IRQ_MASK);
sdio_ireg = sdio_status & TMIO_SDIO_MASK_ALL & ~sdio_irq_mask;
sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status & ~TMIO_SDIO_MASK_ALL);
if (sdio_ireg && !host->sdio_irq_enabled) {
pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
sdio_status, sdio_irq_mask, sdio_ireg);
tmio_mmc_enable_sdio_irq(host->mmc, 0);
goto out;
}
if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
mmc_signal_sdio_irq(host->mmc);
if (sdio_ireg)
goto out;
}
pr_debug_status(status); pr_debug_status(status);
pr_debug_status(ireg); pr_debug_status(ireg);
...@@ -375,8 +727,10 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid) ...@@ -375,8 +727,10 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
*/ */
/* Command completion */ /* Command completion */
if (ireg & TMIO_MASK_CMD) { if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
ack_mmc_irqs(host, TMIO_MASK_CMD); ack_mmc_irqs(host,
TMIO_STAT_CMDRESPEND |
TMIO_STAT_CMDTIMEOUT);
tmio_mmc_cmd_irq(host, status); tmio_mmc_cmd_irq(host, status);
} }
...@@ -407,6 +761,16 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid) ...@@ -407,6 +761,16 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
} }
#ifdef CONFIG_TMIO_MMC_DMA #ifdef CONFIG_TMIO_MMC_DMA
static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
{
if (host->sg_ptr == &host->bounce_sg) {
unsigned long flags;
void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);
memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
}
}
static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{ {
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) #if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
...@@ -427,12 +791,39 @@ static void tmio_dma_complete(void *arg) ...@@ -427,12 +791,39 @@ static void tmio_dma_complete(void *arg)
enable_mmc_irqs(host, TMIO_STAT_DATAEND); enable_mmc_irqs(host, TMIO_STAT_DATAEND);
} }
static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{ {
struct scatterlist *sg = host->sg_ptr; struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL; struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx; struct dma_chan *chan = host->chan_rx;
int ret; struct mfd_cell *cell = host->pdev->dev.platform_data;
struct tmio_mmc_data *pdata = cell->driver_data;
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
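/*
 * Editorial note: DMA is attempted only when every sg element length is a
 * multiple of (1 << alignment_shift). An unaligned start offset is tolerated
 * only for a single element no larger than a page (and only while the bounce
 * buffer alignment suffices); such an element is routed through bounce_buf.
 * Anything else takes the PIO fallback at the "pio:" label.
 */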
for_each_sg(sg, sg_tmp, host->sg_len, i) {
if (sg_tmp->offset & align)
aligned = false;
if (sg_tmp->length & align) {
multiple = false;
break;
}
}
if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
align >= MAX_ALIGN)) || !multiple) {
ret = -EINVAL;
goto pio;
}
/* A single sg element may be unaligned; route it through our bounce buffer */
if (!aligned) {
sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
host->sg_ptr = &host->bounce_sg;
sg = host->sg_ptr;
}
ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE); ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
if (ret > 0) { if (ret > 0) {
...@@ -442,21 +833,21 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) ...@@ -442,21 +833,21 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
} }
if (desc) { if (desc) {
host->desc = desc;
desc->callback = tmio_dma_complete; desc->callback = tmio_dma_complete;
desc->callback_param = host; desc->callback_param = host;
host->cookie = desc->tx_submit(desc); cookie = desc->tx_submit(desc);
if (host->cookie < 0) { if (cookie < 0) {
host->desc = NULL; desc = NULL;
ret = host->cookie; ret = cookie;
} else { } else {
chan->device->device_issue_pending(chan); chan->device->device_issue_pending(chan);
} }
} }
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
__func__, host->sg_len, ret, host->cookie, host->mrq); __func__, host->sg_len, ret, cookie, host->mrq);
if (!host->desc) { pio:
if (!desc) {
/* DMA failed, fall back to PIO */ /* DMA failed, fall back to PIO */
if (ret >= 0) if (ret >= 0)
ret = -EIO; ret = -EIO;
...@@ -471,24 +862,49 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) ...@@ -471,24 +862,49 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
dev_warn(&host->pdev->dev, dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret); "DMA failed: %d, falling back to PIO\n", ret);
tmio_mmc_enable_dma(host, false); tmio_mmc_enable_dma(host, false);
reset(host);
/* Fail this request, let above layers recover */
host->mrq->cmd->error = ret;
tmio_mmc_finish_request(host);
} }
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
desc, host->cookie, host->sg_len); desc, cookie, host->sg_len);
return ret > 0 ? 0 : ret;
} }
static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
{ {
struct scatterlist *sg = host->sg_ptr; struct scatterlist *sg = host->sg_ptr, *sg_tmp;
struct dma_async_tx_descriptor *desc = NULL; struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx; struct dma_chan *chan = host->chan_tx;
int ret; struct mfd_cell *cell = host->pdev->dev.platform_data;
struct tmio_mmc_data *pdata = cell->driver_data;
dma_cookie_t cookie;
int ret, i;
bool aligned = true, multiple = true;
unsigned int align = (1 << pdata->dma->alignment_shift) - 1;
for_each_sg(sg, sg_tmp, host->sg_len, i) {
if (sg_tmp->offset & align)
aligned = false;
if (sg_tmp->length & align) {
multiple = false;
break;
}
}
if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
align >= MAX_ALIGN)) || !multiple) {
ret = -EINVAL;
goto pio;
}
/* A single sg element may be unaligned; route it through our bounce buffer */
if (!aligned) {
unsigned long flags;
void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags);
sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length);
memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length);
tmio_mmc_kunmap_atomic(sg_vaddr, &flags);
host->sg_ptr = &host->bounce_sg;
sg = host->sg_ptr;
}
ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE); ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
if (ret > 0) { if (ret > 0) {
...@@ -498,19 +914,19 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) ...@@ -498,19 +914,19 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
} }
if (desc) { if (desc) {
host->desc = desc;
desc->callback = tmio_dma_complete; desc->callback = tmio_dma_complete;
desc->callback_param = host; desc->callback_param = host;
host->cookie = desc->tx_submit(desc); cookie = desc->tx_submit(desc);
if (host->cookie < 0) { if (cookie < 0) {
host->desc = NULL; desc = NULL;
ret = host->cookie; ret = cookie;
} }
} }
dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
__func__, host->sg_len, ret, host->cookie, host->mrq); __func__, host->sg_len, ret, cookie, host->mrq);
if (!host->desc) { pio:
if (!desc) {
/* DMA failed, fall back to PIO */ /* DMA failed, fall back to PIO */
if (ret >= 0) if (ret >= 0)
ret = -EIO; ret = -EIO;
...@@ -525,30 +941,22 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) ...@@ -525,30 +941,22 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
dev_warn(&host->pdev->dev, dev_warn(&host->pdev->dev,
"DMA failed: %d, falling back to PIO\n", ret); "DMA failed: %d, falling back to PIO\n", ret);
tmio_mmc_enable_dma(host, false); tmio_mmc_enable_dma(host, false);
reset(host);
/* Fail this request, let above layers recover */
host->mrq->cmd->error = ret;
tmio_mmc_finish_request(host);
} }
dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
desc, host->cookie); desc, cookie);
return ret > 0 ? 0 : ret;
} }
static int tmio_mmc_start_dma(struct tmio_mmc_host *host, static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data) struct mmc_data *data)
{ {
if (data->flags & MMC_DATA_READ) { if (data->flags & MMC_DATA_READ) {
if (host->chan_rx) if (host->chan_rx)
return tmio_mmc_start_dma_rx(host); tmio_mmc_start_dma_rx(host);
} else { } else {
if (host->chan_tx) if (host->chan_tx)
return tmio_mmc_start_dma_tx(host); tmio_mmc_start_dma_tx(host);
} }
return 0;
} }
static void tmio_issue_tasklet_fn(unsigned long priv) static void tmio_issue_tasklet_fn(unsigned long priv)
...@@ -562,6 +970,12 @@ static void tmio_issue_tasklet_fn(unsigned long priv) ...@@ -562,6 +970,12 @@ static void tmio_issue_tasklet_fn(unsigned long priv)
static void tmio_tasklet_fn(unsigned long arg) static void tmio_tasklet_fn(unsigned long arg)
{ {
struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
if (!host->data)
goto out;
if (host->data->flags & MMC_DATA_READ) if (host->data->flags & MMC_DATA_READ)
dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen, dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
...@@ -571,6 +985,8 @@ static void tmio_tasklet_fn(unsigned long arg) ...@@ -571,6 +985,8 @@ static void tmio_tasklet_fn(unsigned long arg)
DMA_TO_DEVICE); DMA_TO_DEVICE);
tmio_mmc_do_data_irq(host); tmio_mmc_do_data_irq(host);
out:
spin_unlock_irqrestore(&host->lock, flags);
} }
/* It might be necessary to make filter MFD specific */ /* It might be necessary to make filter MFD specific */
...@@ -584,9 +1000,6 @@ static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) ...@@ -584,9 +1000,6 @@ static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
static void tmio_mmc_request_dma(struct tmio_mmc_host *host, static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata) struct tmio_mmc_data *pdata)
{ {
host->cookie = -EINVAL;
host->desc = NULL;
/* We can only either use DMA for both Tx and Rx or not use it at all */ /* We can only either use DMA for both Tx and Rx or not use it at all */
if (pdata->dma) { if (pdata->dma) {
dma_cap_mask_t mask; dma_cap_mask_t mask;
...@@ -632,15 +1045,15 @@ static void tmio_mmc_release_dma(struct tmio_mmc_host *host) ...@@ -632,15 +1045,15 @@ static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
host->chan_rx = NULL; host->chan_rx = NULL;
dma_release_channel(chan); dma_release_channel(chan);
} }
host->cookie = -EINVAL;
host->desc = NULL;
} }
#else #else
static int tmio_mmc_start_dma(struct tmio_mmc_host *host, static void tmio_check_bounce_buffer(struct tmio_mmc_host *host)
{
}
static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data) struct mmc_data *data)
{ {
return 0;
} }
static void tmio_mmc_request_dma(struct tmio_mmc_host *host, static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
...@@ -682,7 +1095,9 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host, ...@@ -682,7 +1095,9 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
return tmio_mmc_start_dma(host, data); tmio_mmc_start_dma(host, data);
return 0;
} }
/* Process requests from the MMC layer */ /* Process requests from the MMC layer */
...@@ -694,6 +1109,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) ...@@ -694,6 +1109,8 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (host->mrq) if (host->mrq)
pr_debug("request not null\n"); pr_debug("request not null\n");
host->last_req_ts = jiffies;
wmb();
host->mrq = mrq; host->mrq = mrq;
if (mrq->data) { if (mrq->data) {
...@@ -703,10 +1120,14 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) ...@@ -703,10 +1120,14 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
} }
ret = tmio_mmc_start_command(host, mrq->cmd); ret = tmio_mmc_start_command(host, mrq->cmd);
if (!ret) if (!ret) {
schedule_delayed_work(&host->delayed_reset_work,
msecs_to_jiffies(2000));
return; return;
}
fail: fail:
host->mrq = NULL;
mrq->cmd->error = ret; mrq->cmd->error = ret;
mmc_request_done(mmc, mrq); mmc_request_done(mmc, mrq);
} }
...@@ -780,6 +1201,7 @@ static const struct mmc_host_ops tmio_mmc_ops = { ...@@ -780,6 +1201,7 @@ static const struct mmc_host_ops tmio_mmc_ops = {
.set_ios = tmio_mmc_set_ios, .set_ios = tmio_mmc_set_ios,
.get_ro = tmio_mmc_get_ro, .get_ro = tmio_mmc_get_ro,
.get_cd = tmio_mmc_get_cd, .get_cd = tmio_mmc_get_cd,
.enable_sdio_irq = tmio_mmc_enable_sdio_irq,
}; };
#ifdef CONFIG_PM #ifdef CONFIG_PM
...@@ -864,10 +1286,15 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev) ...@@ -864,10 +1286,15 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
goto host_free; goto host_free;
mmc->ops = &tmio_mmc_ops; mmc->ops = &tmio_mmc_ops;
mmc->caps = MMC_CAP_4_BIT_DATA; mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities;
mmc->caps |= pdata->capabilities;
mmc->f_max = pdata->hclk; mmc->f_max = pdata->hclk;
mmc->f_min = mmc->f_max / 512; mmc->f_min = mmc->f_max / 512;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
if (pdata->ocr_mask) if (pdata->ocr_mask)
mmc->ocr_avail = pdata->ocr_mask; mmc->ocr_avail = pdata->ocr_mask;
else else
...@@ -890,12 +1317,19 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev) ...@@ -890,12 +1317,19 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
goto cell_disable; goto cell_disable;
disable_mmc_irqs(host, TMIO_MASK_ALL); disable_mmc_irqs(host, TMIO_MASK_ALL);
if (pdata->flags & TMIO_MMC_SDIO_IRQ)
tmio_mmc_enable_sdio_irq(mmc, 0);
ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host); IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
if (ret) if (ret)
goto cell_disable; goto cell_disable;
spin_lock_init(&host->lock);
/* Init delayed work for request timeouts */
INIT_DELAYED_WORK(&host->delayed_reset_work, tmio_mmc_reset_work);
/* See if we also get DMA */ /* See if we also get DMA */
tmio_mmc_request_dma(host, pdata); tmio_mmc_request_dma(host, pdata);
...@@ -934,6 +1368,7 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev) ...@@ -934,6 +1368,7 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
if (mmc) { if (mmc) {
struct tmio_mmc_host *host = mmc_priv(mmc); struct tmio_mmc_host *host = mmc_priv(mmc);
mmc_remove_host(mmc); mmc_remove_host(mmc);
cancel_delayed_work_sync(&host->delayed_reset_work);
tmio_mmc_release_dma(host); tmio_mmc_release_dma(host);
free_irq(host->irq, host); free_irq(host->irq, host);
if (cell->disable) if (cell->disable)
......
/* Definitions for use with tmio_mmc.c
*
* (c) 2004 Ian Molton <spyro@f2s.com>
* (c) 2007 Ian Molton <spyro@f2s.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
#define CTL_STOP_INTERNAL_ACTION 0x08
#define CTL_XFER_BLK_COUNT 0xa
#define CTL_RESPONSE 0x0c
#define CTL_STATUS 0x1c
#define CTL_IRQ_MASK 0x20
#define CTL_SD_CARD_CLK_CTL 0x24
#define CTL_SD_XFER_LEN 0x26
#define CTL_SD_MEM_CARD_OPT 0x28
#define CTL_SD_ERROR_DETAIL_STATUS 0x2c
#define CTL_SD_DATA_PORT 0x30
#define CTL_TRANSACTION_CTL 0x34
#define CTL_RESET_SD 0xe0
#define CTL_SDIO_REGS 0x100
#define CTL_CLK_AND_WAIT_CTL 0x138
#define CTL_RESET_SDIO 0x1e0
/* Definitions for values the CTRL_STATUS register can take. */
#define TMIO_STAT_CMDRESPEND 0x00000001
#define TMIO_STAT_DATAEND 0x00000004
#define TMIO_STAT_CARD_REMOVE 0x00000008
#define TMIO_STAT_CARD_INSERT 0x00000010
#define TMIO_STAT_SIGSTATE 0x00000020
#define TMIO_STAT_WRPROTECT 0x00000080
#define TMIO_STAT_CARD_REMOVE_A 0x00000100
#define TMIO_STAT_CARD_INSERT_A 0x00000200
#define TMIO_STAT_SIGSTATE_A 0x00000400
#define TMIO_STAT_CMD_IDX_ERR 0x00010000
#define TMIO_STAT_CRCFAIL 0x00020000
#define TMIO_STAT_STOPBIT_ERR 0x00040000
#define TMIO_STAT_DATATIMEOUT 0x00080000
#define TMIO_STAT_RXOVERFLOW 0x00100000
#define TMIO_STAT_TXUNDERRUN 0x00200000
#define TMIO_STAT_CMDTIMEOUT 0x00400000
#define TMIO_STAT_RXRDY 0x01000000
#define TMIO_STAT_TXRQ 0x02000000
#define TMIO_STAT_ILL_FUNC 0x20000000
#define TMIO_STAT_CMD_BUSY 0x40000000
#define TMIO_STAT_ILL_ACCESS 0x80000000
/* Define some IRQ masks */
/* This is the mask used at reset by the chip */
#define TMIO_MASK_ALL 0x837f031d
#define TMIO_MASK_READOP (TMIO_STAT_RXRDY | TMIO_STAT_DATAEND)
#define TMIO_MASK_WRITEOP (TMIO_STAT_TXRQ | TMIO_STAT_DATAEND)
#define TMIO_MASK_CMD (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT | \
TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)
#define enable_mmc_irqs(host, i) \
do { \
u32 mask;\
mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
mask &= ~((i) & TMIO_MASK_IRQ); \
sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
} while (0)
#define disable_mmc_irqs(host, i) \
do { \
u32 mask;\
mask = sd_ctrl_read32((host), CTL_IRQ_MASK); \
mask |= ((i) & TMIO_MASK_IRQ); \
sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \
} while (0)
#define ack_mmc_irqs(host, i) \
do { \
sd_ctrl_write32((host), CTL_STATUS, ~(i)); \
} while (0)
struct tmio_mmc_host {
void __iomem *ctl;
unsigned long bus_shift;
struct mmc_command *cmd;
struct mmc_request *mrq;
struct mmc_data *data;
struct mmc_host *mmc;
int irq;
/* Callbacks for clock / power control */
void (*set_pwr)(struct platform_device *host, int state);
void (*set_clk_div)(struct platform_device *host, int state);
/* pio related stuff */
struct scatterlist *sg_ptr;
unsigned int sg_len;
unsigned int sg_off;
struct platform_device *pdev;
/* DMA support */
struct dma_chan *chan_rx;
struct dma_chan *chan_tx;
struct tasklet_struct dma_complete;
struct tasklet_struct dma_issue;
#ifdef CONFIG_TMIO_MMC_DMA
struct dma_async_tx_descriptor *desc;
unsigned int dma_sglen;
dma_cookie_t cookie;
#endif
};
#include <linux/io.h>
static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
{
return readw(host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
readsw(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
{
return readw(host->ctl + (addr << host->bus_shift)) |
readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
}
static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr,
u16 val)
{
writew(val, host->ctl + (addr << host->bus_shift));
}
static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
u16 *buf, int count)
{
writesw(host->ctl + (addr << host->bus_shift), buf, count);
}
static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
u32 val)
{
writew(val, host->ctl + (addr << host->bus_shift));
writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
static inline void tmio_mmc_init_sg(struct tmio_mmc_host *host,
struct mmc_data *data)
{
host->sg_len = data->sg_len;
host->sg_ptr = data->sg;
host->sg_off = 0;
}
static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
host->sg_ptr = sg_next(host->sg_ptr);
host->sg_off = 0;
return --host->sg_len;
}
static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
unsigned long *flags)
{
local_irq_save(*flags);
return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
}
static inline void tmio_mmc_kunmap_atomic(void *virt,
unsigned long *flags)
{
kunmap_atomic(virt, KM_BIO_SRC_IRQ);
local_irq_restore(*flags);
}
#ifdef CONFIG_MMC_DEBUG
#define STATUS_TO_TEXT(a) \
do { \
if (status & TMIO_STAT_##a) \
printk(#a); \
} while (0)
void pr_debug_status(u32 status)
{
printk(KERN_DEBUG "status: %08x = ", status);
STATUS_TO_TEXT(CARD_REMOVE);
STATUS_TO_TEXT(CARD_INSERT);
STATUS_TO_TEXT(SIGSTATE);
STATUS_TO_TEXT(WRPROTECT);
STATUS_TO_TEXT(CARD_REMOVE_A);
STATUS_TO_TEXT(CARD_INSERT_A);
STATUS_TO_TEXT(SIGSTATE_A);
STATUS_TO_TEXT(CMD_IDX_ERR);
STATUS_TO_TEXT(STOPBIT_ERR);
STATUS_TO_TEXT(ILL_FUNC);
STATUS_TO_TEXT(CMD_BUSY);
STATUS_TO_TEXT(CMDRESPEND);
STATUS_TO_TEXT(DATAEND);
STATUS_TO_TEXT(CRCFAIL);
STATUS_TO_TEXT(DATATIMEOUT);
STATUS_TO_TEXT(CMDTIMEOUT);
STATUS_TO_TEXT(RXOVERFLOW);
STATUS_TO_TEXT(TXUNDERRUN);
STATUS_TO_TEXT(RXRDY);
STATUS_TO_TEXT(TXRQ);
STATUS_TO_TEXT(ILL_ACCESS);
printk("\n");
}
#else
#define pr_debug_status(s) do { } while (0)
#endif
...@@ -57,6 +57,10 @@ ...@@ -57,6 +57,10 @@
* is configured in 4-bit mode. * is configured in 4-bit mode.
*/ */
#define TMIO_MMC_BLKSZ_2BYTES (1 << 1) #define TMIO_MMC_BLKSZ_2BYTES (1 << 1)
/*
* Some controllers can support SDIO IRQ signalling.
*/
#define TMIO_MMC_SDIO_IRQ (1 << 2)
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base); int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
...@@ -66,6 +70,7 @@ void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state); ...@@ -66,6 +70,7 @@ void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
struct tmio_mmc_dma { struct tmio_mmc_dma {
void *chan_priv_tx; void *chan_priv_tx;
void *chan_priv_rx; void *chan_priv_rx;
int alignment_shift;
}; };
/* /*
......
/*
* Synopsys DesignWare Multimedia Card Interface driver
* (Based on NXP driver for lpc 31xx)
*
* Copyright (C) 2009 NXP Semiconductors
* Copyright (C) 2009, 2010 Imagination Technologies Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef _LINUX_MMC_DW_MMC_H_
#define _LINUX_MMC_DW_MMC_H_
#define MAX_MCI_SLOTS 2
enum dw_mci_state {
STATE_IDLE = 0,
STATE_SENDING_CMD,
STATE_SENDING_DATA,
STATE_DATA_BUSY,
STATE_SENDING_STOP,
STATE_DATA_ERROR,
};
enum {
EVENT_CMD_COMPLETE = 0,
EVENT_XFER_COMPLETE,
EVENT_DATA_COMPLETE,
EVENT_DATA_ERROR,
EVENT_XFER_ERROR
};
struct mmc_data;
/**
* struct dw_mci - MMC controller state shared between all slots
* @lock: Spinlock protecting the queue and associated data.
* @regs: Pointer to MMIO registers.
* @sg: Scatterlist entry currently being processed by PIO code, if any.
* @pio_offset: Offset into the current scatterlist entry.
* @cur_slot: The slot which is currently using the controller.
* @mrq: The request currently being processed on @cur_slot,
* or NULL if the controller is idle.
* @cmd: The command currently being sent to the card, or NULL.
* @data: The data currently being transferred, or NULL if no data
* transfer is in progress.
* @use_dma: Whether DMA channel is initialized or not.
* @sg_dma: Bus address of DMA buffer.
* @sg_cpu: Virtual address of DMA buffer.
* @dma_ops: Pointer to platform-specific DMA callbacks.
* @cmd_status: Snapshot of SR taken upon completion of the current
* command. Only valid when EVENT_CMD_COMPLETE is pending.
* @data_status: Snapshot of SR taken upon completion of the current
* data transfer. Only valid when EVENT_DATA_COMPLETE or
* EVENT_DATA_ERROR is pending.
* @stop_cmdr: Value to be loaded into CMDR when the stop command is
* to be sent.
* @dir_status: Direction of current transfer.
* @tasklet: Tasklet running the request state machine.
* @card_tasklet: Tasklet handling card detect.
* @pending_events: Bitmask of events flagged by the interrupt handler
* to be processed by the tasklet.
* @completed_events: Bitmask of events which the state machine has
* processed.
* @state: Tasklet state.
* @queue: List of slots waiting for access to the controller.
* @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
* rate and timeout calculations.
* @current_speed: Configured rate of the controller.
* @num_slots: Number of slots available.
* @pdev: Platform device associated with the MMC controller.
* @pdata: Platform data associated with the MMC controller.
* @slot: Slots sharing this MMC controller.
* @data_shift: log2 of FIFO item size.
* @push_data: Pointer to FIFO push function.
* @pull_data: Pointer to FIFO pull function.
* @quirks: Set of quirks that apply to specific versions of the IP.
*
* Locking
* =======
*
* @lock is a softirq-safe spinlock protecting @queue as well as
* @cur_slot, @mrq and @state. These must always be updated
* at the same time while holding @lock.
*
* The @mrq field of struct dw_mci_slot is also protected by @lock,
* and must always be written at the same time as the slot is added to
* @queue.
*
* @pending_events and @completed_events are accessed using atomic bit
* operations, so they don't need any locking.
*
* None of the fields touched by the interrupt handler need any
* locking. However, ordering is important: Before EVENT_DATA_ERROR or
* EVENT_DATA_COMPLETE is set in @pending_events, all data-related
* interrupts must be disabled and @data_status updated with a
* snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the
* CMDRDY interrupt must be disabled and @cmd_status updated with a
* snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the
* bytes_xfered field of @data must be written. This is ensured by
* using barriers.
*/
struct dw_mci {
spinlock_t lock;
void __iomem *regs;
struct scatterlist *sg;
unsigned int pio_offset;
struct dw_mci_slot *cur_slot;
struct mmc_request *mrq;
struct mmc_command *cmd;
struct mmc_data *data;
/* DMA interface members */
int use_dma;
dma_addr_t sg_dma;
void *sg_cpu;
struct dw_mci_dma_ops *dma_ops;
#ifdef CONFIG_MMC_DW_IDMAC
unsigned int ring_size;
#else
struct dw_mci_dma_data *dma_data;
#endif
u32 cmd_status;
u32 data_status;
u32 stop_cmdr;
u32 dir_status;
struct tasklet_struct tasklet;
struct tasklet_struct card_tasklet;
unsigned long pending_events;
unsigned long completed_events;
enum dw_mci_state state;
struct list_head queue;
u32 bus_hz;
u32 current_speed;
u32 num_slots;
struct platform_device *pdev;
struct dw_mci_board *pdata;
struct dw_mci_slot *slot[MAX_MCI_SLOTS];
/* FIFO push and pull */
int data_shift;
void (*push_data)(struct dw_mci *host, void *buf, int cnt);
void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
/* Workaround flags */
u32 quirks;
};
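To make the locking rules above concrete, here is a minimal hypothetical sketch (not part of this patch): event bits are handled with atomic bitops and need no lock, while the state machine advances only under the softirq-safe @lock.
static void example_tasklet_step(struct dw_mci *host)
{
	/* atomic bitops: the event bitmasks need no locking */
	if (!test_and_clear_bit(EVENT_CMD_COMPLETE, &host->pending_events))
		return;
	set_bit(EVENT_CMD_COMPLETE, &host->completed_events);

	/* @state (and @cur_slot/@mrq, not shown) only change under @lock */
	spin_lock(&host->lock);
	if (host->state == STATE_SENDING_CMD)
		host->state = host->data ? STATE_SENDING_DATA : STATE_IDLE;
	spin_unlock(&host->lock);
}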
/* DMA ops for Internal/External DMAC interface */
struct dw_mci_dma_ops {
/* DMA Ops */
int (*init)(struct dw_mci *host);
void (*start)(struct dw_mci *host, unsigned int sg_len);
void (*complete)(struct dw_mci *host);
void (*stop)(struct dw_mci *host);
void (*cleanup)(struct dw_mci *host);
void (*exit)(struct dw_mci *host);
};
/* IP Quirks/flags. */
/* No special quirks or flags to cater for */
#define DW_MCI_QUIRK_NONE 0
/* DTO fix for command transmission with IDMAC configured */
#define DW_MCI_QUIRK_IDMAC_DTO 1
/* delay needed between retries on some 2.11a implementations */
#define DW_MCI_QUIRK_RETRY_DELAY 2
/* High Speed Capable - Supports HS cards (up to 50 MHz) */
#define DW_MCI_QUIRK_HIGHSPEED 4
struct dma_pdata;
struct block_settings {
unsigned short max_segs; /* see blk_queue_max_segments */
unsigned int max_blk_size; /* maximum size of one mmc block */
unsigned int max_blk_count; /* maximum number of blocks in one req */
unsigned int max_req_size; /* maximum number of bytes in one req */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
};
/* Board platform data */
struct dw_mci_board {
u32 num_slots;
u32 quirks; /* Workaround / Quirk flags */
unsigned int bus_hz; /* Bus speed */
/* delay in mS before detecting cards after interrupt */
u32 detect_delay_ms;
int (*init)(u32 slot_id, irq_handler_t , void *);
int (*get_ro)(u32 slot_id);
int (*get_cd)(u32 slot_id);
int (*get_ocr)(u32 slot_id);
int (*get_bus_wd)(u32 slot_id);
/*
* Enable power to selected slot and set voltage to desired level.
* Voltage levels are specified using MMC_VDD_xxx defines defined
* in linux/mmc/host.h file.
*/
void (*setpower)(u32 slot_id, u32 volt);
void (*exit)(u32 slot_id);
void (*select_slot)(u32 slot_id);
struct dw_mci_dma_ops *dma_ops;
struct dma_pdata *data;
struct block_settings *blk_settings;
};
#endif /* _LINUX_MMC_DW_MMC_H_ */
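A hedged example of the board platform data this header expects; every value below is invented for illustration, only the dw_mci_board field names and the quirk flag come from the definitions above.
static struct dw_mci_board example_dw_mci_pdata = {
	.num_slots	 = 1,
	.quirks		 = DW_MCI_QUIRK_HIGHSPEED,
	.bus_hz		 = 50 * 1000 * 1000,	/* controller input clock, in Hz */
	.detect_delay_ms = 200,			/* debounce before rescanning the slot */
	/* .init/.get_ro/.get_cd/.setpower are optional per-board callbacks */
};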
...@@ -131,6 +131,9 @@ struct mmc_host { ...@@ -131,6 +131,9 @@ struct mmc_host {
unsigned int f_max; unsigned int f_max;
unsigned int f_init; unsigned int f_init;
u32 ocr_avail; u32 ocr_avail;
u32 ocr_avail_sdio; /* SDIO-specific OCR */
u32 ocr_avail_sd; /* SD-specific OCR */
u32 ocr_avail_mmc; /* MMC-specific OCR */
struct notifier_block pm_notify; struct notifier_block pm_notify;
#define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */ #define MMC_VDD_165_195 0x00000080 /* VDD voltage 1.65 - 1.95 */
...@@ -169,9 +172,20 @@ struct mmc_host { ...@@ -169,9 +172,20 @@ struct mmc_host {
#define MMC_CAP_1_2V_DDR (1 << 12) /* can support */ #define MMC_CAP_1_2V_DDR (1 << 12) /* can support */
/* DDR mode at 1.2V */ /* DDR mode at 1.2V */
#define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */ #define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */
#define MMC_CAP_BUS_WIDTH_TEST (1 << 14) /* CMD14/CMD19 bus width ok */
mmc_pm_flag_t pm_caps; /* supported pm features */ mmc_pm_flag_t pm_caps; /* supported pm features */
#ifdef CONFIG_MMC_CLKGATE
int clk_requests; /* internal reference counter */
unsigned int clk_delay; /* number of MCI clk hold cycles */
bool clk_gated; /* clock gated */
struct work_struct clk_gate_work; /* delayed clock gate */
unsigned int clk_old; /* old clock value cache */
spinlock_t clk_lock; /* lock for clk fields */
struct mutex clk_gate_mutex; /* mutex for clock gating */
#endif
/* host specific block data */ /* host specific block data */
unsigned int max_seg_size; /* see blk_queue_max_segment_size */ unsigned int max_seg_size; /* see blk_queue_max_segment_size */
unsigned short max_segs; /* see blk_queue_max_segments */ unsigned short max_segs; /* see blk_queue_max_segments */
...@@ -307,5 +321,10 @@ static inline int mmc_card_is_removable(struct mmc_host *host) ...@@ -307,5 +321,10 @@ static inline int mmc_card_is_removable(struct mmc_host *host)
return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable; return !(host->caps & MMC_CAP_NONREMOVABLE) && mmc_assume_removable;
} }
static inline int mmc_card_is_powered_resumed(struct mmc_host *host)
{
return host->pm_flags & MMC_PM_KEEP_POWER;
}
#endif #endif
...@@ -40,7 +40,9 @@ ...@@ -40,7 +40,9 @@
#define MMC_READ_DAT_UNTIL_STOP 11 /* adtc [31:0] dadr R1 */ #define MMC_READ_DAT_UNTIL_STOP 11 /* adtc [31:0] dadr R1 */
#define MMC_STOP_TRANSMISSION 12 /* ac R1b */ #define MMC_STOP_TRANSMISSION 12 /* ac R1b */
#define MMC_SEND_STATUS 13 /* ac [31:16] RCA R1 */ #define MMC_SEND_STATUS 13 /* ac [31:16] RCA R1 */
#define MMC_BUS_TEST_R 14 /* adtc R1 */
#define MMC_GO_INACTIVE_STATE 15 /* ac [31:16] RCA */ #define MMC_GO_INACTIVE_STATE 15 /* ac [31:16] RCA */
#define MMC_BUS_TEST_W 19 /* adtc R1 */
#define MMC_SPI_READ_OCR 58 /* spi spi_R3 */ #define MMC_SPI_READ_OCR 58 /* spi spi_R3 */
#define MMC_SPI_CRC_ON_OFF 59 /* spi [0:0] flag spi_R1 */ #define MMC_SPI_CRC_ON_OFF 59 /* spi [0:0] flag spi_R1 */
......
...@@ -83,6 +83,8 @@ struct sdhci_host { ...@@ -83,6 +83,8 @@ struct sdhci_host {
#define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28) #define SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 (1<<28)
/* Controller doesn't have HISPD bit field in HI-SPEED SD card */ /* Controller doesn't have HISPD bit field in HI-SPEED SD card */
#define SDHCI_QUIRK_NO_HISPD_BIT (1<<29) #define SDHCI_QUIRK_NO_HISPD_BIT (1<<29)
/* Controller treats ADMA descriptors with length 0000h incorrectly */
#define SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC (1<<30)
int irq; /* Device IRQ */ int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */ void __iomem *ioaddr; /* Mapped address */
...@@ -139,6 +141,10 @@ struct sdhci_host { ...@@ -139,6 +141,10 @@ struct sdhci_host {
unsigned int caps; /* Alternative capabilities */ unsigned int caps; /* Alternative capabilities */
unsigned int ocr_avail_sdio; /* OCR bit masks */
unsigned int ocr_avail_sd;
unsigned int ocr_avail_mmc;
unsigned long private[0] ____cacheline_aligned; unsigned long private[0] ____cacheline_aligned;
}; };
#endif /* __SDHCI_H */ #endif /* __SDHCI_H */
...@@ -1650,6 +1650,11 @@ ...@@ -1650,6 +1650,11 @@
#define PCI_DEVICE_ID_O2_6836 0x6836 #define PCI_DEVICE_ID_O2_6836 0x6836
#define PCI_DEVICE_ID_O2_6812 0x6872 #define PCI_DEVICE_ID_O2_6812 0x6872
#define PCI_DEVICE_ID_O2_6933 0x6933 #define PCI_DEVICE_ID_O2_6933 0x6933
#define PCI_DEVICE_ID_O2_8120 0x8120
#define PCI_DEVICE_ID_O2_8220 0x8220
#define PCI_DEVICE_ID_O2_8221 0x8221
#define PCI_DEVICE_ID_O2_8320 0x8320
#define PCI_DEVICE_ID_O2_8321 0x8321
#define PCI_VENDOR_ID_3DFX 0x121a #define PCI_VENDOR_ID_3DFX 0x121a
#define PCI_DEVICE_ID_3DFX_VOODOO 0x0001 #define PCI_DEVICE_ID_3DFX_VOODOO 0x0001
...@@ -2363,6 +2368,8 @@ ...@@ -2363,6 +2368,8 @@
#define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381 #define PCI_DEVICE_ID_JMICRON_JMB38X_SD 0x2381
#define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382 #define PCI_DEVICE_ID_JMICRON_JMB38X_MMC 0x2382
#define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383 #define PCI_DEVICE_ID_JMICRON_JMB38X_MS 0x2383
#define PCI_DEVICE_ID_JMICRON_JMB388_SD 0x2391
#define PCI_DEVICE_ID_JMICRON_JMB388_ESD 0x2392
#define PCI_VENDOR_ID_KORENIX 0x1982 #define PCI_VENDOR_ID_KORENIX 0x1982
#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600 #define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600
......