Commit e93252fa authored by Linus Torvalds

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev:
  [PATCH] libata: Remove dependence on host_set->dev for SAS
  [PATCH] libata: ata_scsi_ioctl cleanup
  [PATCH] libata: ata_scsi_queuecmd cleanup
  [libata] export ata_dev_pair; trim trailing whitespace
  [PATCH] libata: add ata_dev_pair helper
  [PATCH] Make libata not powerdown drivers on PM_EVENT_FREEZE.
  [PATCH] libata: make ata_set_mode() responsible for failure handling
  [PATCH] libata: use ata_dev_disable() in ata_bus_probe()
  [PATCH] libata: implement ata_dev_disable()
  [PATCH] libata: check if port is disabled after internal command
  [PATCH] libata: make per-dev transfer mode limits per-dev
  [PATCH] libata: add per-dev pio/mwdma/udma_mask
  [PATCH] libata: implement ata_unpack_xfermask()
  [libata] Move some bmdma-specific code to libata-bmdma.c
  [libata sata_uli] kill scr_addr abuse
  [libata sata_nv] eliminate duplicate codepaths with iomap
  [libata sata_nv] cleanups: convert #defines to enums; remove in-file history
  [libata sata_sil24] cleanups: use pci_iomap(), kzalloc()
parents f125b561 2f1f610b
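The headline change across these patches is that transfer-mode limits move from the port to the individual device: struct ata_device gains pio_mask/mwdma_mask/udma_mask, and quirks now clamp a single drive instead of the whole channel. As a quick orientation before the hunks below, here is a minimal illustrative sketch of the new pattern, modeled on the sata_sil hunk further down this page; the function name and the quirk check are placeholders invented for the example, not code from this merge.

/* Illustrative only -- per-device clamping in an LLDD's ->dev_config() hook. */
static void example_dev_config(struct ata_port *ap, struct ata_device *dev)
{
	/* pre-merge style: ap->udma_mask &= ATA_UDMA5;
	 * limited both devices on the port even if only one was quirky.
	 */
	if (example_device_is_quirky(dev))	/* placeholder check */
		dev->udma_mask &= ATA_UDMA5;	/* cap this device at UDMA/100 */
}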
...@@ -418,6 +418,240 @@ u8 ata_altstatus(struct ata_port *ap)
return inb(ap->ioaddr.altstatus_addr);
}
/**
* ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
u8 dmactl;
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
/* load PRD table addr. */
mb(); /* make sure PRD table writes are visible to controller */
writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
/* specify data direction, triple-check start bit is clear */
dmactl = readb(mmio + ATA_DMA_CMD);
dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
if (!rw)
dmactl |= ATA_DMA_WR;
writeb(dmactl, mmio + ATA_DMA_CMD);
/* issue r/w command */
ap->ops->exec_command(ap, &qc->tf);
}
/**
* ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
u8 dmactl;
/* start host DMA transaction */
dmactl = readb(mmio + ATA_DMA_CMD);
writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
/* Strictly, one may wish to issue a readb() here, to
* flush the mmio write. However, control also passes
* to the hardware at this point, and it will interrupt
* us when we are to resume control. So, in effect,
* we don't care when the mmio write flushes.
* Further, a read of the DMA status register _immediately_
* following the write may not be what certain flaky hardware
* is expecting, so I think it is best to not add a readb()
* without first checking all the MMIO ATA cards/mobos.
* Or maybe I'm just being paranoid.
*/
}
/**
* ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
u8 dmactl;
/* load PRD table addr. */
outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
/* specify data direction, triple-check start bit is clear */
dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
if (!rw)
dmactl |= ATA_DMA_WR;
outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
/* issue r/w command */
ap->ops->exec_command(ap, &qc->tf);
}
/**
* ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
u8 dmactl;
/* start host DMA transaction */
dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
outb(dmactl | ATA_DMA_START,
ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
/**
* ata_bmdma_start - Start a PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
* Writes the ATA_DMA_START flag to the DMA command register.
*
* May be used as the bmdma_start() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
if (qc->ap->flags & ATA_FLAG_MMIO)
ata_bmdma_start_mmio(qc);
else
ata_bmdma_start_pio(qc);
}
/**
* ata_bmdma_setup - Set up PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
* Writes address of PRD table to device's PRD Table Address
* register, sets the DMA control register, and calls
* ops->exec_command() to start the transfer.
*
* May be used as the bmdma_setup() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
if (qc->ap->flags & ATA_FLAG_MMIO)
ata_bmdma_setup_mmio(qc);
else
ata_bmdma_setup_pio(qc);
}
/**
* ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
* @ap: Port associated with this ATA transaction.
*
* Clear interrupt and error flags in DMA status register.
*
* May be used as the irq_clear() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_bmdma_irq_clear(struct ata_port *ap)
{
if (!ap->ioaddr.bmdma_addr)
return;
if (ap->flags & ATA_FLAG_MMIO) {
void __iomem *mmio =
((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
writeb(readb(mmio), mmio);
} else {
unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
outb(inb(addr), addr);
}
}
/**
* ata_bmdma_status - Read PCI IDE BMDMA status
* @ap: Port associated with this ATA transaction.
*
* Read and return BMDMA status register.
*
* May be used as the bmdma_status() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
u8 ata_bmdma_status(struct ata_port *ap)
{
u8 host_stat;
if (ap->flags & ATA_FLAG_MMIO) {
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
host_stat = readb(mmio + ATA_DMA_STATUS);
} else
host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
return host_stat;
}
/**
* ata_bmdma_stop - Stop PCI IDE BMDMA transfer
* @qc: Command we are ending DMA for
*
* Clears the ATA_DMA_START flag in the dma control register
*
* May be used as the bmdma_stop() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
if (ap->flags & ATA_FLAG_MMIO) {
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
/* clear start/stop bit */
writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
mmio + ATA_DMA_CMD);
} else {
/* clear start/stop bit */
outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
ata_altstatus(ap); /* dummy read */
}
#ifdef CONFIG_PCI
static struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
...
...@@ -64,9 +64,9 @@
static unsigned int ata_dev_init_params(struct ata_port *ap,
struct ata_device *dev);
static void ata_set_mode(struct ata_port *ap);
-static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
-static unsigned int ata_dev_xfermask(struct ata_port *ap,
-struct ata_device *dev);
+static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
+struct ata_device *dev);
+static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;
...@@ -252,6 +252,29 @@ static unsigned int ata_pack_xfermask(unsigned int pio_mask,
((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
/**
* ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
* @xfer_mask: xfer_mask to unpack
* @pio_mask: resulting pio_mask
* @mwdma_mask: resulting mwdma_mask
* @udma_mask: resulting udma_mask
*
* Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
* Any NULL destination masks will be ignored.
*/
static void ata_unpack_xfermask(unsigned int xfer_mask,
unsigned int *pio_mask,
unsigned int *mwdma_mask,
unsigned int *udma_mask)
{
if (pio_mask)
*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
if (mwdma_mask)
*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
if (udma_mask)
*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
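The pack/unpack pair above is easiest to see with concrete numbers. Below is a small, self-contained userspace sketch of the same round trip; the shift and width values are assumptions mirroring include/linux/ata.h (PIO in bits 0-4, MWDMA in 5-7, UDMA from bit 8), not something stated in this diff.

/* Illustrative round-trip demo of pack/unpack; shift/mask values are
 * assumptions mirroring include/linux/ata.h, not taken from this diff. */
#include <stdio.h>

enum {
	SHIFT_PIO = 0, MASK_PIO = 0x1f << SHIFT_PIO,		/* assumed: PIO bits 0-4 */
	SHIFT_MWDMA = 5, MASK_MWDMA = 0x07 << SHIFT_MWDMA,	/* assumed: MWDMA bits 5-7 */
	SHIFT_UDMA = 8, MASK_UDMA = 0x7f << SHIFT_UDMA,		/* assumed: UDMA bits 8-14 */
};

static unsigned int pack_xfermask(unsigned int pio, unsigned int mwdma, unsigned int udma)
{
	return ((pio << SHIFT_PIO) & MASK_PIO) |
	       ((mwdma << SHIFT_MWDMA) & MASK_MWDMA) |
	       ((udma << SHIFT_UDMA) & MASK_UDMA);
}

static void unpack_xfermask(unsigned int xfer_mask, unsigned int *pio,
			    unsigned int *mwdma, unsigned int *udma)
{
	if (pio)
		*pio = (xfer_mask & MASK_PIO) >> SHIFT_PIO;
	if (mwdma)
		*mwdma = (xfer_mask & MASK_MWDMA) >> SHIFT_MWDMA;
	if (udma)
		*udma = (xfer_mask & MASK_UDMA) >> SHIFT_UDMA;
}

int main(void)
{
	unsigned int pio, mwdma, udma;
	unsigned int xfer_mask = pack_xfermask(0x1f, 0x07, 0x3f); /* PIO0-4, MWDMA0-2, UDMA0-5 */

	unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	printf("xfer_mask=0x%x pio=0x%x mwdma=0x%x udma=0x%x\n", xfer_mask, pio, mwdma, udma);
	return 0;	/* prints xfer_mask=0x3fff pio=0x1f mwdma=0x7 udma=0x3f */
}

In the kernel code itself, ata_dev_xfermask() further down uses the real ATA_SHIFT_*/ATA_MASK_* constants and stores the unpacked result into dev->pio_mask, dev->mwdma_mask and dev->udma_mask.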
static const struct ata_xfer_ent {
unsigned int shift, bits;
u8 base;
...@@ -372,6 +395,15 @@ static const char *ata_mode_string(unsigned int xfer_mask)
return "<n/a>";
}
static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
{
if (ata_dev_present(dev)) {
printk(KERN_WARNING "ata%u: dev %u disabled\n",
ap->id, dev->devno);
dev->class++;
}
}
/**
* ata_pio_devchk - PATA device presence detection
* @ap: ATA channel to examine
...@@ -987,6 +1019,22 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
ata_qc_free(qc);
/* XXX - Some LLDDs (sata_mv) disable port on command failure.
* Until those drivers are fixed, we detect the condition
* here, fail the command with AC_ERR_SYSTEM and reenable the
* port.
*
* Note that this doesn't change any behavior as internal
* command failure results in disabling the device in the
* higher layer for LLDDs without new reset/EH callbacks.
*
* Kill the following code as soon as those drivers are fixed.
*/
if (ap->flags & ATA_FLAG_PORT_DISABLED) {
err_mask |= AC_ERR_SYSTEM;
ata_port_probe(ap);
}
return err_mask;
}
...@@ -1305,7 +1353,7 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
if (print_info)
printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
ap->id, dev->devno);
-ap->udma_mask &= ATA_UDMA5;
+dev->udma_mask &= ATA_UDMA5;
dev->max_sectors = ATA_MAX_SECTORS;
}
...@@ -1316,8 +1364,6 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
return 0;
err_out_nosup:
-printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
-ap->id, dev->devno);
DPRINTK("EXIT, err\n");
return rc;
}
...@@ -1384,7 +1430,7 @@ static int ata_bus_probe(struct ata_port *ap)
}
if (ata_dev_configure(ap, dev, 1)) {
-dev->class++; /* disable device */
+ata_dev_disable(ap, dev);
continue;
}
...@@ -1529,6 +1575,23 @@ void sata_phy_reset(struct ata_port *ap)
ata_bus_reset(ap);
}
/**
* ata_dev_pair - return other device on cable
* @ap: port
* @adev: device
*
* Obtain the other device on the same cable, or if none is
* present NULL is returned
*/
struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev)
{
struct ata_device *pair = &ap->device[1 - adev->devno];
if (!ata_dev_present(pair))
return NULL;
return pair;
}
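ata_dev_pair() is exported further down so that LLDDs can look at both devices sharing a PATA cable. The snippet below is a hypothetical use from a driver's mode-limiting path, shown only to illustrate the call; the policy and the function name are invented for the example, and the only API taken from this commit is ata_dev_pair() itself plus the new per-device udma_mask.

/* Hypothetical LLDD helper, for illustration only. */
static void example_limit_shared_cable(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(ap, adev);

	if (!pair)		/* no companion device on this cable */
		return;

	/* example policy: cap both devices at UDMA/66 when they share a cable */
	adev->udma_mask &= ATA_UDMA4;
	pair->udma_mask &= ATA_UDMA4;
}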
/**
* ata_port_disable - Disable port.
* @ap: Port to be disabled.
...@@ -1697,20 +1760,28 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
return 0;
}
-static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
+static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
-if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
-return;
+unsigned int err_mask;
+int rc;
if (dev->xfer_shift == ATA_SHIFT_PIO)
dev->flags |= ATA_DFLAG_PIO;
-ata_dev_set_xfermode(ap, dev);
+err_mask = ata_dev_set_xfermode(ap, dev);
+if (err_mask) {
+printk(KERN_ERR
+"ata%u: failed to set xfermode (err_mask=0x%x)\n",
+ap->id, err_mask);
+return -EIO;
+}
-if (ata_dev_revalidate(ap, dev, 0)) {
-printk(KERN_ERR "ata%u: failed to revalidate after set "
-"xfermode, disabled\n", ap->id);
-ata_port_disable(ap);
+rc = ata_dev_revalidate(ap, dev, 0);
+if (rc) {
+printk(KERN_ERR
+"ata%u: failed to revalidate after set xfermode\n",
+ap->id);
+return rc;
}
DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
...@@ -1719,6 +1790,7 @@ static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
printk(KERN_INFO "ata%u: dev %u configured for %s\n",
ap->id, dev->devno,
ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
+return 0;
}
static int ata_host_set_pio(struct ata_port *ap)
...@@ -1778,16 +1850,19 @@ static void ata_set_mode(struct ata_port *ap)
/* step 1: calculate xfer_mask */
for (i = 0; i < ATA_MAX_DEVICES; i++) {
struct ata_device *dev = &ap->device[i];
-unsigned int xfer_mask;
+unsigned int pio_mask, dma_mask;
if (!ata_dev_present(dev))
continue;
-xfer_mask = ata_dev_xfermask(ap, dev);
-dev->pio_mode = ata_xfer_mask2mode(xfer_mask & ATA_MASK_PIO);
-dev->dma_mode = ata_xfer_mask2mode(xfer_mask & (ATA_MASK_MWDMA |
-ATA_MASK_UDMA));
+ata_dev_xfermask(ap, dev);
+/* TODO: let LLDD filter dev->*_mask here */
+pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
+dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
+dev->pio_mode = ata_xfer_mask2mode(pio_mask);
+dev->dma_mode = ata_xfer_mask2mode(dma_mask);
}
/* step 2: always set host PIO timings */
...@@ -1799,11 +1874,15 @@ static void ata_set_mode(struct ata_port *ap)
ata_host_set_dma(ap);
/* step 4: update devices' xfer mode */
-for (i = 0; i < ATA_MAX_DEVICES; i++)
-ata_dev_set_mode(ap, &ap->device[i]);
-if (ap->flags & ATA_FLAG_PORT_DISABLED)
-return;
+for (i = 0; i < ATA_MAX_DEVICES; i++) {
+struct ata_device *dev = &ap->device[i];
+if (!ata_dev_present(dev))
+continue;
+if (ata_dev_set_mode(ap, dev))
+goto err_out;
+}
if (ap->ops->post_set_mode)
ap->ops->post_set_mode(ap);
...@@ -2630,18 +2709,15 @@ static int ata_dma_blacklisted(const struct ata_device *dev)
* @ap: Port on which the device to compute xfermask for resides
* @dev: Device to compute xfermask for
*
-* Compute supported xfermask of @dev. This function is
-* responsible for applying all known limits including host
-* controller limits, device blacklist, etc...
+* Compute supported xfermask of @dev and store it in
+* dev->*_mask. This function is responsible for applying all
+* known limits including host controller limits, device
+* blacklist, etc...
*
* LOCKING:
* None.
-*
-* RETURNS:
-* Computed xfermask.
*/
-static unsigned int ata_dev_xfermask(struct ata_port *ap,
-struct ata_device *dev)
+static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
{
unsigned long xfer_mask;
int i;
...@@ -2654,6 +2730,8 @@ static unsigned int ata_dev_xfermask(struct ata_port *ap,
struct ata_device *d = &ap->device[i];
if (!ata_dev_present(d))
continue;
+xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
+d->udma_mask);
xfer_mask &= ata_id_xfermask(d->id);
if (ata_dma_blacklisted(d))
xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
...@@ -2663,7 +2741,8 @@ static unsigned int ata_dev_xfermask(struct ata_port *ap,
printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, "
"disabling DMA\n", ap->id, dev->devno);
-return xfer_mask;
+ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
+&dev->udma_mask);
}
/**
...@@ -2676,11 +2755,16 @@ static unsigned int ata_dev_xfermask(struct ata_port *ap,
*
* LOCKING:
* PCI/etc. bus probe sem.
+*
+* RETURNS:
+* 0 on success, AC_ERR_* mask otherwise.
*/
-static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
+static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
+struct ata_device *dev)
{
struct ata_taskfile tf;
+unsigned int err_mask;
/* set up set-features taskfile */
DPRINTK("set features - xfer mode\n");
...@@ -2692,13 +2776,10 @@ static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
tf.protocol = ATA_PROT_NODATA;
tf.nsect = dev->xfer_mode;
-if (ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0)) {
-printk(KERN_ERR "ata%u: failed to set xfermode, disabled\n",
-ap->id);
-ata_port_disable(ap);
-}
+err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
-DPRINTK("EXIT\n");
+DPRINTK("EXIT, err_mask=%x\n", err_mask);
+return err_mask;
}
/**
...@@ -2775,7 +2856,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
if (qc->flags & ATA_QCFLAG_SG) {
if (qc->n_elem)
-dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
+dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
/* restore last sg */
sg[qc->orig_n_elem - 1].length += qc->pad_len;
if (pad_buf) {
...@@ -2786,7 +2867,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
}
} else {
if (qc->n_elem)
-dma_unmap_single(ap->host_set->dev,
+dma_unmap_single(ap->dev,
sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
dir);
/* restore sg */
...@@ -2997,7 +3078,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc)
goto skip_map;
}
-dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
+dma_address = dma_map_single(ap->dev, qc->buf_virt,
sg->length, dir);
if (dma_mapping_error(dma_address)) {
/* restore sg */
...@@ -3085,7 +3166,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
}
dir = qc->dma_dir;
-n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir);
+n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
if (n_elem < 1) {
/* restore last sg */
lsg->length += qc->pad_len;
...@@ -4064,240 +4145,6 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
return 0;
}
/**
* ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
u8 dmactl;
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
/* load PRD table addr. */
mb(); /* make sure PRD table writes are visible to controller */
writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
/* specify data direction, triple-check start bit is clear */
dmactl = readb(mmio + ATA_DMA_CMD);
dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
if (!rw)
dmactl |= ATA_DMA_WR;
writeb(dmactl, mmio + ATA_DMA_CMD);
/* issue r/w command */
ap->ops->exec_command(ap, &qc->tf);
}
/**
* ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
u8 dmactl;
/* start host DMA transaction */
dmactl = readb(mmio + ATA_DMA_CMD);
writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
/* Strictly, one may wish to issue a readb() here, to
* flush the mmio write. However, control also passes
* to the hardware at this point, and it will interrupt
* us when we are to resume control. So, in effect,
* we don't care when the mmio write flushes.
* Further, a read of the DMA status register _immediately_
* following the write may not be what certain flaky hardware
* is expecting, so I think it is best to not add a readb()
* without first checking all the MMIO ATA cards/mobos.
* Or maybe I'm just being paranoid.
*/
}
/**
* ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
u8 dmactl;
/* load PRD table addr. */
outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
/* specify data direction, triple-check start bit is clear */
dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
if (!rw)
dmactl |= ATA_DMA_WR;
outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
/* issue r/w command */
ap->ops->exec_command(ap, &qc->tf);
}
/**
* ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
u8 dmactl;
/* start host DMA transaction */
dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
outb(dmactl | ATA_DMA_START,
ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
/**
* ata_bmdma_start - Start a PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
* Writes the ATA_DMA_START flag to the DMA command register.
*
* May be used as the bmdma_start() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
if (qc->ap->flags & ATA_FLAG_MMIO)
ata_bmdma_start_mmio(qc);
else
ata_bmdma_start_pio(qc);
}
/**
* ata_bmdma_setup - Set up PCI IDE BMDMA transaction
* @qc: Info associated with this ATA transaction.
*
* Writes address of PRD table to device's PRD Table Address
* register, sets the DMA control register, and calls
* ops->exec_command() to start the transfer.
*
* May be used as the bmdma_setup() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
if (qc->ap->flags & ATA_FLAG_MMIO)
ata_bmdma_setup_mmio(qc);
else
ata_bmdma_setup_pio(qc);
}
/**
* ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
* @ap: Port associated with this ATA transaction.
*
* Clear interrupt and error flags in DMA status register.
*
* May be used as the irq_clear() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_bmdma_irq_clear(struct ata_port *ap)
{
if (!ap->ioaddr.bmdma_addr)
return;
if (ap->flags & ATA_FLAG_MMIO) {
void __iomem *mmio =
((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
writeb(readb(mmio), mmio);
} else {
unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
outb(inb(addr), addr);
}
}
/**
* ata_bmdma_status - Read PCI IDE BMDMA status
* @ap: Port associated with this ATA transaction.
*
* Read and return BMDMA status register.
*
* May be used as the bmdma_status() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
u8 ata_bmdma_status(struct ata_port *ap)
{
u8 host_stat;
if (ap->flags & ATA_FLAG_MMIO) {
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
host_stat = readb(mmio + ATA_DMA_STATUS);
} else
host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
return host_stat;
}
/**
* ata_bmdma_stop - Stop PCI IDE BMDMA transfer
* @qc: Command we are ending DMA for
*
* Clears the ATA_DMA_START flag in the dma control register
*
* May be used as the bmdma_stop() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
*/
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
if (ap->flags & ATA_FLAG_MMIO) {
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
/* clear start/stop bit */
writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
mmio + ATA_DMA_CMD);
} else {
/* clear start/stop bit */
outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
ata_altstatus(ap); /* dummy read */
}
/**
* ata_host_intr - Handle host interrupt for given (port, task)
* @ap: Port on which interrupt arrived (possibly...)
...@@ -4506,13 +4353,14 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev)
* Flush the cache on the drive, if appropriate, then issue a
* standbynow command.
*/
-int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
+int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state)
{
if (!ata_dev_present(dev))
return 0;
if (dev->class == ATA_DEV_ATA)
ata_flush_cache(ap, dev);
+if (state.event != PM_EVENT_FREEZE)
ata_standby_drive(ap, dev);
ap->flags |= ATA_FLAG_SUSPENDED;
return 0;
...@@ -4533,7 +4381,7 @@ int ata_device_suspend(struct ata_port *ap, struct ata_device *dev)
int ata_port_start (struct ata_port *ap)
{
-struct device *dev = ap->host_set->dev;
+struct device *dev = ap->dev;
int rc;
ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
...@@ -4566,7 +4414,7 @@ int ata_port_start (struct ata_port *ap)
void ata_port_stop (struct ata_port *ap)
{
-struct device *dev = ap->host_set->dev;
+struct device *dev = ap->dev;
dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
ata_pad_free(ap, dev);
...@@ -4632,6 +4480,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
ap->host = host;
ap->ctl = ATA_DEVCTL_OBS;
ap->host_set = host_set;
+ap->dev = ent->dev;
ap->port_no = port_no;
ap->hard_port_no =
ent->legacy_mode ? ent->hard_port_no : port_no;
...@@ -4647,8 +4496,13 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
INIT_WORK(&ap->port_task, NULL, NULL);
INIT_LIST_HEAD(&ap->eh_done_q);
-for (i = 0; i < ATA_MAX_DEVICES; i++)
-ap->device[i].devno = i;
+for (i = 0; i < ATA_MAX_DEVICES; i++) {
+struct ata_device *dev = &ap->device[i];
+dev->devno = i;
+dev->pio_mask = UINT_MAX;
+dev->mwdma_mask = UINT_MAX;
+dev->udma_mask = UINT_MAX;
+}
#ifdef ATA_IRQ_TRAP
ap->stats.unhandled_irq = 1;
...@@ -5114,6 +4968,8 @@ EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_std_probe_reset);
EXPORT_SYMBOL_GPL(ata_drive_probe_reset);
EXPORT_SYMBOL_GPL(ata_dev_revalidate);
+EXPORT_SYMBOL_GPL(ata_dev_classify);
+EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
...@@ -5124,7 +4980,6 @@ EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
-EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
...
...@@ -267,20 +267,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
{
-struct ata_port *ap;
-struct ata_device *dev;
int val = -EINVAL, rc = -EINVAL;
-ap = (struct ata_port *) &scsidev->host->hostdata[0];
-if (!ap)
-goto out;
-dev = ata_scsi_find_dev(ap, scsidev);
-if (!dev) {
-rc = -ENODEV;
-goto out;
-}
switch (cmd) {
case ATA_IOC_GET_IO32:
val = 0;
...@@ -309,7 +297,6 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
break;
}
-out:
return rc;
}
...@@ -414,12 +401,12 @@ int ata_scsi_device_resume(struct scsi_device *sdev)
return ata_device_resume(ap, dev);
}
-int ata_scsi_device_suspend(struct scsi_device *sdev)
+int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
{
struct ata_port *ap = (struct ata_port *) &sdev->host->hostdata[0];
struct ata_device *dev = &ap->device[sdev->id];
-return ata_device_suspend(ap, dev);
+return ata_device_suspend(ap, dev, state);
}
/**
...@@ -2597,6 +2584,21 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
#endif
}
static inline void __ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
struct ata_port *ap, struct ata_device *dev)
{
if (dev->class == ATA_DEV_ATA) {
ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
cmd->cmnd[0]);
if (xlat_func)
ata_scsi_translate(ap, dev, cmd, done, xlat_func);
else
ata_scsi_simulate(ap, dev, cmd, done);
} else
ata_scsi_translate(ap, dev, cmd, done, atapi_xlat);
}
/**
* ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device
* @cmd: SCSI command to be sent
...@@ -2631,24 +2633,13 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
ata_scsi_dump_cdb(ap, cmd);
dev = ata_scsi_find_dev(ap, scsidev);
-if (unlikely(!dev)) {
+if (likely(dev))
+__ata_scsi_queuecmd(cmd, done, ap, dev);
+else {
cmd->result = (DID_BAD_TARGET << 16);
done(cmd);
-goto out_unlock;
}
-if (dev->class == ATA_DEV_ATA) {
-ata_xlat_func_t xlat_func = ata_get_xlat_func(dev,
-cmd->cmnd[0]);
-if (xlat_func)
-ata_scsi_translate(ap, dev, cmd, done, xlat_func);
-else
-ata_scsi_simulate(ap, dev, cmd, done);
-} else
-ata_scsi_translate(ap, dev, cmd, done, atapi_xlat);
-out_unlock:
spin_unlock(&ap->host_set->lock);
spin_lock(shost->host_lock);
return 0;
...
...@@ -29,34 +29,6 @@
* NV-specific details such as register offsets, SATA phy location,
* hotplug info, etc.
*
* 0.10
* - Fixed spurious interrupts issue seen with the Maxtor 6H500F0 500GB
* drive. Also made the check_hotplug() callbacks return whether there
* was a hotplug interrupt or not. This was not the source of the
* spurious interrupts, but is the right thing to do anyway.
*
* 0.09
* - Fixed bug introduced by 0.08's MCP51 and MCP55 support.
*
* 0.08
* - Added support for MCP51 and MCP55.
*
* 0.07
* - Added support for RAID class code.
*
* 0.06
* - Added generic SATA support by using a pci_device_id that filters on
* the IDE storage class code.
*
* 0.03
* - Fixed a bug where the hotplug handlers for non-CK804/MCP04 were using
* mmio_base, which is only set for the CK804/MCP04 case.
*
* 0.02
* - Added support for CK804 SATA controller.
*
* 0.01
* - Initial revision.
*/
#include <linux/config.h>
...@@ -74,53 +46,55 @@
#define DRV_NAME "sata_nv"
#define DRV_VERSION "0.8"
-#define NV_PORTS 2
-#define NV_PIO_MASK 0x1f
-#define NV_MWDMA_MASK 0x07
-#define NV_UDMA_MASK 0x7f
-#define NV_PORT0_SCR_REG_OFFSET 0x00
-#define NV_PORT1_SCR_REG_OFFSET 0x40
-#define NV_INT_STATUS 0x10
-#define NV_INT_STATUS_CK804 0x440
-#define NV_INT_STATUS_PDEV_INT 0x01
-#define NV_INT_STATUS_PDEV_PM 0x02
-#define NV_INT_STATUS_PDEV_ADDED 0x04
-#define NV_INT_STATUS_PDEV_REMOVED 0x08
-#define NV_INT_STATUS_SDEV_INT 0x10
-#define NV_INT_STATUS_SDEV_PM 0x20
-#define NV_INT_STATUS_SDEV_ADDED 0x40
-#define NV_INT_STATUS_SDEV_REMOVED 0x80
-#define NV_INT_STATUS_PDEV_HOTPLUG (NV_INT_STATUS_PDEV_ADDED | \
-NV_INT_STATUS_PDEV_REMOVED)
-#define NV_INT_STATUS_SDEV_HOTPLUG (NV_INT_STATUS_SDEV_ADDED | \
-NV_INT_STATUS_SDEV_REMOVED)
-#define NV_INT_STATUS_HOTPLUG (NV_INT_STATUS_PDEV_HOTPLUG | \
-NV_INT_STATUS_SDEV_HOTPLUG)
-#define NV_INT_ENABLE 0x11
-#define NV_INT_ENABLE_CK804 0x441
-#define NV_INT_ENABLE_PDEV_MASK 0x01
-#define NV_INT_ENABLE_PDEV_PM 0x02
-#define NV_INT_ENABLE_PDEV_ADDED 0x04
-#define NV_INT_ENABLE_PDEV_REMOVED 0x08
-#define NV_INT_ENABLE_SDEV_MASK 0x10
-#define NV_INT_ENABLE_SDEV_PM 0x20
-#define NV_INT_ENABLE_SDEV_ADDED 0x40
-#define NV_INT_ENABLE_SDEV_REMOVED 0x80
-#define NV_INT_ENABLE_PDEV_HOTPLUG (NV_INT_ENABLE_PDEV_ADDED | \
-NV_INT_ENABLE_PDEV_REMOVED)
-#define NV_INT_ENABLE_SDEV_HOTPLUG (NV_INT_ENABLE_SDEV_ADDED | \
-NV_INT_ENABLE_SDEV_REMOVED)
-#define NV_INT_ENABLE_HOTPLUG (NV_INT_ENABLE_PDEV_HOTPLUG | \
-NV_INT_ENABLE_SDEV_HOTPLUG)
-#define NV_INT_CONFIG 0x12
-#define NV_INT_CONFIG_METHD 0x01 // 0 = INT, 1 = SMI
-// For PCI config register 20
-#define NV_MCP_SATA_CFG_20 0x50
-#define NV_MCP_SATA_CFG_20_SATA_SPACE_EN 0x04
+enum {
+NV_PORTS = 2,
+NV_PIO_MASK = 0x1f,
+NV_MWDMA_MASK = 0x07,
+NV_UDMA_MASK = 0x7f,
+NV_PORT0_SCR_REG_OFFSET = 0x00,
+NV_PORT1_SCR_REG_OFFSET = 0x40,
+NV_INT_STATUS = 0x10,
+NV_INT_STATUS_CK804 = 0x440,
+NV_INT_STATUS_PDEV_INT = 0x01,
+NV_INT_STATUS_PDEV_PM = 0x02,
+NV_INT_STATUS_PDEV_ADDED = 0x04,
+NV_INT_STATUS_PDEV_REMOVED = 0x08,
+NV_INT_STATUS_SDEV_INT = 0x10,
+NV_INT_STATUS_SDEV_PM = 0x20,
+NV_INT_STATUS_SDEV_ADDED = 0x40,
+NV_INT_STATUS_SDEV_REMOVED = 0x80,
+NV_INT_STATUS_PDEV_HOTPLUG = (NV_INT_STATUS_PDEV_ADDED |
+NV_INT_STATUS_PDEV_REMOVED),
+NV_INT_STATUS_SDEV_HOTPLUG = (NV_INT_STATUS_SDEV_ADDED |
+NV_INT_STATUS_SDEV_REMOVED),
+NV_INT_STATUS_HOTPLUG = (NV_INT_STATUS_PDEV_HOTPLUG |
+NV_INT_STATUS_SDEV_HOTPLUG),
+NV_INT_ENABLE = 0x11,
+NV_INT_ENABLE_CK804 = 0x441,
+NV_INT_ENABLE_PDEV_MASK = 0x01,
+NV_INT_ENABLE_PDEV_PM = 0x02,
+NV_INT_ENABLE_PDEV_ADDED = 0x04,
+NV_INT_ENABLE_PDEV_REMOVED = 0x08,
+NV_INT_ENABLE_SDEV_MASK = 0x10,
+NV_INT_ENABLE_SDEV_PM = 0x20,
+NV_INT_ENABLE_SDEV_ADDED = 0x40,
+NV_INT_ENABLE_SDEV_REMOVED = 0x80,
+NV_INT_ENABLE_PDEV_HOTPLUG = (NV_INT_ENABLE_PDEV_ADDED |
+NV_INT_ENABLE_PDEV_REMOVED),
+NV_INT_ENABLE_SDEV_HOTPLUG = (NV_INT_ENABLE_SDEV_ADDED |
+NV_INT_ENABLE_SDEV_REMOVED),
+NV_INT_ENABLE_HOTPLUG = (NV_INT_ENABLE_PDEV_HOTPLUG |
+NV_INT_ENABLE_SDEV_HOTPLUG),
+NV_INT_CONFIG = 0x12,
+NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
+// For PCI config register 20
+NV_MCP_SATA_CFG_20 = 0x50,
+NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
+};
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static irqreturn_t nv_interrupt (int irq, void *dev_instance,
...@@ -175,8 +149,6 @@ static const struct pci_device_id nv_pci_tbl[] = {
{ 0, } /* terminate list */
};
-#define NV_HOST_FLAGS_SCR_MMIO 0x00000001
struct nv_host_desc
{
enum nv_host_type host_type;
...@@ -332,36 +304,23 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
-struct ata_host_set *host_set = ap->host_set;
-struct nv_host *host = host_set->private_data;
if (sc_reg > SCR_CONTROL)
return 0xffffffffU;
-if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
-return readl((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
-else
-return inl(ap->ioaddr.scr_addr + (sc_reg * 4));
+return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
-struct ata_host_set *host_set = ap->host_set;
-struct nv_host *host = host_set->private_data;
if (sc_reg > SCR_CONTROL)
return;
-if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
-writel(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
-else
-outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
+iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
}
static void nv_host_stop (struct ata_host_set *host_set)
{
struct nv_host *host = host_set->private_data;
-struct pci_dev *pdev = to_pci_dev(host_set->dev);
// Disable hotplug event interrupts.
if (host->host_desc->disable_hotplug)
...@@ -369,8 +328,7 @@ static void nv_host_stop (struct ata_host_set *host_set)
kfree(host);
-if (host_set->mmio_base)
-pci_iounmap(pdev, host_set->mmio_base);
+ata_pci_host_stop(host_set);
}
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
...@@ -382,6 +340,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
int pci_dev_busy = 0;
int rc;
u32 bar;
+unsigned long base;
// Make sure this is a SATA controller by counting the number of bars
// (NVIDIA SATA controllers will always have six bars). Otherwise,
...@@ -426,31 +385,16 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
probe_ent->private_data = host;
-if (pci_resource_flags(pdev, 5) & IORESOURCE_MEM)
-host->host_flags |= NV_HOST_FLAGS_SCR_MMIO;
-if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) {
-unsigned long base;
probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
-if (probe_ent->mmio_base == NULL) {
+if (!probe_ent->mmio_base) {
rc = -EIO;
goto err_out_free_host;
}
base = (unsigned long)probe_ent->mmio_base;
-probe_ent->port[0].scr_addr =
-base + NV_PORT0_SCR_REG_OFFSET;
-probe_ent->port[1].scr_addr =
-base + NV_PORT1_SCR_REG_OFFSET;
-} else {
-probe_ent->port[0].scr_addr =
-pci_resource_start(pdev, 5) | NV_PORT0_SCR_REG_OFFSET;
-probe_ent->port[1].scr_addr =
-pci_resource_start(pdev, 5) | NV_PORT1_SCR_REG_OFFSET;
-}
+probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
+probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
pci_set_master(pdev);
...@@ -467,7 +411,6 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_iounmap:
-if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
pci_iounmap(pdev, probe_ent->mmio_base);
err_out_free_host:
kfree(host);
...
...@@ -371,7 +371,7 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
if (quirks & SIL_QUIRK_UDMA5MAX) {
printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n",
ap->id, dev->devno, model_num);
-ap->udma_mask &= ATA_UDMA5;
+dev->udma_mask &= ATA_UDMA5;
return;
}
}
...
...@@ -842,9 +842,10 @@ static void sil24_port_stop(struct ata_port *ap)
static void sil24_host_stop(struct ata_host_set *host_set)
{
struct sil24_host_priv *hpriv = host_set->private_data;
+struct pci_dev *pdev = to_pci_dev(host_set->dev);
-iounmap(hpriv->host_base);
-iounmap(hpriv->port_base);
+pci_iounmap(pdev, hpriv->host_base);
+pci_iounmap(pdev, hpriv->port_base);
kfree(hpriv);
}
...@@ -871,26 +872,23 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable;
rc = -ENOMEM;
-/* ioremap mmio registers */
-host_base = ioremap(pci_resource_start(pdev, 0),
-pci_resource_len(pdev, 0));
+/* map mmio registers */
+host_base = pci_iomap(pdev, 0, 0);
if (!host_base)
goto out_free;
-port_base = ioremap(pci_resource_start(pdev, 2),
-pci_resource_len(pdev, 2));
+port_base = pci_iomap(pdev, 2, 0);
if (!port_base)
goto out_free;
/* allocate & init probe_ent and hpriv */
-probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
+probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
if (!probe_ent)
goto out_free;
-hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
+hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
goto out_free;
-memset(probe_ent, 0, sizeof(*probe_ent));
probe_ent->dev = pci_dev_to_dev(pdev);
INIT_LIST_HEAD(&probe_ent->node);
...@@ -907,7 +905,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
probe_ent->mmio_base = port_base;
probe_ent->private_data = hpriv;
-memset(hpriv, 0, sizeof(*hpriv));
hpriv->host_base = host_base;
hpriv->port_base = port_base;
...@@ -1011,9 +1008,9 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
out_free:
if (host_base)
-iounmap(host_base);
+pci_iounmap(pdev, host_base);
if (port_base)
-iounmap(port_base);
+pci_iounmap(pdev, port_base);
kfree(probe_ent);
kfree(hpriv);
pci_release_regions(pdev);
...
...@@ -44,6 +44,8 @@ enum {
uli_5287 = 1,
uli_5281 = 2,
+uli_max_ports = 4,
/* PCI configuration registers */
ULI5287_BASE = 0x90, /* sata0 phy SCR registers */
ULI5287_OFFS = 0x10, /* offset from sata0->sata1 phy regs */
...@@ -51,6 +53,10 @@
ULI5281_OFFS = 0x60, /* offset from sata0->sata1 phy regs */
};
+struct uli_priv {
+unsigned int scr_cfg_addr[uli_max_ports];
+};
static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static u32 uli_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void uli_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
...@@ -137,7 +143,8 @@ MODULE_VERSION(DRV_VERSION);
static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
{
-return ap->ioaddr.scr_addr + (4 * sc_reg);
+struct uli_priv *hpriv = ap->host_set->private_data;
+return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
}
static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
...@@ -182,6 +189,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
int rc;
unsigned int board_idx = (unsigned int) ent->driver_data;
int pci_dev_busy = 0;
+struct uli_priv *hpriv;
if (!printed_version++)
dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
...@@ -210,10 +218,18 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_regions;
}
+hpriv = kzalloc(sizeof(*hpriv), GFP_KERNEL);
+if (!hpriv) {
+rc = -ENOMEM;
+goto err_out_probe_ent;
+}
+probe_ent->private_data = hpriv;
switch (board_idx) {
case uli_5287:
-probe_ent->port[0].scr_addr = ULI5287_BASE;
-probe_ent->port[1].scr_addr = ULI5287_BASE + ULI5287_OFFS;
+hpriv->scr_cfg_addr[0] = ULI5287_BASE;
+hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
probe_ent->n_ports = 4;
probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8;
...@@ -221,27 +237,27 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
probe_ent->port[2].ctl_addr =
(pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4;
probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16;
-probe_ent->port[2].scr_addr = ULI5287_BASE + ULI5287_OFFS*4;
+hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;
probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8;
probe_ent->port[3].altstatus_addr =
probe_ent->port[3].ctl_addr =
(pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4;
probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24;
-probe_ent->port[3].scr_addr = ULI5287_BASE + ULI5287_OFFS*5;
+hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;
ata_std_ports(&probe_ent->port[2]);
ata_std_ports(&probe_ent->port[3]);
break;
case uli_5289:
-probe_ent->port[0].scr_addr = ULI5287_BASE;
-probe_ent->port[1].scr_addr = ULI5287_BASE + ULI5287_OFFS;
+hpriv->scr_cfg_addr[0] = ULI5287_BASE;
+hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
break;
case uli_5281:
-probe_ent->port[0].scr_addr = ULI5281_BASE;
-probe_ent->port[1].scr_addr = ULI5281_BASE + ULI5281_OFFS;
+hpriv->scr_cfg_addr[0] = ULI5281_BASE;
+hpriv->scr_cfg_addr[1] = ULI5281_BASE + ULI5281_OFFS;
break;
default:
...@@ -258,9 +274,10 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+err_out_probe_ent:
+kfree(probe_ent);
err_out_regions:
pci_release_regions(pdev);
err_out:
if (!pci_dev_busy)
pci_disable_device(pdev);
...
...@@ -286,7 +286,7 @@ static int scsi_bus_suspend(struct device * dev, pm_message_t state)
return err;
if (sht->suspend)
-err = sht->suspend(sdev);
+err = sht->suspend(sdev, state);
return err;
}
...
...@@ -358,6 +358,11 @@ struct ata_device {
unsigned int max_sectors; /* per-device max sectors */
unsigned int cdb_len;
+/* per-dev xfer mask */
+unsigned int pio_mask;
+unsigned int mwdma_mask;
+unsigned int udma_mask;
/* for CHS addressing */
u16 cylinders; /* Number of cylinders */
u16 heads; /* Number of heads */
...@@ -395,6 +400,7 @@ struct ata_port {
struct ata_host_stats stats;
struct ata_host_set *host_set;
+struct device *dev;
struct work_struct port_task;
...@@ -515,9 +521,9 @@ extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
extern int ata_scsi_release(struct Scsi_Host *host);
extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
extern int ata_scsi_device_resume(struct scsi_device *);
-extern int ata_scsi_device_suspend(struct scsi_device *);
+extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
extern int ata_device_resume(struct ata_port *, struct ata_device *);
-extern int ata_device_suspend(struct ata_port *, struct ata_device *);
+extern int ata_device_suspend(struct ata_port *, struct ata_device *, pm_message_t state);
extern int ata_ratelimit(void);
extern unsigned int ata_busy_sleep(struct ata_port *ap,
unsigned long timeout_pat,
...@@ -568,6 +574,8 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
sector_t capacity, int geom[]);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
+extern struct ata_device *ata_dev_pair(struct ata_port *ap,
+struct ata_device *adev);
/*
* Timing helpers
...
...@@ -286,7 +286,7 @@ struct scsi_host_template {
* suspend support
*/
int (*resume)(struct scsi_device *);
-int (*suspend)(struct scsi_device *);
+int (*suspend)(struct scsi_device *, pm_message_t state);
/*
* Name of proc directory
...