Commit 9b358e30 authored by Mark Lord, committed by Jeff Garzik

[PATCH] sata_mv: prevent unnecessary double-resets

The mv_err_intr() function is invoked from the driver's interrupt handler,
as well as from the timeout function.  This patch prevents it from triggering
a one-after-the-other double reset of the controller when invoked
from the timeout function.

This also adds a check for a timeout race condition that has been observed
to occur with this driver in earlier kernels.  This should not be needed,
in theory, but in practice it has caught bugs.  Maybe nuke it at a later date.
Signed-off-by: Mark Lord <liml@rtr.ca>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
parent 2f880b65
...@@ -1291,6 +1291,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap) ...@@ -1291,6 +1291,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
/** /**
* mv_err_intr - Handle error interrupts on the port * mv_err_intr - Handle error interrupts on the port
* @ap: ATA channel to manipulate * @ap: ATA channel to manipulate
* @reset_allowed: bool: 0 == don't trigger from reset here
* *
* In most cases, just clear the interrupt and move on. However, * In most cases, just clear the interrupt and move on. However,
* some cases require an eDMA reset, which is done right before * some cases require an eDMA reset, which is done right before
...@@ -1301,7 +1302,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap) ...@@ -1301,7 +1302,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap)
* LOCKING: * LOCKING:
* Inherited from caller. * Inherited from caller.
*/ */
static void mv_err_intr(struct ata_port *ap) static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{ {
void __iomem *port_mmio = mv_ap_base(ap); void __iomem *port_mmio = mv_ap_base(ap);
u32 edma_err_cause, serr = 0; u32 edma_err_cause, serr = 0;
...@@ -1323,9 +1324,8 @@ static void mv_err_intr(struct ata_port *ap) ...@@ -1323,9 +1324,8 @@ static void mv_err_intr(struct ata_port *ap)
writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
/* check for fatal here and recover if needed */ /* check for fatal here and recover if needed */
if (EDMA_ERR_FATAL & edma_err_cause) { if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
mv_stop_and_reset(ap); mv_stop_and_reset(ap);
}
} }
/** /**
...@@ -1406,7 +1406,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, ...@@ -1406,7 +1406,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
shift++; /* skip bit 8 in the HC Main IRQ reg */ shift++; /* skip bit 8 in the HC Main IRQ reg */
} }
if ((PORT0_ERR << shift) & relevant) { if ((PORT0_ERR << shift) & relevant) {
mv_err_intr(ap); mv_err_intr(ap, 1);
err_mask |= AC_ERR_OTHER; err_mask |= AC_ERR_OTHER;
handled = 1; handled = 1;
} }
...@@ -2031,11 +2031,14 @@ static void mv_eng_timeout(struct ata_port *ap) ...@@ -2031,11 +2031,14 @@ static void mv_eng_timeout(struct ata_port *ap)
ap->host_set->mmio_base, ap, qc, qc->scsicmd, ap->host_set->mmio_base, ap, qc, qc->scsicmd,
&qc->scsicmd->cmnd); &qc->scsicmd->cmnd);
mv_err_intr(ap); mv_err_intr(ap, 0);
mv_stop_and_reset(ap); mv_stop_and_reset(ap);
qc->err_mask |= AC_ERR_TIMEOUT; WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
ata_eh_qc_complete(qc); if (qc->flags & ATA_QCFLAG_ACTIVE) {
qc->err_mask |= AC_ERR_TIMEOUT;
ata_eh_qc_complete(qc);
}
} }
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment