Commit 0bbddb8c authored by Linus Torvalds

Merge branch 'for-4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata

Pull libata updates from Tejun Heo:

 - libata has always limited the maximum queue depth to 31, with one
   entry set aside mostly for historical reasons. This didn't use to
   make much difference, but Jens found that modern hard drives can
   actually perform measurably better with the one extra queue slot.
   Jens updated the libata core so that it can use the full queue
   depth of 32 (see the sketch after this list)

 - Damien updated the command retry logic in error handling so that it
   doesn't retry unnecessarily when the upper layer (SCSI) is going to
   handle the retries itself

 - A couple of misc changes
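
A minimal sketch of the resulting tag layout, distilled from the header
hunks at the bottom of this diff (an illustration of the scheme, not a
verbatim copy of the tree):

        /* All 32 hardware tags (0..31) become usable for NCQ; the
         * internal command moves to a software-only 33rd slot. */
        enum {
                ATA_MAX_QUEUE    = 32,
                ATA_TAG_INTERNAL = ATA_MAX_QUEUE,       /* tag 32 */
        };

        struct ata_queued_cmd {
                unsigned int tag;       /* libata core tag: 0..32 */
                unsigned int hw_tag;    /* tag sent to the device: 0..31 */
                /* ... */
        };

        struct ata_port {
                struct ata_queued_cmd qcmd[ATA_MAX_QUEUE + 1];
                u64 qc_active;          /* bit 32 = internal command */
                /* ... */
        };

For a regular command tag == hw_tag; for the internal command tag is 32
while hw_tag is 0. That split is why the driver hunks below switch from
qc->tag to qc->hw_tag wherever a value is written to hardware, and why
qc_active grows to 64 bits.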

* 'for-4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata:
  sata_fsl: use the right type for tag bitshift
  ahci: enable full queue depth of 32
  libata: don't clamp queue depth to ATA_MAX_QUEUE - 1
  libata: add extra internal command
  sata_nv: set host can_queue count appropriately
  libata: remove assumption that ATA_MAX_QUEUE - 1 is the max
  libata: use ata_tag_internal() consistently
  libata: bump ->qc_active to a 64-bit type
  libata: convert core and drivers to ->hw_tag usage
  libata: introduce notion of separate hardware tags
  libata: Fix command retry decision
  libata: Honor RQF_QUIET flag
  libata: Make ata_dev_set_mode() less verbose
  libata: Fix ata_err_string()
  libata: Fix comment typo in ata_eh_analyze_tf()
  sata_nv: don't use block layer bounce buffer
  ata: hpt37x: Convert to use match_string() helper
parents 476d9ff6 88e10092
@@ -271,7 +271,7 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
 	 * Fill in command table information.  First, the header,
 	 * a SATA Register - Host to Device command FIS.
 	 */
-	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+	cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;
 	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
 	if (is_atapi) {
@@ -294,7 +294,7 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
 	if (is_atapi)
 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
-	ahci_fill_cmd_slot(pp, qc->tag, opts);
+	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
 }
 static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
...
@@ -390,7 +390,7 @@ extern struct device_attribute *ahci_sdev_attrs[];
  */
 #define AHCI_SHT(drv_name)						\
 	ATA_NCQ_SHT(drv_name),						\
-	.can_queue		= AHCI_MAX_CMDS - 1,			\
+	.can_queue		= AHCI_MAX_CMDS,			\
 	.sg_tablesize		= AHCI_MAX_SG,				\
 	.dma_boundary		= AHCI_DMA_BOUNDARY,			\
 	.shost_attrs		= ahci_shost_attrs,			\
...
@@ -1649,7 +1649,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	 * Fill in command table information.  First, the header,
 	 * a SATA Register - Host to Device command FIS.
 	 */
-	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
+	cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;
 	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
 	if (is_atapi) {
@@ -1670,7 +1670,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
 	if (is_atapi)
 		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
-	ahci_fill_cmd_slot(pp, qc->tag, opts);
+	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
 }
 static void ahci_fbs_dec_intr(struct ata_port *ap)
@@ -2006,7 +2006,7 @@ unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 	pp->active_link = qc->dev->link;
 	if (ata_is_ncq(qc->tf.protocol))
-		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
+		writel(1 << qc->hw_tag, port_mmio + PORT_SCR_ACT);
 	if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
 		u32 fbs = readl(port_mmio + PORT_FBS);
@@ -2016,7 +2016,7 @@ unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 		pp->fbs_last_dev = qc->dev->link->pmp;
 	}
-	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
+	writel(1 << qc->hw_tag, port_mmio + PORT_CMD_ISSUE);
 	ahci_sw_activity(qc->dev->link);
...
@@ -759,7 +759,7 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 	tf->flags |= tf_flags;
-	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
+	if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
 		/* yay, NCQ */
 		if (!lba_48_ok(block, n_block))
 			return -ERANGE;
@@ -1570,8 +1570,9 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 	u8 command = tf->command;
 	int auto_timeout = 0;
 	struct ata_queued_cmd *qc;
-	unsigned int tag, preempted_tag;
-	u32 preempted_sactive, preempted_qc_active;
+	unsigned int preempted_tag;
+	u32 preempted_sactive;
+	u64 preempted_qc_active;
 	int preempted_nr_active_links;
 	DECLARE_COMPLETION_ONSTACK(wait);
 	unsigned long flags;
@@ -1587,20 +1588,10 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 	}
 	/* initialize internal qc */
-	/* XXX: Tag 0 is used for drivers with legacy EH as some
-	 * drivers choke if any other tag is given.  This breaks
-	 * ata_tag_internal() test for those drivers.  Don't use new
-	 * EH stuff without converting to it.
-	 */
-	if (ap->ops->error_handler)
-		tag = ATA_TAG_INTERNAL;
-	else
-		tag = 0;
-	qc = __ata_qc_from_tag(ap, tag);
-	qc->tag = tag;
+	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
+	qc->tag = ATA_TAG_INTERNAL;
+	qc->hw_tag = 0;
 	qc->scsicmd = NULL;
 	qc->ap = ap;
 	qc->dev = dev;
@@ -2295,7 +2286,7 @@ static int ata_dev_config_ncq(struct ata_device *dev,
 		return 0;
 	}
 	if (ap->flags & ATA_FLAG_NCQ) {
-		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
+		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
 		dev->flags |= ATA_DFLAG_NCQ;
 	}
@@ -3573,9 +3564,11 @@ static int ata_dev_set_mode(struct ata_device *dev)
 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
 		dev->xfer_shift, (int)dev->xfer_mode);
-	ata_dev_info(dev, "configured for %s%s\n",
-		     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
-		     dev_err_whine);
+	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
+	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
+		ata_dev_info(dev, "configured for %s%s\n",
+			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
+			     dev_err_whine);
 	return 0;
@@ -5133,7 +5126,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
 	}
 	qc = __ata_qc_from_tag(ap, tag);
-	qc->tag = tag;
+	qc->tag = qc->hw_tag = tag;
 	qc->scsicmd = NULL;
 	qc->ap = ap;
 	qc->dev = dev;
@@ -5163,7 +5156,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
 	qc->flags = 0;
 	tag = qc->tag;
-	if (likely(ata_tag_valid(tag))) {
+	if (ata_tag_valid(tag)) {
 		qc->tag = ATA_TAG_POISON;
 		if (ap->flags & ATA_FLAG_SAS_HOST)
 			ata_sas_free_tag(tag, ap);
@@ -5185,7 +5178,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
 	/* command should be marked inactive atomically with qc completion */
 	if (ata_is_ncq(qc->tf.protocol)) {
-		link->sactive &= ~(1 << qc->tag);
+		link->sactive &= ~(1 << qc->hw_tag);
 		if (!link->sactive)
 			ap->nr_active_links--;
 	} else {
@@ -5203,7 +5196,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
 	 * is called. (when rc != 0 and atapi request sense is needed)
 	 */
 	qc->flags &= ~ATA_QCFLAG_ACTIVE;
-	ap->qc_active &= ~(1 << qc->tag);
+	ap->qc_active &= ~(1ULL << qc->tag);
 	/* call completion callback */
 	qc->complete_fn(qc);
@@ -5360,29 +5353,29 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
  * RETURNS:
  *	Number of completed commands on success, -errno otherwise.
  */
-int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
+int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
 {
 	int nr_done = 0;
-	u32 done_mask;
+	u64 done_mask;
 	done_mask = ap->qc_active ^ qc_active;
 	if (unlikely(done_mask & qc_active)) {
-		ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
+		ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
 			     ap->qc_active, qc_active);
 		return -EINVAL;
 	}
 	while (done_mask) {
 		struct ata_queued_cmd *qc;
-		unsigned int tag = __ffs(done_mask);
+		unsigned int tag = __ffs64(done_mask);
 		qc = ata_qc_from_tag(ap, tag);
 		if (qc) {
 			ata_qc_complete(qc);
 			nr_done++;
 		}
-		done_mask &= ~(1 << tag);
+		done_mask &= ~(1ULL << tag);
 	}
 	return nr_done;
@@ -5413,11 +5406,11 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));
 	if (ata_is_ncq(prot)) {
-		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
+		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));
 		if (!link->sactive)
 			ap->nr_active_links++;
-		link->sactive |= 1 << qc->tag;
+		link->sactive |= 1 << qc->hw_tag;
 	} else {
 		WARN_ON_ONCE(link->sactive);
@@ -5426,7 +5419,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
 	}
 	qc->flags |= ATA_QCFLAG_ACTIVE;
-	ap->qc_active |= 1 << qc->tag;
+	ap->qc_active |= 1ULL << qc->tag;
 	/*
 	 * We guarantee to LLDs that they will have at least one
@@ -6425,7 +6418,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
 {
 	spin_lock_init(&host->lock);
 	mutex_init(&host->eh_mutex);
-	host->n_tags = ATA_MAX_QUEUE - 1;
+	host->n_tags = ATA_MAX_QUEUE;
 	host->dev = dev;
 	host->ops = ops;
 }
@@ -6507,7 +6500,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 {
 	int i, rc;
-	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
+	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);
 	/* host must have been started */
 	if (!(host->flags & ATA_HOST_STARTED)) {
...
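
A usage note on the ata_qc_complete_multiple() conversion above: the
argument is the mask of commands that are STILL active, so interrupt
handlers pass ap->qc_active ^ done_mask, now as a u64 so the internal
command's bit 32 survives. A hedged sketch of the driver-side contract
(the demo_* names are hypothetical stand-ins; the call itself mirrors
the sata_fsl hunk further down):

        /* stand-in for a controller-specific completion register read */
        static u64 demo_read_done_bits(struct ata_port *ap)
        {
                return ap->qc_active;   /* pretend everything finished */
        }

        static void demo_host_intr(struct ata_port *ap)
        {
                u64 done_mask = demo_read_done_bits(ap);

                /* clear the finished bits out of the active mask */
                ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
        }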
@@ -822,9 +822,12 @@ static int ata_eh_nr_in_flight(struct ata_port *ap)
 	int nr = 0;
 	/* count only non-internal commands */
-	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
+	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+		if (ata_tag_internal(tag))
+			continue;
 		if (ata_qc_from_tag(ap, tag))
 			nr++;
+	}
 	return nr;
 }
@@ -849,7 +852,7 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
 		/* No progress during the last interval, tag all
 		 * in-flight qcs as timed out and freeze the port.
 		 */
-		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
+		for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
 			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
 			if (qc)
 				qc->err_mask |= AC_ERR_TIMEOUT;
@@ -1003,7 +1006,8 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
 	/* we're gonna abort all commands, no need for fast drain */
 	ata_eh_set_pending(ap, 0);
-	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
+	/* include internal tag in iteration */
+	for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) {
 		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
 		if (qc && (!link || qc->dev->link == link)) {
@@ -1432,6 +1436,10 @@ static const char *ata_err_string(unsigned int err_mask)
 		return "invalid argument";
 	if (err_mask & AC_ERR_DEV)
 		return "device error";
+	if (err_mask & AC_ERR_NCQ)
+		return "NCQ error";
+	if (err_mask & AC_ERR_NODEV_HINT)
+		return "Polling detection error";
 	return "unknown error";
 }
@@ -1815,10 +1823,10 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
 	if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
 		int ret = scsi_check_sense(qc->scsicmd);
 		/*
-		 * SUCCESS here means that the sense code could
+		 * SUCCESS here means that the sense code could be
 		 * evaluated and should be passed to the upper layers
 		 * for correct evaluation.
-		 * FAILED means the sense code could not interpreted
+		 * FAILED means the sense code could not be interpreted
 		 * and the device would need to be reset.
 		 * NEEDS_RETRY and ADD_TO_MLQUEUE means that the
 		 * command would need to be retried.
@@ -2098,6 +2106,21 @@ static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
 	return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
 }
+/**
+ *	ata_eh_quiet - check if we need to be quiet about a command error
+ *	@qc: qc to check
+ *
+ *	Look at the qc flags and its scsi command request flags to determine
+ *	if we need to be quiet about the command failure.
+ */
+static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
+{
+	if (qc->scsicmd &&
+	    qc->scsicmd->request->rq_flags & RQF_QUIET)
+		qc->flags |= ATA_QCFLAG_QUIET;
+	return qc->flags & ATA_QCFLAG_QUIET;
+}
 /**
  * ata_eh_link_autopsy - analyze error and determine recovery action
  * @link: host link to perform autopsy on
@@ -2115,7 +2138,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 	struct ata_eh_context *ehc = &link->eh_context;
 	struct ata_device *dev;
 	unsigned int all_err_mask = 0, eflags = 0;
-	int tag;
+	int tag, nr_failed = 0, nr_quiet = 0;
 	u32 serror;
 	int rc;
@@ -2167,12 +2190,16 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 		if (qc->err_mask & ~AC_ERR_OTHER)
 			qc->err_mask &= ~AC_ERR_OTHER;
-		/* SENSE_VALID trumps dev/unknown error and revalidation */
+		/*
+		 * SENSE_VALID trumps dev/unknown error and revalidation. Upper
+		 * layers will determine whether the command is worth retrying
+		 * based on the sense data and device class/type. Otherwise,
+		 * determine directly if the command is worth retrying using its
+		 * error mask and flags.
+		 */
 		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
 			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
-		/* determine whether the command is worth retrying */
-		if (ata_eh_worth_retry(qc))
+		else if (ata_eh_worth_retry(qc))
 			qc->flags |= ATA_QCFLAG_RETRY;
 		/* accumulate error info */
@@ -2181,8 +2208,17 @@ static void ata_eh_link_autopsy(struct ata_link *link)
 		if (qc->flags & ATA_QCFLAG_IO)
 			eflags |= ATA_EFLAG_IS_IO;
 		trace_ata_eh_link_autopsy_qc(qc);
+		/* Count quiet errors */
+		if (ata_eh_quiet(qc))
+			nr_quiet++;
+		nr_failed++;
 	}
+	/* If all failed commands requested silence, then be quiet */
+	if (nr_quiet == nr_failed)
+		ehc->i.flags |= ATA_EHI_QUIET;
 	/* enforce default EH actions */
 	if (ap->pflags & ATA_PFLAG_FROZEN ||
 	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
...
@@ -872,6 +872,9 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
 			qc->sg = scsi_sglist(cmd);
 			qc->n_elem = scsi_sg_count(cmd);
+
+			if (cmd->request->rq_flags & RQF_QUIET)
+				qc->flags |= ATA_QCFLAG_QUIET;
 		} else {
 			cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
 			cmd->scsi_done(cmd);
@@ -1316,7 +1319,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
 		int depth;
 		depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
-		depth = min(ATA_MAX_QUEUE - 1, depth);
+		depth = min(ATA_MAX_QUEUE, depth);
 		scsi_change_queue_depth(sdev, depth);
 	}
@@ -1429,7 +1432,7 @@ int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
 	/* limit and apply queue depth */
 	queue_depth = min(queue_depth, sdev->host->can_queue);
 	queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
-	queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1);
+	queue_depth = min(queue_depth, ATA_MAX_QUEUE);
 	if (sdev->queue_depth == queue_depth)
 		return -EINVAL;
@@ -1895,7 +1898,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
 	qc->nbytes = n_block * scmd->device->sector_size;
 	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
-			     qc->tag, class);
+			     qc->hw_tag, class);
 	if (likely(rc == 0))
 		return 0;
@@ -3233,7 +3236,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
 	/* For NCQ commands copy the tag value */
 	if (ata_is_ncq(tf->protocol))
-		tf->nsect = qc->tag << 3;
+		tf->nsect = qc->hw_tag << 3;
 	/* enforce correct master/slave bit */
 	tf->device = dev->devno ?
@@ -3513,7 +3516,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
 		tf->protocol = ATA_PROT_NCQ;
 		tf->command = ATA_CMD_FPDMA_SEND;
 		tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
-		tf->nsect = qc->tag << 3;
+		tf->nsect = qc->hw_tag << 3;
 		tf->hob_feature = (size / 512) >> 8;
 		tf->feature = size / 512;
@@ -3733,7 +3736,7 @@ static unsigned int ata_scsi_zbc_in_xlat(struct ata_queued_cmd *qc)
 		tf->protocol = ATA_PROT_NCQ;
 		tf->command = ATA_CMD_FPDMA_RECV;
 		tf->hob_nsect = ATA_SUBCMD_FPDMA_RECV_ZAC_MGMT_IN & 0x1f;
-		tf->nsect = qc->tag << 3;
+		tf->nsect = qc->hw_tag << 3;
 		tf->feature = sect & 0xff;
 		tf->hob_feature = (sect >> 8) & 0xff;
 		tf->auxiliary = ATA_SUBCMD_ZAC_MGMT_IN_REPORT_ZONES | (options << 8);
@@ -3812,7 +3815,7 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
 		tf->protocol = ATA_PROT_NCQ_NODATA;
 		tf->command = ATA_CMD_NCQ_NON_DATA;
 		tf->feature = ATA_SUBCMD_NCQ_NON_DATA_ZAC_MGMT_OUT;
-		tf->nsect = qc->tag << 3;
+		tf->nsect = qc->hw_tag << 3;
 		tf->auxiliary = sa | ((u16)all << 8);
 	} else {
 		tf->protocol = ATA_PROT_NODATA;
@@ -5117,7 +5120,7 @@ int ata_sas_allocate_tag(struct ata_port *ap)
 		tag = tag < max_queue ? tag : 0;
 		/* the last tag is reserved for internal command. */
-		if (tag == ATA_TAG_INTERNAL)
+		if (ata_tag_internal(tag))
 			continue;
 		if (!test_and_set_bit(tag, &ap->sas_tag_allocated)) {
...
@@ -224,17 +224,14 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
 				 const char * const list[])
 {
 	unsigned char model_num[ATA_ID_PROD_LEN + 1];
-	int i = 0;
+	int i;
 	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
-	while (list[i] != NULL) {
-		if (!strcmp(list[i], model_num)) {
-			pr_warn("%s is not supported for %s\n",
-				modestr, list[i]);
-			return 1;
-		}
-		i++;
+	i = match_string(list, -1, model_num);
+	if (i >= 0) {
+		pr_warn("%s is not supported for %s\n", modestr, list[i]);
+		return 1;
 	}
 	return 0;
 }
...
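
A note on the match_string() conversion above: the helper takes the
string array, the array size (or -1 for a NULL-terminated array, as
here) and the string to look up, returning the matching index or a
negative errno. A minimal sketch under those semantics (the table
contents are made up):

        static const char * const demo_bad_models[] = {
                "SOME DRIVE 1.0",
                "ANOTHER DRIVE",
                NULL,           /* terminator required when size is -1 */
        };

        static bool demo_model_blacklisted(const char *model_num)
        {
                return match_string(demo_bad_models, -1, model_num) >= 0;
        }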
@@ -761,7 +761,7 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
 		if (tag > 0) {
 			dev_info(ap->dev,
 				 "%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n",
-				 __func__, qc->tag, qc->tf.command,
+				 __func__, qc->hw_tag, qc->tf.command,
 				 get_dma_dir_descript(qc->dma_dir),
 				 get_prot_descript(qc->tf.protocol),
 				 sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
@@ -789,7 +789,7 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
 {
 	u8 status = 0;
 	u32 mask = 0x0;
-	u8 tag = qc->tag;
+	u8 tag = qc->hw_tag;
 	struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
 	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
 	hsdev->sactive_queued = 0;
@@ -997,7 +997,7 @@ static void sata_dwc_bmdma_setup_by_tag(struct ata_queued_cmd *qc, u8 tag)
 static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
 {
-	u8 tag = qc->tag;
+	u8 tag = qc->hw_tag;
 	if (ata_is_ncq(qc->tf.protocol)) {
 		dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
@@ -1059,7 +1059,7 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
 static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
 {
-	u8 tag = qc->tag;
+	u8 tag = qc->hw_tag;
 	if (ata_is_ncq(qc->tf.protocol)) {
 		dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
@@ -1074,17 +1074,17 @@ static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
 static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
 {
 	u32 sactive;
-	u8 tag = qc->tag;
+	u8 tag = qc->hw_tag;
 	struct ata_port *ap = qc->ap;
 	struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
 #ifdef DEBUG_NCQ
-	if (qc->tag > 0 || ap->link.sactive > 1)
+	if (qc->hw_tag > 0 || ap->link.sactive > 1)
 		dev_info(ap->dev,
 			 "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
 			 __func__, ap->print_id, qc->tf.command,
 			 ata_get_cmd_descript(qc->tf.command),
-			 qc->tag, get_prot_descript(qc->tf.protocol),
+			 qc->hw_tag, get_prot_descript(qc->tf.protocol),
 			 ap->link.active_tag, ap->link.sactive);
 #endif
...
@@ -519,7 +519,7 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc)
 	struct sata_fsl_port_priv *pp = ap->private_data;
 	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
 	void __iomem *hcr_base = host_priv->hcr_base;
-	unsigned int tag = sata_fsl_tag(qc->tag, hcr_base);
+	unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base);
 	struct command_desc *cd;
 	u32 desc_info = CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE;
 	u32 num_prde = 0;
@@ -566,7 +566,7 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct sata_fsl_host_priv *host_priv = ap->host->private_data;
 	void __iomem *hcr_base = host_priv->hcr_base;
-	unsigned int tag = sata_fsl_tag(qc->tag, hcr_base);
+	unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base);
 	VPRINTK("xx_qc_issue called,CQ=0x%x,CA=0x%x,CE=0x%x,CC=0x%x\n",
 		ioread32(CQ + hcr_base),
@@ -595,7 +595,7 @@ static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
 	struct sata_fsl_port_priv *pp = qc->ap->private_data;
 	struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data;
 	void __iomem *hcr_base = host_priv->hcr_base;
-	unsigned int tag = sata_fsl_tag(qc->tag, hcr_base);
+	unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base);
 	struct command_desc *cd;
 	cd = pp->cmdentry + tag;
@@ -1266,7 +1266,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
 	}
 	VPRINTK("Status of all queues :\n");
-	VPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%x\n",
+	VPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%llx\n",
 		done_mask,
 		ioread32(hcr_base + CA),
 		ioread32(hcr_base + CE),
@@ -1293,7 +1293,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
 		ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
 		return;
-	} else if ((ap->qc_active & (1 << ATA_TAG_INTERNAL))) {
+	} else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) {
 		iowrite32(1, hcr_base + CC);
 		qc = ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
...
@@ -1802,7 +1802,7 @@ static void mv_fill_sg(struct ata_queued_cmd *qc)
 	struct mv_sg *mv_sg, *last_sg = NULL;
 	unsigned int si;
-	mv_sg = pp->sg_tbl[qc->tag];
+	mv_sg = pp->sg_tbl[qc->hw_tag];
 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		dma_addr_t addr = sg_dma_address(sg);
 		u32 sg_len = sg_dma_len(sg);
@@ -1903,9 +1903,9 @@ static void mv_bmdma_setup(struct ata_queued_cmd *qc)
 	writel(0, port_mmio + BMDMA_CMD);
 	/* load PRD table addr. */
-	writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
+	writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16,
 	       port_mmio + BMDMA_PRD_HIGH);
-	writelfl(pp->sg_tbl_dma[qc->tag],
+	writelfl(pp->sg_tbl_dma[qc->hw_tag],
 		 port_mmio + BMDMA_PRD_LOW);
 	/* issue r/w command */
@@ -2071,17 +2071,17 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 	 */
 	if (!(tf->flags & ATA_TFLAG_WRITE))
 		flags |= CRQB_FLAG_READ;
-	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
-	flags |= qc->tag << CRQB_TAG_SHIFT;
+	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
+	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
 	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
 	/* get current queue index from software */
 	in_index = pp->req_idx;
 	pp->crqb[in_index].sg_addr =
-		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
+		cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
 	pp->crqb[in_index].sg_addr_hi =
-		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
+		cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
 	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
 	cw = &pp->crqb[in_index].ata_cmd[0];
@@ -2164,17 +2164,17 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 	if (!(tf->flags & ATA_TFLAG_WRITE))
 		flags |= CRQB_FLAG_READ;
-	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
-	flags |= qc->tag << CRQB_TAG_SHIFT;
-	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
+	WARN_ON(MV_MAX_Q_DEPTH <= qc->hw_tag);
+	flags |= qc->hw_tag << CRQB_TAG_SHIFT;
+	flags |= qc->hw_tag << CRQB_HOSTQ_SHIFT;
 	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
 	/* get current queue index from software */
 	in_index = pp->req_idx;
 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
-	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
-	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
+	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
+	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
 	crqb->flags = cpu_to_le32(flags);
 	crqb->ata_cmd[0] = cpu_to_le32(
@@ -2539,7 +2539,7 @@ static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
 	failed_links = hweight16(new_map);
 	ata_port_info(ap,
-		      "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n",
+		      "%s: pmp_map=%04x qc_map=%04llx failed_links=%d nr_active_links=%d\n",
 		      __func__, pp->delayed_eh_pmp_map,
 		      ap->qc_active, failed_links,
 		      ap->nr_active_links);
...
@@ -400,7 +400,7 @@ static struct scsi_host_template nv_adma_sht = {
 static struct scsi_host_template nv_swncq_sht = {
 	ATA_NCQ_SHT(DRV_NAME),
-	.can_queue		= ATA_MAX_QUEUE,
+	.can_queue		= ATA_MAX_QUEUE - 1,
 	.sg_tablesize		= LIBATA_MAX_PRD,
 	.dma_boundary		= ATA_DMA_BOUNDARY,
 	.slave_configure	= nv_swncq_slave_config,
@@ -740,32 +740,16 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 		sdev1 = ap->host->ports[1]->link.device[0].sdev;
 		if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
 		    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
-			/** We have to set the DMA mask to 32-bit if either port is in
-			    ATAPI mode, since they are on the same PCI device which is
-			    used for DMA mapping.  If we set the mask we also need to set
-			    the bounce limit on both ports to ensure that the block
-			    layer doesn't feed addresses that cause DMA mapping to
-			    choke.  If either SCSI device is not allocated yet, it's OK
-			    since that port will discover its correct setting when it
-			    does get allocated.
-			    Note: Setting 32-bit mask should not fail. */
-			if (sdev0)
-				blk_queue_bounce_limit(sdev0->request_queue,
-						       ATA_DMA_MASK);
-			if (sdev1)
-				blk_queue_bounce_limit(sdev1->request_queue,
						       ATA_DMA_MASK);
-			dma_set_mask(&pdev->dev, ATA_DMA_MASK);
+			/*
+			 * We have to set the DMA mask to 32-bit if either port is in
+			 * ATAPI mode, since they are on the same PCI device which is
+			 * used for DMA mapping. If either SCSI device is not allocated
+			 * yet, it's OK since that port will discover its correct
+			 * setting when it does get allocated.
+			 */
+			rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
 		} else {
-			/** This shouldn't fail as it was set to this value before */
-			dma_set_mask(&pdev->dev, pp->adma_dma_mask);
-			if (sdev0)
-				blk_queue_bounce_limit(sdev0->request_queue,
-						       pp->adma_dma_mask);
-			if (sdev1)
-				blk_queue_bounce_limit(sdev1->request_queue,
-						       pp->adma_dma_mask);
+			rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
 		}
 		blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
@@ -1131,12 +1115,11 @@ static int nv_adma_port_start(struct ata_port *ap)
 	VPRINTK("ENTER\n");
-	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
-	   pad buffers */
-	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
-	if (rc)
-		return rc;
-	rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	/*
+	 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
+	 * pad buffers.
+	 */
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 	if (rc)
 		return rc;
@@ -1156,13 +1139,16 @@ static int nv_adma_port_start(struct ata_port *ap)
 	pp->notifier_clear_block = pp->gen_block +
 	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
-	/* Now that the legacy PRD and padding buffer are allocated we can
-	   safely raise the DMA mask to allocate the CPB/APRD table.
-	   These are allowed to fail since we store the value that ends up
-	   being used to set as the bounce limit in slave_config later if
-	   needed. */
-	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
-	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+	/*
+	 * Now that the legacy PRD and padding buffer are allocated we can
+	 * try to raise the DMA mask to allocate the CPB/APRD table.
+	 */
+	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (rc) {
+		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (rc)
+			return rc;
+	}
 	pp->adma_dma_mask = *dev->dma_mask;
 	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
@@ -1356,11 +1342,11 @@ static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
 		aprd = (si < 5) ? &cpb->aprd[si] :
-			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
+			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
 		nv_adma_fill_aprd(qc, sg, si, aprd);
 	}
 	if (si > 5)
-		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
+		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
 	else
 		cpb->next_aprd = cpu_to_le64(0);
 }
@@ -1385,7 +1371,7 @@ static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
 {
 	struct nv_adma_port_priv *pp = qc->ap->private_data;
-	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
+	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
 	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
 		       NV_CPB_CTL_IEN;
@@ -1403,7 +1389,7 @@ static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
 	wmb();
 	cpb->len		= 3;
-	cpb->tag		= qc->tag;
+	cpb->tag		= qc->hw_tag;
 	cpb->next_cpb_idx	= 0;
 	/* turn on NCQ flags for NCQ commands */
@@ -1466,9 +1452,9 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
 		pp->last_issue_ncq = curr_ncq;
 	}
-	writew(qc->tag, mmio + NV_ADMA_APPEND);
-	DPRINTK("Issued tag %u\n", qc->tag);
+	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
+	DPRINTK("Issued tag %u\n", qc->hw_tag);
 	return 0;
 }
@@ -1730,8 +1716,8 @@ static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
 	/* queue is full */
 	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
-	dq->defer_bits |= (1 << qc->tag);
-	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
+	dq->defer_bits |= (1 << qc->hw_tag);
+	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
 }
 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
@@ -1796,7 +1782,7 @@ static void nv_swncq_ncq_stop(struct ata_port *ap)
 	u32 sactive;
 	u32 done_mask;
-	ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
+	ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n",
 		     ap->qc_active, ap->link.sactive);
 	ata_port_err(ap,
 		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
@@ -2010,7 +1996,7 @@ static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
 	struct ata_bmdma_prd *prd;
 	unsigned int si, idx;
-	prd = pp->prd + ATA_MAX_PRD * qc->tag;
+	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
 	idx = 0;
 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
@@ -2048,16 +2034,16 @@ static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
 	DPRINTK("Enter\n");
-	writel((1 << qc->tag), pp->sactive_block);
-	pp->last_issue_tag = qc->tag;
-	pp->dhfis_bits &= ~(1 << qc->tag);
-	pp->dmafis_bits &= ~(1 << qc->tag);
-	pp->qc_active |= (0x1 << qc->tag);
+	writel((1 << qc->hw_tag), pp->sactive_block);
+	pp->last_issue_tag = qc->hw_tag;
+	pp->dhfis_bits &= ~(1 << qc->hw_tag);
+	pp->dmafis_bits &= ~(1 << qc->hw_tag);
+	pp->qc_active |= (0x1 << qc->hw_tag);
 	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
 	ap->ops->sff_exec_command(ap, &qc->tf);
-	DPRINTK("Issued tag %u\n", qc->tag);
+	DPRINTK("Issued tag %u\n", qc->hw_tag);
 	return 0;
 }
@@ -2207,7 +2193,7 @@ static void nv_swncq_dmafis(struct ata_port *ap)
 	rw = qc->tf.flags & ATA_TFLAG_WRITE;
 	/* load PRD table addr. */
-	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
+	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
 		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
 	/* specify data direction, triple-check start bit is clear */
...
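
The sata_nv conversion above drops the block-layer bounce-buffer setup
and collapses the paired dma_set_mask()/dma_set_coherent_mask() calls
into dma_set_mask_and_coherent(). Sketched in isolation (the demo_*
name is hypothetical), the resulting pattern is:

        static int demo_setup_dma(struct pci_dev *pdev)
        {
                int rc;

                /* prefer 64-bit DMA, fall back to 32-bit on failure */
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
                if (rc)
                        rc = dma_set_mask_and_coherent(&pdev->dev,
                                                       DMA_BIT_MASK(32));
                return rc;
        }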
@@ -849,7 +849,7 @@ static void sil24_qc_prep(struct ata_queued_cmd *qc)
 	struct sil24_sge *sge;
 	u16 ctrl = 0;
-	cb = &pp->cmd_block[sil24_tag(qc->tag)];
+	cb = &pp->cmd_block[sil24_tag(qc->hw_tag)];
 	if (!ata_is_atapi(qc->tf.protocol)) {
 		prb = &cb->ata.prb;
@@ -891,7 +891,7 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	struct sil24_port_priv *pp = ap->private_data;
 	void __iomem *port = sil24_port_base(ap);
-	unsigned int tag = sil24_tag(qc->tag);
+	unsigned int tag = sil24_tag(qc->hw_tag);
 	dma_addr_t paddr;
 	void __iomem *activate;
@@ -911,7 +911,7 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
 static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc)
 {
-	sil24_read_tf(qc->ap, qc->tag, &qc->result_tf);
+	sil24_read_tf(qc->ap, qc->hw_tag, &qc->result_tf);
 	return true;
 }
...
@@ -125,9 +125,8 @@ enum {
 	LIBATA_MAX_PRD		= ATA_MAX_PRD / 2,
 	LIBATA_DUMB_MAX_PRD	= ATA_MAX_PRD / 4,	/* Worst case */
 	ATA_DEF_QUEUE		= 1,
-	/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
 	ATA_MAX_QUEUE		= 32,
-	ATA_TAG_INTERNAL	= ATA_MAX_QUEUE - 1,
+	ATA_TAG_INTERNAL	= ATA_MAX_QUEUE,
 	ATA_SHORT_PAUSE		= 16,
 	ATAPI_MAX_DRAIN		= 16 << 10,
@@ -637,7 +636,8 @@ struct ata_queued_cmd {
 	u8			cdb[ATAPI_CDB_LEN];
 	unsigned long		flags;		/* ATA_QCFLAG_xxx */
-	unsigned int		tag;
+	unsigned int		tag;		/* libata core tag */
+	unsigned int		hw_tag;		/* driver tag */
 	unsigned int		n_elem;
 	unsigned int		orig_n_elem;
@@ -849,9 +849,9 @@ struct ata_port {
 	unsigned int		udma_mask;
 	unsigned int		cbl;	/* cable type; ATA_CBL_xxx */
-	struct ata_queued_cmd	qcmd[ATA_MAX_QUEUE];
+	struct ata_queued_cmd	qcmd[ATA_MAX_QUEUE + 1];
 	unsigned long		sas_tag_allocated; /* for sas tag allocation only */
-	unsigned int		qc_active;
+	u64			qc_active;
 	int			nr_active_links; /* #links with active qcs */
 	unsigned int		sas_last_tag;	/* track next tag hw expects */
@@ -1183,7 +1183,7 @@ extern void ata_id_c_string(const u16 *id, unsigned char *s,
 extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
 					struct ata_taskfile *tf, u16 *id);
 extern void ata_qc_complete(struct ata_queued_cmd *qc);
-extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active);
+extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
 extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
 extern int ata_std_bios_param(struct scsi_device *sdev,
 			      struct block_device *bdev,
@@ -1483,14 +1483,14 @@ extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
 				 const char *name);
 #endif
-static inline unsigned int ata_tag_valid(unsigned int tag)
+static inline bool ata_tag_internal(unsigned int tag)
 {
-	return (tag < ATA_MAX_QUEUE) ? 1 : 0;
+	return tag == ATA_TAG_INTERNAL;
 }
-static inline unsigned int ata_tag_internal(unsigned int tag)
+static inline bool ata_tag_valid(unsigned int tag)
 {
-	return tag == ATA_TAG_INTERNAL;
+	return tag < ATA_MAX_QUEUE || ata_tag_internal(tag);
 }
 /*
@@ -1653,7 +1653,7 @@ static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
 static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
 							unsigned int tag)
 {
-	if (likely(ata_tag_valid(tag)))
+	if (ata_tag_valid(tag))
 		return &ap->qcmd[tag];
 	return NULL;
 }
...
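
To spell out the new helper semantics from the hunk above, a small
self-check whose values follow directly from the definitions (the
demo_* wrapper is illustrative only):

        static bool demo_tag_checks(void)
        {
                return  ata_tag_valid(0) &&                     /* hw tag 0  */
                        ata_tag_valid(ATA_MAX_QUEUE - 1) &&     /* hw tag 31 */
                        ata_tag_valid(ATA_TAG_INTERNAL) &&      /* slot 32   */
                        !ata_tag_valid(ATA_TAG_INTERNAL + 1) &&
                        ata_tag_internal(ATA_TAG_INTERNAL) &&
                        !ata_tag_internal(0);
        }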