Commit 8a8bc223 authored by Tejun Heo, committed by Linus Torvalds

libata: revert convert-to-block-tagging patches

This patch reverts the following three commits which convert libata to
use block layer tagging.

 43a49cbd
 e013e13b
 2fca5ccf

Although using block layer tagging is the right direction, libata does not
work correctly with the current conversion because of the tight coupling
among tag numbers, data structure allocation, and hardware command slot
allocation.
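
(Editorial illustration: the coupling can be pictured with a hypothetical controller whose command table is indexed by tag, so whichever layer assigns the tag also chooses the hardware slot the command occupies. The hypo_* names below are invented for illustration only; they do not come from this commit or any real driver. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

#define HYPO_NR_SLOTS 32

/* One entry of an imaginary controller's per-port command table. */
struct hypo_slot {
	uint64_t lba;
	uint32_t nbytes;
};

static struct hypo_slot hypo_cmd_table[HYPO_NR_SLOTS];
static unsigned int hypo_issue_reg;	/* one "doorbell" bit per slot */

/* The qc tag is used directly as the command-table index, so whatever
 * allocates the tag also decides which hardware slot is used; this is
 * the coupling that makes arbitrary block-layer tag assignment risky. */
static void hypo_issue(unsigned int tag, uint64_t lba, uint32_t nbytes)
{
	hypo_cmd_table[tag].lba = lba;
	hypo_cmd_table[tag].nbytes = nbytes;
	hypo_issue_reg |= 1u << tag;
}

int main(void)
{
	hypo_issue(0, 2048, 4096);		/* a lone non-NCQ command: tag/slot 0 */
	printf("issue reg: %#x\n", hypo_issue_reg);	/* prints: issue reg: 0x1 */
	return 0;
}

End of illustration.)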

The biggest problem is guaranteeing that tag 0 is always used for
non-NCQ commands.  Due to the way blk-tag is implemented and how SCSI
starts and finishes requests, such a guarantee can't be made.  I'm not
sure whether this would actually break any low-level driver, but it
doesn't look like a good idea to break such an assumption given the
frailty of ATA controllers.

So, for the time being, keep using the old dumb in-libata qc
allocation.
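
(Editorial illustration: the allocation policy being restored can be modelled outside the kernel. The sketch below mirrors the logic of the re-added ata_qc_new()/ata_qc_free(); the demo_* names and plain user-space bit operations are illustrative, while the kernel code uses test_and_set_bit()/clear_bit() on ap->qc_allocated. Tags come from a per-port bitmap scanned from bit 0, the last tag is reserved for the internal command, and a lone non-NCQ command therefore always lands on tag 0.

#include <assert.h>
#include <stdio.h>

#define DEMO_MAX_QUEUE    32                      /* mirrors ATA_MAX_QUEUE */
#define DEMO_TAG_INTERNAL (DEMO_MAX_QUEUE - 1)    /* last tag: internal commands */

static unsigned long qc_allocated;                /* per-port tag bitmap */

/* Mirrors the restored ata_qc_new(): hand out the first free tag,
 * never touching the reserved internal slot.  Returns -1 when full. */
static int demo_tag_alloc(void)
{
	int i;

	for (i = 0; i < DEMO_TAG_INTERNAL; i++)
		if (!(qc_allocated & (1UL << i))) {
			qc_allocated |= 1UL << i;
			return i;
		}
	return -1;
}

/* Mirrors the restored ata_qc_free(): return the tag to the bitmap. */
static void demo_tag_free(int tag)
{
	qc_allocated &= ~(1UL << tag);
}

int main(void)
{
	int i, a, b, c;

	/* Non-NCQ: one command in flight at a time, so the first-free scan
	 * always yields tag 0 -- the guarantee blk-tag could not provide. */
	for (i = 0; i < 4; i++) {
		a = demo_tag_alloc();
		assert(a == 0);
		demo_tag_free(a);
	}

	/* NCQ: concurrent commands simply take ascending free tags. */
	a = demo_tag_alloc();
	b = demo_tag_alloc();
	c = demo_tag_alloc();
	printf("ncq tags: %d %d %d\n", a, b, c);  /* prints: ncq tags: 0 1 2 */
	return 0;
}

The internal-command path in the first hunk below (ata_exec_internal_sg) reserves its tag the same way, directly via test_and_set_bit(), and BUG()s if the bit is somehow already set. End of illustration.)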
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Jeff Garzik <jeff@garzik.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f7160c75
@@ -1712,6 +1712,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
 	else
 		tag = 0;
 
+	if (test_and_set_bit(tag, &ap->qc_allocated))
+		BUG();
 	qc = __ata_qc_from_tag(ap, tag);
 
 	qc->tag = tag;
@@ -4562,6 +4564,37 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
 #endif /* __BIG_ENDIAN */
 }
 
+/**
+ * ata_qc_new - Request an available ATA command, for queueing
+ * @ap: Port associated with device @dev
+ * @dev: Device from whom we request an available command structure
+ *
+ * LOCKING:
+ * None.
+ */
+
+static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
+{
+	struct ata_queued_cmd *qc = NULL;
+	unsigned int i;
+
+	/* no command while frozen */
+	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
+		return NULL;
+
+	/* the last tag is reserved for internal command. */
+	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
+		if (!test_and_set_bit(i, &ap->qc_allocated)) {
+			qc = __ata_qc_from_tag(ap, i);
+			break;
+		}
+
+	if (qc)
+		qc->tag = i;
+
+	return qc;
+}
+
 /**
  * ata_qc_new_init - Request an available ATA command, and initialize it
  * @dev: Device from whom we request an available command structure
@@ -4571,20 +4604,16 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
  * None.
  */
 
-struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
+struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
 {
 	struct ata_port *ap = dev->link->ap;
 	struct ata_queued_cmd *qc;
 
-	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
-		return NULL;
-
-	qc = __ata_qc_from_tag(ap, tag);
+	qc = ata_qc_new(ap);
 	if (qc) {
 		qc->scsicmd = NULL;
 		qc->ap = ap;
 		qc->dev = dev;
-		qc->tag = tag;
 
 		ata_qc_reinit(qc);
 	}
@@ -4592,6 +4621,31 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
 	return qc;
 }
 
+/**
+ * ata_qc_free - free unused ata_queued_cmd
+ * @qc: Command to complete
+ *
+ * Designed to free unused ata_queued_cmd object
+ * in case something prevents using it.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_qc_free(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	unsigned int tag;
+
+	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
+
+	qc->flags = 0;
+	tag = qc->tag;
+	if (likely(ata_tag_valid(tag))) {
+		qc->tag = ATA_TAG_POISON;
+		clear_bit(tag, &ap->qc_allocated);
+	}
+}
+
 void __ata_qc_complete(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
...
@@ -709,11 +709,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
 {
 	struct ata_queued_cmd *qc;
 
-	if (cmd->request->tag != -1)
-		qc = ata_qc_new_init(dev, cmd->request->tag);
-	else
-		qc = ata_qc_new_init(dev, 0);
-
+	qc = ata_qc_new_init(dev);
 	if (qc) {
 		qc->scsicmd = cmd;
 		qc->scsidone = done;
@@ -1108,17 +1104,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
 
 		depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
 		depth = min(ATA_MAX_QUEUE - 1, depth);
-
-		/*
-		 * If this device is behind a port multiplier, we have
-		 * to share the tag map between all devices on that PMP.
-		 * Set up the shared tag map here and we get automatic.
-		 */
-		if (dev->link->ap->pmp_link)
-			scsi_init_shared_tag_map(sdev->host, ATA_MAX_QUEUE - 1);
-
-		scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
-		scsi_activate_tcq(sdev, depth);
+		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
 	}
 
 	return 0;
@@ -1958,11 +1944,6 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
 		hdr[1] |= (1 << 7);
 
 	memcpy(rbuf, hdr, sizeof(hdr));
-
-	/* if ncq, set tags supported */
-	if (ata_id_has_ncq(args->id))
-		rbuf[7] |= (1 << 1);
-
 	memcpy(&rbuf[8], "ATA     ", 8);
 	ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
 	ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
...
@@ -74,7 +74,7 @@ extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
 extern void ata_force_cbl(struct ata_port *ap);
 extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
 extern u64 ata_tf_to_lba48(const struct ata_taskfile *tf);
-extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag);
+extern struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev);
 extern int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
 			   u64 block, u32 n_block, unsigned int tf_flags,
 			   unsigned int tag);
@@ -103,6 +103,7 @@ extern int ata_dev_configure(struct ata_device *dev);
 extern int sata_down_spd_limit(struct ata_link *link);
 extern int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel);
 extern void ata_sg_clean(struct ata_queued_cmd *qc);
+extern void ata_qc_free(struct ata_queued_cmd *qc);
 extern void ata_qc_issue(struct ata_queued_cmd *qc);
 extern void __ata_qc_complete(struct ata_queued_cmd *qc);
 extern int atapi_check_dma(struct ata_queued_cmd *qc);
@@ -118,22 +119,6 @@ extern struct ata_port *ata_port_alloc(struct ata_host *host);
 extern void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy);
 extern void ata_lpm_schedule(struct ata_port *ap, enum link_pm);
 
-/**
- * ata_qc_free - free unused ata_queued_cmd
- * @qc: Command to complete
- *
- * Designed to free unused ata_queued_cmd object
- * in case something prevents using it.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-static inline void ata_qc_free(struct ata_queued_cmd *qc)
-{
-	qc->flags = 0;
-	qc->tag = ATA_TAG_POISON;
-}
-
 /* libata-acpi.c */
 #ifdef CONFIG_ATA_ACPI
 extern void ata_acpi_associate_sata_port(struct ata_port *ap);
...
@@ -698,6 +698,7 @@ struct ata_port {
 	unsigned int		cbl;	/* cable type; ATA_CBL_xxx */
 
 	struct ata_queued_cmd	qcmd[ATA_MAX_QUEUE];
+	unsigned long		qc_allocated;
 	unsigned int		qc_active;
 	int			nr_active_links; /* #links with active qcs */