Commit 74a6d0f0 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bart/ide-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/bart/ide-2.6: (33 commits)
  ide-cd: remove dead dsc_overlap setting
  ide: push local_irq_{save,restore}() to do_identify()
  ide: remove superfluous local_irq_{save,restore}() from ide_dump_status()
  ide: move legacy ISA/VLB ports handling to ide-legacy.c (v2)
  ide: move Power Management support to ide-pm.c
  ide: use ATA_DMA_* defines in ide-dma-sff.c
  ide: checkpatch.pl fixes for ide-lib.c
  ide: remove inline tags from ide-probe.c
  ide: remove redundant code from ide_end_drive_cmd()
  ide: struct device - replace bus_id with dev_name(), dev_set_name()
  ide: rework handling of serialized ports (v2)
  cy82c693: remove superfluous ide_cy82c693 chipset type
  trm290: add IDE_HFLAG_TRM290 host flag
  ide: add ->max_sectors field to struct ide_port_info
  rz1000: apply chipset quirks early (v2)
  ide: always set nIEN on idle devices
  ide: fix ->quirk_list checking in ide_do_request()
  gayle: set IDE_HFLAG_SERIALIZE explictly
  cmd64x: set IDE_HFLAG_SERIALIZE explictly for CMD646
  ali14xx: doesn't use shared IRQs
  ...
parents 14eeee88 519d6808
@@ -62,6 +62,9 @@ config IDE_TIMINGS
 config IDE_ATAPI
 	bool
 
+config IDE_LEGACY
+	bool
+
 config BLK_DEV_IDE_SATA
 	bool "Support for SATA (deprecated; conflicts with libata SATA driver)"
 	default n
@@ -856,6 +859,7 @@ config BLK_DEV_4DRIVES
 config BLK_DEV_ALI14XX
 	tristate "ALI M14xx support"
 	select IDE_TIMINGS
+	select IDE_LEGACY
 	help
 	  This driver is enabled at runtime using the "ali14xx.probe" kernel
 	  boot parameter. It enables support for the secondary IDE interface
@@ -866,6 +870,7 @@ config BLK_DEV_ALI14XX
 config BLK_DEV_DTC2278
 	tristate "DTC-2278 support"
+	select IDE_LEGACY
 	help
 	  This driver is enabled at runtime using the "dtc2278.probe" kernel
 	  boot parameter. It enables support for the secondary IDE interface
@@ -876,6 +881,7 @@ config BLK_DEV_DTC2278
 config BLK_DEV_HT6560B
 	tristate "Holtek HT6560B support"
 	select IDE_TIMINGS
+	select IDE_LEGACY
 	help
 	  This driver is enabled at runtime using the "ht6560b.probe" kernel
 	  boot parameter. It enables support for the secondary IDE interface
@@ -886,6 +892,7 @@ config BLK_DEV_HT6560B
 config BLK_DEV_QD65XX
 	tristate "QDI QD65xx support"
 	select IDE_TIMINGS
+	select IDE_LEGACY
 	help
 	  This driver is enabled at runtime using the "qd65xx.probe" kernel
 	  boot parameter. It permits faster I/O speeds to be set. See the
@@ -894,6 +901,7 @@ config BLK_DEV_QD65XX
 config BLK_DEV_UMC8672
 	tristate "UMC-8672 support"
+	select IDE_LEGACY
 	help
 	  This driver is enabled at runtime using the "umc8672.probe" kernel
 	  boot parameter. It enables support for the secondary IDE interface
......
@@ -5,7 +5,7 @@
 EXTRA_CFLAGS += -Idrivers/ide
 
 ide-core-y += ide.o ide-ioctls.o ide-io.o ide-iops.o ide-lib.o ide-probe.o \
-	      ide-taskfile.o ide-park.o ide-pio-blacklist.o
+	      ide-taskfile.o ide-pm.o ide-park.o ide-pio-blacklist.o
 
 # core IDE code
 ide-core-$(CONFIG_IDE_TIMINGS) += ide-timings.o
@@ -15,6 +15,7 @@ ide-core-$(CONFIG_BLK_DEV_IDEDMA) += ide-dma.o
 ide-core-$(CONFIG_BLK_DEV_IDEDMA_SFF) += ide-dma-sff.o
 ide-core-$(CONFIG_IDE_PROC_FS) += ide-proc.o
 ide-core-$(CONFIG_BLK_DEV_IDEACPI) += ide-acpi.o
+ide-core-$(CONFIG_IDE_LEGACY) += ide-legacy.o
 
 obj-$(CONFIG_IDE) += ide-core.o
......
@@ -424,10 +424,10 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
 		.name		= DRV_NAME,
 		.init_chipset	= init_chipset_cmd64x,
 		.enablebits	= {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
-		.chipset	= ide_cmd646,
 		.port_ops	= &cmd64x_port_ops,
 		.dma_ops	= &cmd648_dma_ops,
-		.host_flags	= IDE_HFLAG_ABUSE_PREFETCH,
+		.host_flags	= IDE_HFLAG_SERIALIZE |
+				  IDE_HFLAG_ABUSE_PREFETCH,
 		.pio_mask	= ATA_PIO5,
 		.mwdma_mask	= ATA_MWDMA2,
 		.udma_mask	= ATA_UDMA2,
......
@@ -292,7 +292,6 @@ static const struct ide_port_info cy82c693_chipset __devinitdata = {
 	.name		= DRV_NAME,
 	.init_iops	= init_iops_cy82c693,
 	.port_ops	= &cy82c693_port_ops,
-	.chipset	= ide_cy82c693,
 	.host_flags	= IDE_HFLAG_SINGLE,
 	.pio_mask	= ATA_PIO4,
 	.swdma_mask	= ATA_SWDMA2,
......
@@ -117,6 +117,10 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
 	hw->chipset = ide_generic;
 }
 
+static const struct ide_port_info gayle_port_info = {
+	.host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_NO_DMA,
+};
+
 /*
  * Probe for a Gayle IDE interface (and optionally for an IDE doubler)
  */
@@ -178,7 +182,7 @@ static int __init gayle_init(void)
 		hws[i] = &hw[i];
 	}
 
-	rc = ide_host_add(NULL, hws, NULL);
+	rc = ide_host_add(&gayle_port_info, hws, NULL);
 	if (rc)
 		release_mem_region(res_start, res_n);
......
@@ -135,7 +135,6 @@
 /* various tuning parameters */
 #define HPT_RESET_STATE_ENGINE
 #undef HPT_DELAY_INTERRUPT
-#define HPT_SERIALIZE_IO 0
 
 static const char *quirk_drives[] = {
 	"QUANTUM FIREBALLlct08 08",
@@ -1288,7 +1287,6 @@ static u8 hpt3xx_cable_detect(ide_hwif_t *hwif)
 static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
 {
 	struct hpt_info *info = hpt3xx_get_info(hwif->dev);
-	int serialize = HPT_SERIALIZE_IO;
 	u8 chip_type = info->chip_type;
 
 	/* Cache the channel's MISC. control registers' offset */
@@ -1305,13 +1303,9 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
 		 * Clock is shared between the channels,
 		 * so we'll have to serialize them... :-(
 		 */
-		serialize = 1;
+		hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;
 		hwif->rw_disk = &hpt3xxn_rw_disk;
 	}
-
-	/* Serialize access to this device if needed */
-	if (serialize && hwif->mate)
-		hwif->serialized = hwif->mate->serialized = 1;
 }
 
 static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
......
@@ -615,10 +615,10 @@ void ide_acpi_push_timing(ide_hwif_t *hwif)
 	in_params[0].buffer.length = sizeof(struct GTM_buffer);
 	in_params[0].buffer.pointer = (u8 *)&hwif->acpidata->gtm;
 	in_params[1].type = ACPI_TYPE_BUFFER;
-	in_params[1].buffer.length = sizeof(ATA_ID_WORDS * 2);
+	in_params[1].buffer.length = ATA_ID_WORDS * 2;
 	in_params[1].buffer.pointer = (u8 *)&master->idbuff;
 	in_params[2].type = ACPI_TYPE_BUFFER;
-	in_params[2].buffer.length = sizeof(ATA_ID_WORDS * 2);
+	in_params[2].buffer.length = ATA_ID_WORDS * 2;
 	in_params[2].buffer.pointer = (u8 *)&slave->idbuff;
 	/* Output buffer: _STM has no output */
......
@@ -262,7 +262,6 @@ static void cdrom_end_request(ide_drive_t *drive, int uptodate)
 	struct request *failed = (struct request *) rq->buffer;
 	struct cdrom_info *info = drive->driver_data;
 	void *sense = &info->sense_data;
-	unsigned long flags;
 
 	if (failed) {
 		if (failed->sense) {
@@ -278,11 +277,9 @@ static void cdrom_end_request(ide_drive_t *drive, int uptodate)
 						failed->hard_nr_sectors))
 				BUG();
 		} else {
-			spin_lock_irqsave(&ide_lock, flags);
-			if (__blk_end_request(failed, -EIO,
-					      failed->data_len))
+			if (blk_end_request(failed, -EIO,
+					    failed->data_len))
 				BUG();
-			spin_unlock_irqrestore(&ide_lock, flags);
 		}
 	} else
 		cdrom_analyze_sense_data(drive, NULL, sense);
@@ -317,7 +314,8 @@ static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st)
 static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	struct request *rq = hwif->hwgroup->rq;
+	ide_hwgroup_t *hwgroup = hwif->hwgroup;
+	struct request *rq = hwgroup->rq;
 	int stat, err, sense_key;
 
 	/* check for errors */
@@ -426,16 +424,17 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 			if (time_after(jiffies, info->write_timeout))
 				do_end_request = 1;
 			else {
+				struct request_queue *q = drive->queue;
 				unsigned long flags;
 
 				/*
 				 * take a breather relying on the unplug
 				 * timer to kick us again
 				 */
-				spin_lock_irqsave(&ide_lock, flags);
-				blk_plug_device(drive->queue);
-				spin_unlock_irqrestore(&ide_lock,
-						       flags);
+				spin_lock_irqsave(q->queue_lock, flags);
+				blk_plug_device(q);
+				spin_unlock_irqrestore(q->queue_lock, flags);
+
 				return 1;
 			}
 		}
@@ -504,12 +503,14 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 end_request:
 	if (stat & ATA_ERR) {
+		struct request_queue *q = drive->queue;
 		unsigned long flags;
 
-		spin_lock_irqsave(&ide_lock, flags);
+		spin_lock_irqsave(q->queue_lock, flags);
 		blkdev_dequeue_request(rq);
-		HWGROUP(drive)->rq = NULL;
-		spin_unlock_irqrestore(&ide_lock, flags);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+
+		hwgroup->rq = NULL;
 
 		cdrom_queue_request_sense(drive, rq->sense, rq);
 	} else
@@ -773,52 +774,6 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
 	return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr);
 }
 
-#define IDECD_SEEK_THRESHOLD (1000)		/* 1000 blocks */
-#define IDECD_SEEK_TIMER (5 * WAIT_MIN_SLEEP)	/* 100 ms */
-#define IDECD_SEEK_TIMEOUT (2 * WAIT_CMD)	/* 20 sec */
-
-static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive)
-{
-	struct cdrom_info *info = drive->driver_data;
-	int stat;
-	static int retry = 10;
-
-	ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
-
-	if (cdrom_decode_status(drive, 0, &stat))
-		return ide_stopped;
-
-	drive->atapi_flags |= IDE_AFLAG_SEEKING;
-
-	if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) {
-		if (--retry == 0)
-			drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
-	}
-	return ide_stopped;
-}
-
-static void ide_cd_prepare_seek_request(ide_drive_t *drive, struct request *rq)
-{
-	sector_t frame = rq->sector;
-
-	ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
-
-	sector_div(frame, queue_hardsect_size(drive->queue) >> SECTOR_BITS);
-
-	memset(rq->cmd, 0, BLK_MAX_CDB);
-	rq->cmd[0] = GPCMD_SEEK;
-	put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]);
-
-	rq->timeout = ATAPI_WAIT_PC;
-}
-
-static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive)
-{
-	struct request *rq = drive->hwif->hwgroup->rq;
-
-	return cdrom_transfer_packet_command(drive, rq, &cdrom_seek_intr);
-}
-
 /*
  * Fix up a possibly partially-processed request so that we can start it over
  * entirely, or even put it back on the request queue.
@@ -950,7 +905,8 @@ static int cdrom_newpc_intr_dummy_cb(struct request *rq)
 static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
-	struct request *rq = HWGROUP(drive)->rq;
+	ide_hwgroup_t *hwgroup = hwif->hwgroup;
+	struct request *rq = hwgroup->rq;
 	xfer_func_t *xferfunc;
 	ide_expiry_t *expiry = NULL;
 	int dma_error = 0, dma, stat, thislen, uptodate = 0;
@@ -1148,17 +1104,15 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 end_request:
 	if (blk_pc_request(rq)) {
-		unsigned long flags;
 		unsigned int dlen = rq->data_len;
 
 		if (dma)
 			rq->data_len = 0;
 
-		spin_lock_irqsave(&ide_lock, flags);
-		if (__blk_end_request(rq, 0, dlen))
+		if (blk_end_request(rq, 0, dlen))
 			BUG();
-		HWGROUP(drive)->rq = NULL;
-		spin_unlock_irqrestore(&ide_lock, flags);
+
+		hwgroup->rq = NULL;
 	} else {
 		if (!uptodate)
 			rq->cmd_flags |= REQ_FAILED;
@@ -1260,7 +1214,6 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
 static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 					sector_t block)
 {
-	struct cdrom_info *info = drive->driver_data;
 	ide_handler_t *fn;
 	int xferlen;
@@ -1270,44 +1223,14 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
 		      (unsigned long long)block);
 
 	if (blk_fs_request(rq)) {
-		if (drive->atapi_flags & IDE_AFLAG_SEEKING) {
-			ide_hwif_t *hwif = drive->hwif;
-			unsigned long elapsed = jiffies - info->start_seek;
-			int stat = hwif->tp_ops->read_status(hwif);
-
-			if ((stat & ATA_DSC) != ATA_DSC) {
-				if (elapsed < IDECD_SEEK_TIMEOUT) {
-					ide_stall_queue(drive,
-							IDECD_SEEK_TIMER);
-					return ide_stopped;
-				}
-				printk(KERN_ERR PFX "%s: DSC timeout\n",
-						drive->name);
-			}
-			drive->atapi_flags &= ~IDE_AFLAG_SEEKING;
-		}
-		if (rq_data_dir(rq) == READ &&
-		    IDE_LARGE_SEEK(info->last_block, block,
-				   IDECD_SEEK_THRESHOLD) &&
-		    (drive->dev_flags & IDE_DFLAG_DSC_OVERLAP)) {
-			xferlen = 0;
-			fn = cdrom_start_seek_continuation;
-
-			drive->dma = 0;
-			info->start_seek = jiffies;
-
-			ide_cd_prepare_seek_request(drive, rq);
-		} else {
-			xferlen = 32768;
-			fn = cdrom_start_rw_cont;
-
-			if (cdrom_start_rw(drive, rq) == ide_stopped)
-				return ide_stopped;
-
-			if (ide_cd_prepare_rw_request(drive, rq) == ide_stopped)
-				return ide_stopped;
-		}
-		info->last_block = block;
+		xferlen = 32768;
+		fn = cdrom_start_rw_cont;
+
+		if (cdrom_start_rw(drive, rq) == ide_stopped)
+			return ide_stopped;
+
+		if (ide_cd_prepare_rw_request(drive, rq) == ide_stopped)
+			return ide_stopped;
 	} else if (blk_sense_request(rq) || blk_pc_request(rq) ||
 		   rq->cmd_type == REQ_TYPE_ATA_PC) {
 		xferlen = rq->data_len;
@@ -1908,13 +1831,6 @@ static ide_proc_entry_t idecd_proc[] = {
 	{ NULL, 0, NULL, NULL }
 };
 
-ide_devset_rw_flag(dsc_overlap, IDE_DFLAG_DSC_OVERLAP);
-
-static const struct ide_proc_devset idecd_settings[] = {
-	IDE_PROC_DEVSET(dsc_overlap, 0, 1),
-	{ 0 },
-};
-
 static ide_proc_entry_t *ide_cd_proc_entries(ide_drive_t *drive)
 {
 	return idecd_proc;
@@ -1922,7 +1838,7 @@ static ide_proc_entry_t *ide_cd_proc_entries(ide_drive_t *drive)
 
 static const struct ide_proc_devset *ide_cd_proc_devsets(ide_drive_t *drive)
 {
-	return idecd_settings;
+	return NULL;
 }
 #endif
@@ -2022,11 +1938,6 @@ static int ide_cdrom_setup(ide_drive_t *drive)
 	/* set correct block size */
 	blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE);
 
-	if (drive->next != drive)
-		drive->dev_flags |= IDE_DFLAG_DSC_OVERLAP;
-	else
-		drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
-
 	if (ide_cdrom_register(drive, nslots)) {
 		printk(KERN_ERR PFX "%s: %s failed to register device with the"
 		       " cdrom driver.\n", drive->name, __func__);
@@ -2063,7 +1974,6 @@ static void ide_cd_release(struct kref *kref)
 	kfree(info->toc);
 	if (devinfo->handle == drive)
 		unregister_cdrom(devinfo);
-	drive->dev_flags &= ~IDE_DFLAG_DSC_OVERLAP;
 	drive->driver_data = NULL;
 	blk_queue_prep_rq(drive->queue, NULL);
 	g->private_data = NULL;
......
@@ -88,8 +88,6 @@ struct cdrom_info {
 	struct request_sense sense_data;
 	struct request request_sense_request;
 
-	unsigned long last_block;
-	unsigned long start_seek;
-
 	u8 max_speed;		/* Max speed of the drive. */
 	u8 current_speed;	/* Current speed of the drive. */
......
@@ -98,10 +98,10 @@ int ide_build_dmatable(ide_drive_t *drive, struct request *rq)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	__le32 *table = (__le32 *)hwif->dmatable_cpu;
-	unsigned int is_trm290 = (hwif->chipset == ide_trm290) ? 1 : 0;
 	unsigned int count = 0;
 	int i;
 	struct scatterlist *sg;
+	u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290);
 
 	hwif->sg_nents = ide_build_sglist(drive, rq);
 	if (hwif->sg_nents == 0)
@@ -176,15 +176,10 @@ int ide_dma_setup(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq = hwif->hwgroup->rq;
-	unsigned int reading;
+	unsigned int reading = rq_data_dir(rq) ? 0 : ATA_DMA_WR;
 	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
 	u8 dma_stat;
 
-	if (rq_data_dir(rq))
-		reading = 0;
-	else
-		reading = 1 << 3;
-
 	/* fall back to pio! */
 	if (!ide_build_dmatable(drive, rq)) {
 		ide_map_sg(drive, rq);
@@ -209,10 +204,11 @@ int ide_dma_setup(ide_drive_t *drive)
 	/* clear INTR & ERROR flags */
 	if (mmio)
-		writeb(dma_stat | 6,
+		writeb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
 		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
 	else
-		outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
+		outb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
+		     hwif->dma_base + ATA_DMA_STATUS);
 
 	drive->waiting_for_dma = 1;
 	return 0;
@@ -246,14 +242,13 @@ static int dma_timer_expiry(ide_drive_t *drive)
 	hwif->hwgroup->expiry = NULL;	/* one free ride for now */
 
-	/* 1 dmaing, 2 error, 4 intr */
-	if (dma_stat & 2)	/* ERROR */
+	if (dma_stat & ATA_DMA_ERR)	/* ERROR */
 		return -1;
 
-	if (dma_stat & 1)	/* DMAing */
+	if (dma_stat & ATA_DMA_ACTIVE)	/* DMAing */
 		return WAIT_CMD;
 
-	if (dma_stat & 4)	/* Got an Interrupt */
+	if (dma_stat & ATA_DMA_INTR)	/* Got an Interrupt */
 		return WAIT_CMD;
 
 	return 0;	/* Status is unknown -- reset the bus */
@@ -279,12 +274,11 @@ void ide_dma_start(ide_drive_t *drive)
 	 */
 	if (hwif->host_flags & IDE_HFLAG_MMIO) {
 		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
-		/* start DMA */
-		writeb(dma_cmd | 1,
+		writeb(dma_cmd | ATA_DMA_START,
 		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
 	} else {
 		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
-		outb(dma_cmd | 1, hwif->dma_base + ATA_DMA_CMD);
+		outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
 	}
 
 	wmb();
@@ -296,19 +290,18 @@ int ide_dma_end(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
 	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
-	u8 dma_stat = 0, dma_cmd = 0;
+	u8 dma_stat = 0, dma_cmd = 0, mask;
 
 	drive->waiting_for_dma = 0;
 
+	/* stop DMA */
 	if (mmio) {
-		/* get DMA command mode */
 		dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
-		/* stop DMA */
-		writeb(dma_cmd & ~1,
+		writeb(dma_cmd & ~ATA_DMA_START,
 		       (void __iomem *)(hwif->dma_base + ATA_DMA_CMD));
 	} else {
 		dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD);
-		outb(dma_cmd & ~1, hwif->dma_base + ATA_DMA_CMD);
+		outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD);
 	}
 
 	/* get DMA status */
@@ -316,16 +309,21 @@ int ide_dma_end(ide_drive_t *drive)
 	if (mmio)
 		/* clear the INTR & ERROR bits */
-		writeb(dma_stat | 6,
+		writeb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
 		       (void __iomem *)(hwif->dma_base + ATA_DMA_STATUS));
 	else
-		outb(dma_stat | 6, hwif->dma_base + ATA_DMA_STATUS);
+		outb(dma_stat | ATA_DMA_ERR | ATA_DMA_INTR,
+		     hwif->dma_base + ATA_DMA_STATUS);
 
 	/* purge DMA mappings */
 	ide_destroy_dmatable(drive);
 
-	/* verify good DMA status */
 	wmb();
-	return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
+
+	/* verify good DMA status */
+	mask = ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR;
+	if ((dma_stat & mask) != ATA_DMA_INTR)
+		return 0x10 | dma_stat;
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_dma_end);
@@ -335,11 +333,7 @@ int ide_dma_test_irq(ide_drive_t *drive)
 	ide_hwif_t *hwif = drive->hwif;
 	u8 dma_stat = hwif->tp_ops->read_sff_dma_status(hwif);
 
-	/* return 1 if INTR asserted */
-	if ((dma_stat & 4) == 4)
-		return 1;
-
-	return 0;
+	return (dma_stat & ATA_DMA_INTR) ? 1 : 0;
 }
 EXPORT_SYMBOL_GPL(ide_dma_test_irq);
......
@@ -84,11 +84,11 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 		ide_dma_on(drive);
 	}
 
-	if (!__blk_end_request(rq, error, nr_bytes)) {
-		if (dequeue)
-			HWGROUP(drive)->rq = NULL;
+	if (!blk_end_request(rq, error, nr_bytes))
 		ret = 0;
-	}
+
+	if (ret == 0 && dequeue)
+		drive->hwif->hwgroup->rq = NULL;
 
 	return ret;
 }
@@ -107,16 +107,7 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 {
 	unsigned int nr_bytes = nr_sectors << 9;
-	struct request *rq;
-	unsigned long flags;
-	int ret = 1;
-
-	/*
-	 * room for locking improvements here, the calls below don't
-	 * need the queue lock held at all
-	 */
-	spin_lock_irqsave(&ide_lock, flags);
-	rq = HWGROUP(drive)->rq;
+	struct request *rq = drive->hwif->hwgroup->rq;
 
 	if (!nr_bytes) {
 		if (blk_pc_request(rq))
@@ -125,105 +116,10 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 			nr_bytes = rq->hard_cur_sectors << 9;
 	}
 
-	ret = __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
-
-	spin_unlock_irqrestore(&ide_lock, flags);
-
-	return ret;
+	return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
 }
 EXPORT_SYMBOL(ide_end_request);
 
-static void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
-{
-	struct request_pm_state *pm = rq->data;
-
-#ifdef DEBUG_PM
-	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
-	       drive->name, pm->pm_step);
-#endif
-	if (drive->media != ide_disk)
-		return;
-
-	switch (pm->pm_step) {
-	case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
-		if (pm->pm_state == PM_EVENT_FREEZE)
-			pm->pm_step = IDE_PM_COMPLETED;
-		else
-			pm->pm_step = IDE_PM_STANDBY;
-		break;
-	case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
-		pm->pm_step = IDE_PM_COMPLETED;
-		break;
-	case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
-		pm->pm_step = IDE_PM_IDLE;
-		break;
-	case IDE_PM_IDLE: /* Resume step 2 (idle)*/
-		pm->pm_step = IDE_PM_RESTORE_DMA;
-		break;
-	}
-}
-
-static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
-{
-	struct request_pm_state *pm = rq->data;
-	ide_task_t *args = rq->special;
-
-	memset(args, 0, sizeof(*args));
-
-	switch (pm->pm_step) {
-	case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
-		if (drive->media != ide_disk)
-			break;
-		/* Not supported? Switch to next step now. */
-		if (ata_id_flush_enabled(drive->id) == 0 ||
-		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
-			ide_complete_power_step(drive, rq);
-			return ide_stopped;
-		}
-		if (ata_id_flush_ext_enabled(drive->id))
-			args->tf.command = ATA_CMD_FLUSH_EXT;
-		else
-			args->tf.command = ATA_CMD_FLUSH;
-		goto out_do_tf;
-	case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
-		args->tf.command = ATA_CMD_STANDBYNOW1;
-		goto out_do_tf;
-	case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
-		ide_set_max_pio(drive);
-		/*
-		 * skip IDE_PM_IDLE for ATAPI devices
-		 */
-		if (drive->media != ide_disk)
-			pm->pm_step = IDE_PM_RESTORE_DMA;
-		else
-			ide_complete_power_step(drive, rq);
-		return ide_stopped;
-	case IDE_PM_IDLE: /* Resume step 2 (idle) */
-		args->tf.command = ATA_CMD_IDLEIMMEDIATE;
-		goto out_do_tf;
-	case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
-		/*
-		 * Right now, all we do is call ide_set_dma(drive),
-		 * we could be smarter and check for current xfer_speed
-		 * in struct drive etc...
-		 */
-		if (drive->hwif->dma_ops == NULL)
-			break;
-		/*
-		 * TODO: respect IDE_DFLAG_USING_DMA
-		 */
-		ide_set_dma(drive);
-		break;
-	}
-
-	pm->pm_step = IDE_PM_COMPLETED;
-
-	return ide_stopped;
-
-out_do_tf:
-	args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
-	args->data_phase = TASKFILE_NO_DATA;
-
-	return do_rw_taskfile(drive, args);
-}
-
 /**
  * ide_end_dequeued_request - complete an IDE I/O
  * @drive: IDE device for the I/O
@@ -242,48 +138,12 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
 int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
 			     int uptodate, int nr_sectors)
 {
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&ide_lock, flags);
 	BUG_ON(!blk_rq_started(rq));
-	ret = __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
-	spin_unlock_irqrestore(&ide_lock, flags);
 
-	return ret;
+	return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
 }
 EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
 
-/**
- * ide_complete_pm_request - end the current Power Management request
- * @drive: target drive
- * @rq: request
- *
- * This function cleans up the current PM request and stops the queue
- * if necessary.
- */
-static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
-{
-	unsigned long flags;
-
-#ifdef DEBUG_PM
-	printk("%s: completing PM request, %s\n", drive->name,
-	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
-#endif
-	spin_lock_irqsave(&ide_lock, flags);
-	if (blk_pm_suspend_request(rq)) {
-		blk_stop_queue(drive->queue);
-	} else {
-		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
-		blk_start_queue(drive->queue);
-	}
-	HWGROUP(drive)->rq = NULL;
-	if (__blk_end_request(rq, 0, 0))
-		BUG();
-	spin_unlock_irqrestore(&ide_lock, flags);
-}
-
 /**
  * ide_end_drive_cmd - end an explicit drive command
  * @drive: command
@@ -300,19 +160,12 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
 void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 {
-	unsigned long flags;
-	struct request *rq;
-
-	spin_lock_irqsave(&ide_lock, flags);
-	rq = HWGROUP(drive)->rq;
-	spin_unlock_irqrestore(&ide_lock, flags);
+	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
+	struct request *rq = hwgroup->rq;
 
 	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
 		ide_task_t *task = (ide_task_t *)rq->special;
 
-		if (rq->errors == 0)
-			rq->errors = !OK_STAT(stat, ATA_DRDY, BAD_STAT);
-
 		if (task) {
 			struct ide_taskfile *tf = &task->tf;
@@ -333,15 +186,14 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 		return;
 	}
 
-	spin_lock_irqsave(&ide_lock, flags);
-	HWGROUP(drive)->rq = NULL;
+	hwgroup->rq = NULL;
+
 	rq->errors = err;
-	if (unlikely(__blk_end_request(rq, (rq->errors ? -EIO : 0),
-				       blk_rq_bytes(rq))))
+
+	if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
+				     blk_rq_bytes(rq))))
 		BUG();
-	spin_unlock_irqrestore(&ide_lock, flags);
 }
 EXPORT_SYMBOL(ide_end_drive_cmd);
 
 static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
@@ -720,40 +572,6 @@ static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
 	}
 }
 
-static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
-{
-	struct request_pm_state *pm = rq->data;
-
-	if (blk_pm_suspend_request(rq) &&
-	    pm->pm_step == IDE_PM_START_SUSPEND)
-		/* Mark drive blocked when starting the suspend sequence. */
-		drive->dev_flags |= IDE_DFLAG_BLOCKED;
-	else if (blk_pm_resume_request(rq) &&
-		 pm->pm_step == IDE_PM_START_RESUME) {
-		/*
-		 * The first thing we do on wakeup is to wait for BSY bit to
-		 * go away (with a looong timeout) as a drive on this hwif may
-		 * just be POSTing itself.
-		 * We do that before even selecting as the "other" device on
-		 * the bus may be broken enough to walk on our toes at this
-		 * point.
-		 */
-		ide_hwif_t *hwif = drive->hwif;
-		int rc;
-#ifdef DEBUG_PM
-		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
-#endif
-		rc = ide_wait_not_busy(hwif, 35000);
-		if (rc)
-			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
-		SELECT_DRIVE(drive);
-		hwif->tp_ops->set_irq(hwif, 1);
-		rc = ide_wait_not_busy(hwif, 100000);
-		if (rc)
-			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
-	}
-}
-
 /**
  * start_request - start of I/O and command issuing for IDE
  *
@@ -927,7 +745,7 @@ static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
 /*
  * Issue a new request to a drive from hwgroup
- * Caller must have already done spin_lock_irqsave(&ide_lock, ..);
+ * Caller must have already done spin_lock_irqsave(&hwgroup->lock, ..);
  *
  * A hwgroup is a serialized group of IDE interfaces. Usually there is
  * exactly one hwif (interface) per hwgroup, but buggy controllers (eg. CMD640)
@@ -939,7 +757,7 @@ static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
  * possibly along with many other devices. This is especially common in
  * PCI-based systems with off-board IDE controller cards.
  *
- * The IDE driver uses the single global ide_lock spinlock to protect
+ * The IDE driver uses a per-hwgroup spinlock to protect
  * access to the request queues, and to protect the hwgroup->busy flag.
 *
 * The first thread into the driver for a particular hwgroup sets the
@@ -955,7 +773,7 @@ static inline ide_drive_t *choose_drive (ide_hwgroup_t *hwgroup)
 * will start the next request from the queue. If no more work remains,
 * the driver will clear the hwgroup->busy flag and exit.
 *
- * The ide_lock (spinlock) is used to protect all access to the
+ * The per-hwgroup spinlock is used to protect all access to the
 * hwgroup->busy flag, but is otherwise not needed for most processing in
 * the driver. This makes the driver much more friendlier to shared IRQs
 * than previous designs, while remaining 100% (?) SMP safe and capable.
@@ -968,7 +786,7 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 	ide_startstop_t startstop;
 	int loops = 0;
 
-	/* caller must own ide_lock */
+	/* caller must own hwgroup->lock */
 	BUG_ON(!irqs_disabled());
 
 	while (!hwgroup->busy) {
@@ -1023,12 +841,12 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 		}
 	again:
 		hwif = HWIF(drive);
-		if (hwgroup->hwif->sharing_irq && hwif != hwgroup->hwif) {
+		if (hwif != hwgroup->hwif) {
 			/*
 			 * set nIEN for previous hwif, drives in the
 			 * quirk_list may not like intr setups/cleanups
 			 */
-			if (drive->quirk_list != 1)
+			if (drive->quirk_list == 0)
 				hwif->tp_ops->set_irq(hwif, 0);
 		}
 		hwgroup->hwif = hwif;
@@ -1036,11 +854,6 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
 		drive->service_start = jiffies;
 
-		if (blk_queue_plugged(drive->queue)) {
-			printk(KERN_ERR "ide: huh? queue was plugged!\n");
-			break;
-		}
-
 		/*
 		 * we know that the queue isn't empty, but this can happen
 		 * if the q->prep_rq_fn() decides to kill a request
@@ -1090,11 +903,11 @@ static void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
 		 */
 		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
 			disable_irq_nosync(hwif->irq);
-		spin_unlock(&ide_lock);
+		spin_unlock(&hwgroup->lock);
 		local_irq_enable_in_hardirq();
 			/* allow other IRQs while we start this request */
 		startstop = start_request(drive, rq);
-		spin_lock_irq(&ide_lock);
+		spin_lock_irq(&hwgroup->lock);
 		if (masked_irq != IDE_NO_IRQ && hwif->irq != masked_irq)
 			enable_irq(hwif->irq);
 		if (startstop == ide_stopped)
@@ -1192,7 +1005,7 @@ void ide_timer_expiry (unsigned long data)
 	unsigned long flags;
 	unsigned long wait = -1;
 
-	spin_lock_irqsave(&ide_lock, flags);
+	spin_lock_irqsave(&hwgroup->lock, flags);
 
 	if (((handler = hwgroup->handler) == NULL) ||
 	    (hwgroup->req_gen != hwgroup->req_gen_timer)) {
@@ -1225,7 +1038,7 @@ void ide_timer_expiry (unsigned long data)
 			hwgroup->timer.expires = jiffies + wait;
 			hwgroup->req_gen_timer = hwgroup->req_gen;
 			add_timer(&hwgroup->timer);
-			spin_unlock_irqrestore(&ide_lock, flags);
+			spin_unlock_irqrestore(&hwgroup->lock, flags);
 			return;
 		}
 	}
@@ -1235,7 +1048,7 @@ void ide_timer_expiry (unsigned long data)
 		 * the handler() function, which means we need to
 		 * globally mask the specific IRQ:
 		 */
-		spin_unlock(&ide_lock);
+		spin_unlock(&hwgroup->lock);
 		hwif = HWIF(drive);
 		/* disable_irq_nosync ?? */
 		disable_irq(hwif->irq);
@@ -1259,14 +1072,14 @@ void ide_timer_expiry (unsigned long data)
 				       hwif->tp_ops->read_status(hwif));
 		}
 		drive->service_time = jiffies - drive->service_start;
-		spin_lock_irq(&ide_lock);
+		spin_lock_irq(&hwgroup->lock);
 		enable_irq(hwif->irq);
 		if (startstop == ide_stopped)
 			hwgroup->busy = 0;
 	}
 	ide_do_request(hwgroup, IDE_NO_IRQ);
-	spin_unlock_irqrestore(&ide_lock, flags);
+	spin_unlock_irqrestore(&hwgroup->lock, flags);
 }
 
 /**
@@ -1359,18 +1172,16 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 {
 	unsigned long flags;
 	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)dev_id;
-	ide_hwif_t *hwif;
+	ide_hwif_t *hwif = hwgroup->hwif;
 	ide_drive_t *drive;
 	ide_handler_t *handler;
 	ide_startstop_t startstop;
+	irqreturn_t irq_ret = IRQ_NONE;
 
-	spin_lock_irqsave(&ide_lock, flags);
-	hwif = hwgroup->hwif;
+	spin_lock_irqsave(&hwgroup->lock, flags);
 
-	if (!ide_ack_intr(hwif)) {
-		spin_unlock_irqrestore(&ide_lock, flags);
-		return IRQ_NONE;
-	}
+	if (!ide_ack_intr(hwif))
+		goto out;
 
 	if ((handler = hwgroup->handler) == NULL || hwgroup->polling) {
 		/*
@@ -1406,9 +1217,9 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 			(void)hwif->tp_ops->read_status(hwif);
 #endif /* CONFIG_BLK_DEV_IDEPCI */
 		}
-		spin_unlock_irqrestore(&ide_lock, flags);
-		return IRQ_NONE;
+		goto out;
 	}
+
 	drive = hwgroup->drive;
 	if (!drive) {
 		/*
@@ -1417,10 +1228,10 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 		 *
 		 * [Note - this can occur if the drive is hot unplugged]
 		 */
-		spin_unlock_irqrestore(&ide_lock, flags);
-		return IRQ_HANDLED;
+		goto out_handled;
 	}
-	if (!drive_is_ready(drive)) {
+
+	if (!drive_is_ready(drive))
 		/*
 		 * This happens regularly when we share a PCI IRQ with
 		 * another device. Unfortunately, it can also happen
@@ -1428,9 +1239,8 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 		 * their status register is up to date. Hopefully we have
 		 * enough advance overhead that the latter isn't a problem.
 		 */
-		spin_unlock_irqrestore(&ide_lock, flags);
-		return IRQ_NONE;
-	}
+		goto out;
+
 	if (!hwgroup->busy) {
 		hwgroup->busy = 1;	/* paranoia */
 		printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
@@ -1438,7 +1248,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	hwgroup->handler = NULL;
 	hwgroup->req_gen++;
 	del_timer(&hwgroup->timer);
-	spin_unlock(&ide_lock);
+	spin_unlock(&hwgroup->lock);
 
 	if (hwif->port_ops && hwif->port_ops->clear_irq)
 		hwif->port_ops->clear_irq(drive);
@@ -1449,7 +1259,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 	/* service this interrupt, may set handler for next interrupt */
 	startstop = handler(drive);
 
-	spin_lock_irq(&ide_lock);
+	spin_lock_irq(&hwgroup->lock);
 
 	/*
 	 * Note that handler() may have set things up for another
 	 * interrupt to occur soon, but it cannot happen until
@@ -1467,8 +1277,11 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 			      "on exit\n", drive->name);
 		}
 	}
-	spin_unlock_irqrestore(&ide_lock, flags);
-	return IRQ_HANDLED;
+out_handled:
+	irq_ret = IRQ_HANDLED;
+out:
+	spin_unlock_irqrestore(&hwgroup->lock, flags);
+
+	return irq_ret;
 }
 
 /**
@@ -1488,16 +1301,17 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
 {
+	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
+	struct request_queue *q = drive->queue;
 	unsigned long flags;
-	ide_hwgroup_t *hwgroup = HWGROUP(drive);
 
-	spin_lock_irqsave(&ide_lock, flags);
 	hwgroup->rq = NULL;
-	__elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
-	blk_start_queueing(drive->queue);
-	spin_unlock_irqrestore(&ide_lock, flags);
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	blk_start_queueing(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(ide_do_drive_cmd);
 
 void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
......
@@ -19,7 +19,6 @@ int ide_setting_ioctl(ide_drive_t *drive, struct block_device *bdev,
 		      const struct ide_ioctl_devset *s)
 {
 	const struct ide_devset *ds;
-	unsigned long flags;
 	int err = -EOPNOTSUPP;
 
 	for (; (ds = s->setting); s++) {
@@ -33,9 +32,7 @@ int ide_setting_ioctl(ide_drive_t *drive, struct block_device *bdev,
 read_val:
 		mutex_lock(&ide_setting_mtx);
-		spin_lock_irqsave(&ide_lock, flags);
 		err = ds->get(drive);
-		spin_unlock_irqrestore(&ide_lock, flags);
 		mutex_unlock(&ide_setting_mtx);
 		return err >= 0 ? put_user(err, (long __user *)arg) : err;
@@ -98,7 +95,7 @@ static int ide_set_nice_ioctl(ide_drive_t *drive, unsigned long arg)
 		return -EPERM;
 
 	if (((arg >> IDE_NICE_DSC_OVERLAP) & 1) &&
-	    (drive->media == ide_disk || drive->media == ide_floppy ||
+	    (drive->media != ide_tape ||
 	     (drive->dev_flags & IDE_DFLAG_SCSI)))
 		return -EPERM;
......
@@ -835,10 +835,12 @@ static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
 void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
 		      unsigned int timeout, ide_expiry_t *expiry)
 {
+	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ide_lock, flags);
+	spin_lock_irqsave(&hwgroup->lock, flags);
 	__ide_set_handler(drive, handler, timeout, expiry);
-	spin_unlock_irqrestore(&ide_lock, flags);
+	spin_unlock_irqrestore(&hwgroup->lock, flags);
 }
 EXPORT_SYMBOL(ide_set_handler);
@@ -860,10 +862,11 @@ EXPORT_SYMBOL(ide_set_handler);
 void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
 			 unsigned timeout, ide_expiry_t *expiry)
 {
+	ide_hwif_t *hwif = drive->hwif;
+	ide_hwgroup_t *hwgroup = hwif->hwgroup;
 	unsigned long flags;
-	ide_hwif_t *hwif = HWIF(drive);
 
-	spin_lock_irqsave(&ide_lock, flags);
+	spin_lock_irqsave(&hwgroup->lock, flags);
 	__ide_set_handler(drive, handler, timeout, expiry);
 	hwif->tp_ops->exec_command(hwif, cmd);
 	/*
@@ -873,19 +876,20 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
 	 * FIXME: we could skip this delay with care on non shared devices
 	 */
 	ndelay(400);
-	spin_unlock_irqrestore(&ide_lock, flags);
+	spin_unlock_irqrestore(&hwgroup->lock, flags);
 }
 EXPORT_SYMBOL(ide_execute_command);
 
 void ide_execute_pkt_cmd(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
+	ide_hwgroup_t *hwgroup = hwif->hwgroup;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ide_lock, flags);
+	spin_lock_irqsave(&hwgroup->lock, flags);
 	hwif->tp_ops->exec_command(hwif, ATA_CMD_PACKET);
 	ndelay(400);
-	spin_unlock_irqrestore(&ide_lock, flags);
+	spin_unlock_irqrestore(&hwgroup->lock, flags);
 }
 EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd);
@@ -1076,22 +1080,16 @@ static void pre_reset(ide_drive_t *drive)
  */
 static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
 {
-	unsigned int unit;
-	unsigned long flags, timeout;
-	ide_hwif_t *hwif;
-	ide_hwgroup_t *hwgroup;
-	struct ide_io_ports *io_ports;
-	const struct ide_tp_ops *tp_ops;
+	ide_hwif_t *hwif = drive->hwif;
+	ide_hwgroup_t *hwgroup = hwif->hwgroup;
+	struct ide_io_ports *io_ports = &hwif->io_ports;
+	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
 	const struct ide_port_ops *port_ops;
+	unsigned long flags, timeout;
+	unsigned int unit;
 	DEFINE_WAIT(wait);
 
-	spin_lock_irqsave(&ide_lock, flags);
-	hwif = HWIF(drive);
-	hwgroup = HWGROUP(drive);
-	io_ports = &hwif->io_ports;
-	tp_ops = hwif->tp_ops;
+	spin_lock_irqsave(&hwgroup->lock, flags);
 
 	/* We must not reset with running handlers */
 	BUG_ON(hwgroup->handler != NULL);
@@ -1106,7 +1104,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
 		hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
 		hwgroup->polling = 1;
 		__ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
-		spin_unlock_irqrestore(&ide_lock, flags);
+		spin_unlock_irqrestore(&hwgroup->lock, flags);
 		return ide_started;
 	}
@@ -1129,9 +1127,9 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
 			if (time_before_eq(timeout, now))
 				break;
 
-			spin_unlock_irqrestore(&ide_lock, flags);
+			spin_unlock_irqrestore(&hwgroup->lock, flags);
 			timeout = schedule_timeout_uninterruptible(timeout - now);
-			spin_lock_irqsave(&ide_lock, flags);
+			spin_lock_irqsave(&hwgroup->lock, flags);
 		} while (timeout);
 		finish_wait(&ide_park_wq, &wait);
@@ -1143,7 +1141,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
 		pre_reset(&hwif->drives[unit]);
 
 	if (io_ports->ctl_addr == 0) {
-		spin_unlock_irqrestore(&ide_lock, flags);
+		spin_unlock_irqrestore(&hwgroup->lock, flags);
 		ide_complete_drive_reset(drive, -ENXIO);
 		return ide_stopped;
 	}
@@ -1179,7 +1177,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
 	if (port_ops && port_ops->resetproc)
 		port_ops->resetproc(drive);
 
-	spin_unlock_irqrestore(&ide_lock, flags);
+	spin_unlock_irqrestore(&hwgroup->lock, flags);
 
 	return ide_started;
 }
......
#include <linux/kernel.h>
#include <linux/ide.h>
static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
u8 port_no, const struct ide_port_info *d,
unsigned long config)
{
unsigned long base, ctl;
int irq;
if (port_no == 0) {
base = 0x1f0;
ctl = 0x3f6;
irq = 14;
} else {
base = 0x170;
ctl = 0x376;
irq = 15;
}
if (!request_region(base, 8, d->name)) {
printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
d->name, base, base + 7);
return;
}
if (!request_region(ctl, 1, d->name)) {
printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
d->name, ctl);
release_region(base, 8);
return;
}
ide_std_init_ports(hw, base, ctl);
hw->irq = irq;
hw->chipset = d->chipset;
hw->config = config;
hws[port_no] = hw;
}
int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
{
hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
memset(&hw, 0, sizeof(hw));
if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
ide_legacy_init_one(hws, &hw[0], 0, d, config);
ide_legacy_init_one(hws, &hw[1], 1, d, config);
if (hws[0] == NULL && hws[1] == NULL &&
(d->host_flags & IDE_HFLAG_SINGLE))
return -ENOENT;
return ide_host_add(d, hws, NULL);
}
EXPORT_SYMBOL_GPL(ide_legacy_device_add);
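For orientation, a minimal sketch of how a legacy ISA/VLB host driver would call the new ide_legacy_device_add() helper above. The driver name, flags and init function here are hypothetical illustrations, not part of this commit:

#include <linux/module.h>
#include <linux/ide.h>

/* Hypothetical port description; a real driver supplies its own ops/flags. */
static const struct ide_port_info example_port_info = {
	.name		= "example",
	.host_flags	= IDE_HFLAG_NO_DMA,
};

static int __init example_ide_init(void)
{
	/* The second argument ends up in hw->config; 0 means no extra data. */
	return ide_legacy_device_add(&example_port_info, 0);
}
module_init(example_ide_init);

Whether the second port is probed at all is governed by IDE_HFLAG_QD_2ND_PORT and IDE_HFLAG_SINGLE, exactly as in the helper above.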
...@@ -43,7 +43,6 @@ const char *ide_xfer_verbose(u8 mode) ...@@ -43,7 +43,6 @@ const char *ide_xfer_verbose(u8 mode)
return s; return s;
} }
EXPORT_SYMBOL(ide_xfer_verbose); EXPORT_SYMBOL(ide_xfer_verbose);
/** /**
...@@ -87,7 +86,7 @@ static u8 ide_rate_filter(ide_drive_t *drive, u8 speed) ...@@ -87,7 +86,7 @@ static u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
* This is used by most chipset support modules when "auto-tuning". * This is used by most chipset support modules when "auto-tuning".
*/ */
u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode) u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
{ {
u16 *id = drive->id; u16 *id = drive->id;
int pio_mode = -1, overridden = 0; int pio_mode = -1, overridden = 0;
...@@ -131,7 +130,6 @@ u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode) ...@@ -131,7 +130,6 @@ u8 ide_get_best_pio_mode (ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
return pio_mode; return pio_mode;
} }
EXPORT_SYMBOL_GPL(ide_get_best_pio_mode); EXPORT_SYMBOL_GPL(ide_get_best_pio_mode);
/* req_pio == "255" for auto-tune */ /* req_pio == "255" for auto-tune */
...@@ -162,7 +160,6 @@ void ide_set_pio(ide_drive_t *drive, u8 req_pio) ...@@ -162,7 +160,6 @@ void ide_set_pio(ide_drive_t *drive, u8 req_pio)
(void)ide_set_pio_mode(drive, XFER_PIO_0 + pio); (void)ide_set_pio_mode(drive, XFER_PIO_0 + pio);
} }
EXPORT_SYMBOL_GPL(ide_set_pio); EXPORT_SYMBOL_GPL(ide_set_pio);
/** /**
...@@ -173,7 +170,7 @@ EXPORT_SYMBOL_GPL(ide_set_pio); ...@@ -173,7 +170,7 @@ EXPORT_SYMBOL_GPL(ide_set_pio);
* Enable or disable bounce buffering for the device. Drives move * Enable or disable bounce buffering for the device. Drives move
* between PIO and DMA and that changes the rules we need. * between PIO and DMA and that changes the rules we need.
*/ */
void ide_toggle_bounce(ide_drive_t *drive, int on) void ide_toggle_bounce(ide_drive_t *drive, int on)
{ {
u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */ u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */
...@@ -243,14 +240,13 @@ int ide_set_dma_mode(ide_drive_t *drive, const u8 mode) ...@@ -243,14 +240,13 @@ int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
return ide_config_drive_speed(drive, mode); return ide_config_drive_speed(drive, mode);
} }
} }
EXPORT_SYMBOL_GPL(ide_set_dma_mode); EXPORT_SYMBOL_GPL(ide_set_dma_mode);
/** /**
* ide_set_xfer_rate - set transfer rate * ide_set_xfer_rate - set transfer rate
* @drive: drive to set * @drive: drive to set
* @rate: speed to attempt to set * @rate: speed to attempt to set
* *
* General helper for setting the speed of an IDE device. This * General helper for setting the speed of an IDE device. This
* function knows about user enforced limits from the configuration * function knows about user enforced limits from the configuration
* which ->set_pio_mode/->set_dma_mode does not. * which ->set_pio_mode/->set_dma_mode does not.
...@@ -277,21 +273,16 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) ...@@ -277,21 +273,16 @@ int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
static void ide_dump_opcode(ide_drive_t *drive) static void ide_dump_opcode(ide_drive_t *drive)
{ {
struct request *rq; struct request *rq = drive->hwif->hwgroup->rq;
ide_task_t *task = NULL; ide_task_t *task = NULL;
spin_lock(&ide_lock);
rq = NULL;
if (HWGROUP(drive))
rq = HWGROUP(drive)->rq;
spin_unlock(&ide_lock);
if (!rq) if (!rq)
return; return;
if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
task = rq->special; task = rq->special;
printk("ide: failed opcode was: "); printk(KERN_ERR "ide: failed opcode was: ");
if (task == NULL) if (task == NULL)
printk(KERN_CONT "unknown\n"); printk(KERN_CONT "unknown\n");
else else
...@@ -329,44 +320,55 @@ static void ide_dump_sector(ide_drive_t *drive) ...@@ -329,44 +320,55 @@ static void ide_dump_sector(ide_drive_t *drive)
drive->hwif->tp_ops->tf_read(drive, &task); drive->hwif->tp_ops->tf_read(drive, &task);
if (lba48 || (tf->device & ATA_LBA)) if (lba48 || (tf->device & ATA_LBA))
printk(", LBAsect=%llu", printk(KERN_CONT ", LBAsect=%llu",
(unsigned long long)ide_get_lba_addr(tf, lba48)); (unsigned long long)ide_get_lba_addr(tf, lba48));
else else
printk(", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam, printk(KERN_CONT ", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam,
tf->device & 0xf, tf->lbal); tf->device & 0xf, tf->lbal);
} }
static void ide_dump_ata_error(ide_drive_t *drive, u8 err) static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
{ {
printk("{ "); printk(KERN_ERR "{ ");
if (err & ATA_ABORTED) printk("DriveStatusError "); if (err & ATA_ABORTED)
printk(KERN_CONT "DriveStatusError ");
if (err & ATA_ICRC) if (err & ATA_ICRC)
printk((err & ATA_ABORTED) ? "BadCRC " : "BadSector "); printk(KERN_CONT "%s",
if (err & ATA_UNC) printk("UncorrectableError "); (err & ATA_ABORTED) ? "BadCRC " : "BadSector ");
if (err & ATA_IDNF) printk("SectorIdNotFound "); if (err & ATA_UNC)
if (err & ATA_TRK0NF) printk("TrackZeroNotFound "); printk(KERN_CONT "UncorrectableError ");
if (err & ATA_AMNF) printk("AddrMarkNotFound "); if (err & ATA_IDNF)
printk("}"); printk(KERN_CONT "SectorIdNotFound ");
if (err & ATA_TRK0NF)
printk(KERN_CONT "TrackZeroNotFound ");
if (err & ATA_AMNF)
printk(KERN_CONT "AddrMarkNotFound ");
printk(KERN_CONT "}");
if ((err & (ATA_BBK | ATA_ABORTED)) == ATA_BBK || if ((err & (ATA_BBK | ATA_ABORTED)) == ATA_BBK ||
(err & (ATA_UNC | ATA_IDNF | ATA_AMNF))) { (err & (ATA_UNC | ATA_IDNF | ATA_AMNF))) {
ide_dump_sector(drive); ide_dump_sector(drive);
if (HWGROUP(drive) && HWGROUP(drive)->rq) if (HWGROUP(drive) && HWGROUP(drive)->rq)
printk(", sector=%llu", printk(KERN_CONT ", sector=%llu",
(unsigned long long)HWGROUP(drive)->rq->sector); (unsigned long long)HWGROUP(drive)->rq->sector);
} }
printk("\n"); printk(KERN_CONT "\n");
} }
static void ide_dump_atapi_error(ide_drive_t *drive, u8 err) static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
{ {
printk("{ "); printk(KERN_ERR "{ ");
if (err & ATAPI_ILI) printk("IllegalLengthIndication "); if (err & ATAPI_ILI)
if (err & ATAPI_EOM) printk("EndOfMedia "); printk(KERN_CONT "IllegalLengthIndication ");
if (err & ATA_ABORTED) printk("AbortedCommand "); if (err & ATAPI_EOM)
if (err & ATA_MCR) printk("MediaChangeRequested "); printk(KERN_CONT "EndOfMedia ");
if (err & ATAPI_LFS) printk("LastFailedSense=0x%02x ", if (err & ATA_ABORTED)
(err & ATAPI_LFS) >> 4); printk(KERN_CONT "AbortedCommand ");
printk("}\n"); if (err & ATA_MCR)
printk(KERN_CONT "MediaChangeRequested ");
if (err & ATAPI_LFS)
printk(KERN_CONT "LastFailedSense=0x%02x ",
(err & ATAPI_LFS) >> 4);
printk(KERN_CONT "}\n");
} }
/** /**
...@@ -382,34 +384,37 @@ static void ide_dump_atapi_error(ide_drive_t *drive, u8 err) ...@@ -382,34 +384,37 @@ static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat) u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
{ {
unsigned long flags;
u8 err = 0; u8 err = 0;
local_irq_save(flags); printk(KERN_ERR "%s: %s: status=0x%02x { ", drive->name, msg, stat);
printk("%s: %s: status=0x%02x { ", drive->name, msg, stat);
if (stat & ATA_BUSY) if (stat & ATA_BUSY)
printk("Busy "); printk(KERN_CONT "Busy ");
else { else {
if (stat & ATA_DRDY) printk("DriveReady "); if (stat & ATA_DRDY)
if (stat & ATA_DF) printk("DeviceFault "); printk(KERN_CONT "DriveReady ");
if (stat & ATA_DSC) printk("SeekComplete "); if (stat & ATA_DF)
if (stat & ATA_DRQ) printk("DataRequest "); printk(KERN_CONT "DeviceFault ");
if (stat & ATA_CORR) printk("CorrectedError "); if (stat & ATA_DSC)
if (stat & ATA_IDX) printk("Index "); printk(KERN_CONT "SeekComplete ");
if (stat & ATA_ERR) printk("Error "); if (stat & ATA_DRQ)
printk(KERN_CONT "DataRequest ");
if (stat & ATA_CORR)
printk(KERN_CONT "CorrectedError ");
if (stat & ATA_IDX)
printk(KERN_CONT "Index ");
if (stat & ATA_ERR)
printk(KERN_CONT "Error ");
} }
printk("}\n"); printk(KERN_CONT "}\n");
if ((stat & (ATA_BUSY | ATA_ERR)) == ATA_ERR) { if ((stat & (ATA_BUSY | ATA_ERR)) == ATA_ERR) {
err = ide_read_error(drive); err = ide_read_error(drive);
printk("%s: %s: error=0x%02x ", drive->name, msg, err); printk(KERN_ERR "%s: %s: error=0x%02x ", drive->name, msg, err);
if (drive->media == ide_disk) if (drive->media == ide_disk)
ide_dump_ata_error(drive, err); ide_dump_ata_error(drive, err);
else else
ide_dump_atapi_error(drive, err); ide_dump_atapi_error(drive, err);
} }
ide_dump_opcode(drive); ide_dump_opcode(drive);
local_irq_restore(flags);
return err; return err;
} }
EXPORT_SYMBOL(ide_dump_status); EXPORT_SYMBOL(ide_dump_status);
...@@ -7,17 +7,16 @@ DECLARE_WAIT_QUEUE_HEAD(ide_park_wq); ...@@ -7,17 +7,16 @@ DECLARE_WAIT_QUEUE_HEAD(ide_park_wq);
static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout) static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
{ {
ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
struct request_queue *q = drive->queue; struct request_queue *q = drive->queue;
struct request *rq; struct request *rq;
int rc; int rc;
timeout += jiffies; timeout += jiffies;
spin_lock_irq(&ide_lock); spin_lock_irq(&hwgroup->lock);
if (drive->dev_flags & IDE_DFLAG_PARKED) { if (drive->dev_flags & IDE_DFLAG_PARKED) {
ide_hwgroup_t *hwgroup = drive->hwif->hwgroup; int reset_timer = time_before(timeout, drive->sleep);
int reset_timer;
reset_timer = time_before(timeout, drive->sleep);
drive->sleep = timeout; drive->sleep = timeout;
wake_up_all(&ide_park_wq); wake_up_all(&ide_park_wq);
if (reset_timer && hwgroup->sleeping && if (reset_timer && hwgroup->sleeping &&
...@@ -26,10 +25,10 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout) ...@@ -26,10 +25,10 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
hwgroup->busy = 0; hwgroup->busy = 0;
blk_start_queueing(q); blk_start_queueing(q);
} }
spin_unlock_irq(&ide_lock); spin_unlock_irq(&hwgroup->lock);
return; return;
} }
spin_unlock_irq(&ide_lock); spin_unlock_irq(&hwgroup->lock);
rq = blk_get_request(q, READ, __GFP_WAIT); rq = blk_get_request(q, READ, __GFP_WAIT);
rq->cmd[0] = REQ_PARK_HEADS; rq->cmd[0] = REQ_PARK_HEADS;
...@@ -62,20 +61,21 @@ ssize_t ide_park_show(struct device *dev, struct device_attribute *attr, ...@@ -62,20 +61,21 @@ ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
char *buf) char *buf)
{ {
ide_drive_t *drive = to_ide_device(dev); ide_drive_t *drive = to_ide_device(dev);
ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
unsigned long now; unsigned long now;
unsigned int msecs; unsigned int msecs;
if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD) if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD)
return -EOPNOTSUPP; return -EOPNOTSUPP;
spin_lock_irq(&ide_lock); spin_lock_irq(&hwgroup->lock);
now = jiffies; now = jiffies;
if (drive->dev_flags & IDE_DFLAG_PARKED && if (drive->dev_flags & IDE_DFLAG_PARKED &&
time_after(drive->sleep, now)) time_after(drive->sleep, now))
msecs = jiffies_to_msecs(drive->sleep - now); msecs = jiffies_to_msecs(drive->sleep - now);
else else
msecs = 0; msecs = 0;
spin_unlock_irq(&ide_lock); spin_unlock_irq(&hwgroup->lock);
return snprintf(buf, 20, "%u\n", msecs); return snprintf(buf, 20, "%u\n", msecs);
} }
......
#include <linux/kernel.h>
#include <linux/ide.h>
#include <linux/hdreg.h>
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
ide_hwif_t *hwif = HWIF(drive);
struct request *rq;
struct request_pm_state rqpm;
ide_task_t args;
int ret;
/* call ACPI _GTM only once */
if ((drive->dn & 1) == 0 || pair == NULL)
ide_acpi_get_timing(hwif);
memset(&rqpm, 0, sizeof(rqpm));
memset(&args, 0, sizeof(args));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_PM_SUSPEND;
rq->special = &args;
rq->data = &rqpm;
rqpm.pm_step = IDE_PM_START_SUSPEND;
if (mesg.event == PM_EVENT_PRETHAW)
mesg.event = PM_EVENT_FREEZE;
rqpm.pm_state = mesg.event;
ret = blk_execute_rq(drive->queue, NULL, rq, 0);
blk_put_request(rq);
/* call ACPI _PS3 only after both devices are suspended */
if (ret == 0 && ((drive->dn & 1) || pair == NULL))
ide_acpi_set_state(hwif, 0);
return ret;
}
int generic_ide_resume(struct device *dev)
{
ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
ide_hwif_t *hwif = HWIF(drive);
struct request *rq;
struct request_pm_state rqpm;
ide_task_t args;
int err;
/* call ACPI _PS0 / _STM only once */
if ((drive->dn & 1) == 0 || pair == NULL) {
ide_acpi_set_state(hwif, 1);
ide_acpi_push_timing(hwif);
}
ide_acpi_exec_tfs(drive);
memset(&rqpm, 0, sizeof(rqpm));
memset(&args, 0, sizeof(args));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_PM_RESUME;
rq->cmd_flags |= REQ_PREEMPT;
rq->special = &args;
rq->data = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
rqpm.pm_state = PM_EVENT_ON;
err = blk_execute_rq(drive->queue, NULL, rq, 1);
blk_put_request(rq);
if (err == 0 && dev->driver) {
ide_driver_t *drv = to_ide_driver(dev->driver);
if (drv->resume)
drv->resume(drive);
}
return err;
}
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
struct request_pm_state *pm = rq->data;
#ifdef DEBUG_PM
printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
drive->name, pm->pm_step);
#endif
if (drive->media != ide_disk)
return;
switch (pm->pm_step) {
case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
if (pm->pm_state == PM_EVENT_FREEZE)
pm->pm_step = IDE_PM_COMPLETED;
else
pm->pm_step = IDE_PM_STANDBY;
break;
case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
pm->pm_step = IDE_PM_COMPLETED;
break;
case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
pm->pm_step = IDE_PM_IDLE;
break;
case IDE_PM_IDLE: /* Resume step 2 (idle)*/
pm->pm_step = IDE_PM_RESTORE_DMA;
break;
}
}
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
struct request_pm_state *pm = rq->data;
ide_task_t *args = rq->special;
memset(args, 0, sizeof(*args));
switch (pm->pm_step) {
case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
if (drive->media != ide_disk)
break;
/* Not supported? Switch to next step now. */
if (ata_id_flush_enabled(drive->id) == 0 ||
(drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
ide_complete_power_step(drive, rq);
return ide_stopped;
}
if (ata_id_flush_ext_enabled(drive->id))
args->tf.command = ATA_CMD_FLUSH_EXT;
else
args->tf.command = ATA_CMD_FLUSH;
goto out_do_tf;
case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
args->tf.command = ATA_CMD_STANDBYNOW1;
goto out_do_tf;
case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
ide_set_max_pio(drive);
/*
* skip IDE_PM_IDLE for ATAPI devices
*/
if (drive->media != ide_disk)
pm->pm_step = IDE_PM_RESTORE_DMA;
else
ide_complete_power_step(drive, rq);
return ide_stopped;
case IDE_PM_IDLE: /* Resume step 2 (idle) */
args->tf.command = ATA_CMD_IDLEIMMEDIATE;
goto out_do_tf;
case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
/*
* Right now, all we do is call ide_set_dma(drive),
* we could be smarter and check for current xfer_speed
* in struct drive etc...
*/
if (drive->hwif->dma_ops == NULL)
break;
/*
* TODO: respect IDE_DFLAG_USING_DMA
*/
ide_set_dma(drive);
break;
}
pm->pm_step = IDE_PM_COMPLETED;
return ide_stopped;
out_do_tf:
args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
args->data_phase = TASKFILE_NO_DATA;
return do_rw_taskfile(drive, args);
}
/**
* ide_complete_pm_request - end the current Power Management request
* @drive: target drive
* @rq: request
*
* This function cleans up the current PM request and stops the queue
* if necessary.
*/
void ide_complete_pm_request(ide_drive_t *drive, struct request *rq)
{
struct request_queue *q = drive->queue;
unsigned long flags;
#ifdef DEBUG_PM
printk("%s: completing PM request, %s\n", drive->name,
blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
spin_lock_irqsave(q->queue_lock, flags);
if (blk_pm_suspend_request(rq)) {
blk_stop_queue(q);
} else {
drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
blk_start_queue(q);
}
spin_unlock_irqrestore(q->queue_lock, flags);
drive->hwif->hwgroup->rq = NULL;
if (blk_end_request(rq, 0, 0))
BUG();
}
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
struct request_pm_state *pm = rq->data;
if (blk_pm_suspend_request(rq) &&
pm->pm_step == IDE_PM_START_SUSPEND)
/* Mark drive blocked when starting the suspend sequence. */
drive->dev_flags |= IDE_DFLAG_BLOCKED;
else if (blk_pm_resume_request(rq) &&
pm->pm_step == IDE_PM_START_RESUME) {
/*
* The first thing we do on wakeup is to wait for BSY bit to
* go away (with a looong timeout) as a drive on this hwif may
* just be POSTing itself.
* We do that before even selecting as the "other" device on
* the bus may be broken enough to walk on our toes at this
* point.
*/
ide_hwif_t *hwif = drive->hwif;
int rc;
#ifdef DEBUG_PM
printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
rc = ide_wait_not_busy(hwif, 35000);
if (rc)
printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
SELECT_DRIVE(drive);
hwif->tp_ops->set_irq(hwif, 1);
rc = ide_wait_not_busy(hwif, 100000);
if (rc)
printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
}
}
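To summarize the state machine implemented by ide_complete_power_step() and ide_start_power_step() in the new ide-pm.c, the step ordering for an ATA disk works out as below. This comment block is illustrative only and is not part of the file:

/*
 * suspend: IDE_PM_FLUSH_CACHE -> IDE_PM_STANDBY -> IDE_PM_COMPLETED
 *          (the flush itself is skipped when not supported or the write
 *           cache is off; PM_EVENT_FREEZE goes straight to COMPLETED)
 * resume:  IDE_PM_RESTORE_PIO -> IDE_PM_IDLE -> IDE_PM_RESTORE_DMA
 *          -> IDE_PM_COMPLETED
 *          (ATAPI devices go from RESTORE_PIO directly to RESTORE_DMA)
 */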
...@@ -110,20 +110,22 @@ static void ide_disk_init_mult_count(ide_drive_t *drive) ...@@ -110,20 +110,22 @@ static void ide_disk_init_mult_count(ide_drive_t *drive)
* read and parse the results. This function is run with * read and parse the results. This function is run with
* interrupts disabled. * interrupts disabled.
*/ */
static inline void do_identify (ide_drive_t *drive, u8 cmd) static void do_identify(ide_drive_t *drive, u8 cmd)
{ {
ide_hwif_t *hwif = HWIF(drive); ide_hwif_t *hwif = HWIF(drive);
u16 *id = drive->id; u16 *id = drive->id;
char *m = (char *)&id[ATA_ID_PROD]; char *m = (char *)&id[ATA_ID_PROD];
unsigned long flags;
int bswap = 1, is_cfa; int bswap = 1, is_cfa;
/* local CPU only; some systems need this */
local_irq_save(flags);
/* read 512 bytes of id info */ /* read 512 bytes of id info */
hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
local_irq_restore(flags);
drive->dev_flags |= IDE_DFLAG_ID_READ; drive->dev_flags |= IDE_DFLAG_ID_READ;
local_irq_enable();
#ifdef DEBUG #ifdef DEBUG
printk(KERN_INFO "%s: dumping identify data\n", drive->name); printk(KERN_INFO "%s: dumping identify data\n", drive->name);
ide_dump_identify((u8 *)id); ide_dump_identify((u8 *)id);
...@@ -306,17 +308,12 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd) ...@@ -306,17 +308,12 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
s = tp_ops->read_status(hwif); s = tp_ops->read_status(hwif);
if (OK_STAT(s, ATA_DRQ, BAD_R_STAT)) { if (OK_STAT(s, ATA_DRQ, BAD_R_STAT)) {
unsigned long flags;
/* local CPU only; some systems need this */
local_irq_save(flags);
/* drive returned ID */ /* drive returned ID */
do_identify(drive, cmd); do_identify(drive, cmd);
/* drive responded with ID */ /* drive responded with ID */
rc = 0; rc = 0;
/* clear drive IRQ */ /* clear drive IRQ */
(void)tp_ops->read_status(hwif); (void)tp_ops->read_status(hwif);
local_irq_restore(flags);
} else { } else {
/* drive refused ID */ /* drive refused ID */
rc = 2; rc = 2;
...@@ -554,8 +551,8 @@ static void enable_nest (ide_drive_t *drive) ...@@ -554,8 +551,8 @@ static void enable_nest (ide_drive_t *drive)
* 1 device was found * 1 device was found
* (note: IDE_DFLAG_PRESENT might still be not set) * (note: IDE_DFLAG_PRESENT might still be not set)
*/ */
static inline u8 probe_for_drive (ide_drive_t *drive) static u8 probe_for_drive(ide_drive_t *drive)
{ {
char *m; char *m;
...@@ -642,7 +639,7 @@ static int ide_register_port(ide_hwif_t *hwif) ...@@ -642,7 +639,7 @@ static int ide_register_port(ide_hwif_t *hwif)
int ret; int ret;
/* register with global device tree */ /* register with global device tree */
strlcpy(hwif->gendev.bus_id,hwif->name,BUS_ID_SIZE); dev_set_name(&hwif->gendev, hwif->name);
hwif->gendev.driver_data = hwif; hwif->gendev.driver_data = hwif;
if (hwif->gendev.parent == NULL) { if (hwif->gendev.parent == NULL) {
if (hwif->dev) if (hwif->dev)
...@@ -863,31 +860,6 @@ static void ide_port_tune_devices(ide_hwif_t *hwif) ...@@ -863,31 +860,6 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
} }
} }
/*
* save_match() is used to simplify logic in init_irq() below.
*
* A loophole here is that we may not know about a particular
* hwif's irq until after that hwif is actually probed/initialized..
* This could be a problem for the case where an hwif is on a
* dual interface that requires serialization (eg. cmd640) and another
* hwif using one of the same irqs is initialized beforehand.
*
* This routine detects and reports such situations, but does not fix them.
*/
static void save_match(ide_hwif_t *hwif, ide_hwif_t *new, ide_hwif_t **match)
{
ide_hwif_t *m = *match;
if (m && m->hwgroup && m->hwgroup != new->hwgroup) {
if (!new->hwgroup)
return;
printk(KERN_WARNING "%s: potential IRQ problem with %s and %s\n",
hwif->name, new->name, m->name);
}
if (!m || m->irq != hwif->irq) /* don't undo a prior perfect match */
*match = new;
}
/* /*
* init request queue * init request queue
*/ */
...@@ -906,7 +878,8 @@ static int ide_init_queue(ide_drive_t *drive) ...@@ -906,7 +878,8 @@ static int ide_init_queue(ide_drive_t *drive)
* do not. * do not.
*/ */
q = blk_init_queue_node(do_ide_request, &ide_lock, hwif_to_node(hwif)); q = blk_init_queue_node(do_ide_request, &hwif->hwgroup->lock,
hwif_to_node(hwif));
if (!q) if (!q)
return 1; return 1;
...@@ -947,7 +920,7 @@ static void ide_add_drive_to_hwgroup(ide_drive_t *drive) ...@@ -947,7 +920,7 @@ static void ide_add_drive_to_hwgroup(ide_drive_t *drive)
{ {
ide_hwgroup_t *hwgroup = drive->hwif->hwgroup; ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
spin_lock_irq(&ide_lock); spin_lock_irq(&hwgroup->lock);
if (!hwgroup->drive) { if (!hwgroup->drive) {
/* first drive for hwgroup. */ /* first drive for hwgroup. */
drive->next = drive; drive->next = drive;
...@@ -957,7 +930,7 @@ static void ide_add_drive_to_hwgroup(ide_drive_t *drive) ...@@ -957,7 +930,7 @@ static void ide_add_drive_to_hwgroup(ide_drive_t *drive)
drive->next = hwgroup->drive->next; drive->next = hwgroup->drive->next;
hwgroup->drive->next = drive; hwgroup->drive->next = drive;
} }
spin_unlock_irq(&ide_lock); spin_unlock_irq(&hwgroup->lock);
} }
/* /*
...@@ -1002,7 +975,7 @@ void ide_remove_port_from_hwgroup(ide_hwif_t *hwif) ...@@ -1002,7 +975,7 @@ void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
ide_ports[hwif->index] = NULL; ide_ports[hwif->index] = NULL;
spin_lock_irq(&ide_lock); spin_lock_irq(&hwgroup->lock);
/* /*
* Remove us from the hwgroup, and free * Remove us from the hwgroup, and free
* the hwgroup if we were the only member * the hwgroup if we were the only member
...@@ -1030,7 +1003,7 @@ void ide_remove_port_from_hwgroup(ide_hwif_t *hwif) ...@@ -1030,7 +1003,7 @@ void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
} }
BUG_ON(hwgroup->hwif == hwif); BUG_ON(hwgroup->hwif == hwif);
} }
spin_unlock_irq(&ide_lock); spin_unlock_irq(&hwgroup->lock);
} }
/* /*
...@@ -1051,27 +1024,13 @@ static int init_irq (ide_hwif_t *hwif) ...@@ -1051,27 +1024,13 @@ static int init_irq (ide_hwif_t *hwif)
mutex_lock(&ide_cfg_mtx); mutex_lock(&ide_cfg_mtx);
hwif->hwgroup = NULL; hwif->hwgroup = NULL;
/*
* Group up with any other hwifs that share our irq(s).
*/
for (index = 0; index < MAX_HWIFS; index++) { for (index = 0; index < MAX_HWIFS; index++) {
ide_hwif_t *h = ide_ports[index]; ide_hwif_t *h = ide_ports[index];
if (h && h->hwgroup) { /* scan only initialized ports */ if (h && h->hwgroup) { /* scan only initialized ports */
if (hwif->irq == h->irq) { if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE) {
hwif->sharing_irq = h->sharing_irq = 1; if (hwif->host == h->host)
if (hwif->chipset != ide_pci || match = h;
h->chipset != ide_pci) {
save_match(hwif, h, &match);
}
}
if (hwif->serialized) {
if (hwif->mate && hwif->mate->irq == h->irq)
save_match(hwif, h, &match);
}
if (h->serialized) {
if (h->mate && hwif->irq == h->mate->irq)
save_match(hwif, h, &match);
} }
} }
} }
...@@ -1092,17 +1051,19 @@ static int init_irq (ide_hwif_t *hwif) ...@@ -1092,17 +1051,19 @@ static int init_irq (ide_hwif_t *hwif)
* linked list, the first entry is the hwif that owns * linked list, the first entry is the hwif that owns
* hwgroup->handler - do not change that. * hwgroup->handler - do not change that.
*/ */
spin_lock_irq(&ide_lock); spin_lock_irq(&hwgroup->lock);
hwif->next = hwgroup->hwif->next; hwif->next = hwgroup->hwif->next;
hwgroup->hwif->next = hwif; hwgroup->hwif->next = hwif;
BUG_ON(hwif->next == hwif); BUG_ON(hwif->next == hwif);
spin_unlock_irq(&ide_lock); spin_unlock_irq(&hwgroup->lock);
} else { } else {
hwgroup = kmalloc_node(sizeof(*hwgroup), GFP_KERNEL|__GFP_ZERO, hwgroup = kmalloc_node(sizeof(*hwgroup), GFP_KERNEL|__GFP_ZERO,
hwif_to_node(hwif)); hwif_to_node(hwif));
if (hwgroup == NULL) if (hwgroup == NULL)
goto out_up; goto out_up;
spin_lock_init(&hwgroup->lock);
hwif->hwgroup = hwgroup; hwif->hwgroup = hwgroup;
hwgroup->hwif = hwif->next = hwif; hwgroup->hwif = hwif->next = hwif;
...@@ -1122,8 +1083,7 @@ static int init_irq (ide_hwif_t *hwif) ...@@ -1122,8 +1083,7 @@ static int init_irq (ide_hwif_t *hwif)
sa = IRQF_SHARED; sa = IRQF_SHARED;
#endif /* __mc68000__ */ #endif /* __mc68000__ */
if (hwif->chipset == ide_pci || hwif->chipset == ide_cmd646 || if (hwif->chipset == ide_pci)
hwif->chipset == ide_ali14xx)
sa = IRQF_SHARED; sa = IRQF_SHARED;
if (io_ports->ctl_addr) if (io_ports->ctl_addr)
...@@ -1150,8 +1110,7 @@ static int init_irq (ide_hwif_t *hwif) ...@@ -1150,8 +1110,7 @@ static int init_irq (ide_hwif_t *hwif)
io_ports->data_addr, hwif->irq); io_ports->data_addr, hwif->irq);
#endif /* __mc68000__ */ #endif /* __mc68000__ */
if (match) if (match)
printk(KERN_CONT " (%sed with %s)", printk(KERN_CONT " (serialized with %s)", match->name);
hwif->sharing_irq ? "shar" : "serializ", match->name);
printk(KERN_CONT "\n"); printk(KERN_CONT "\n");
mutex_unlock(&ide_cfg_mtx); mutex_unlock(&ide_cfg_mtx);
...@@ -1263,20 +1222,21 @@ static void ide_remove_drive_from_hwgroup(ide_drive_t *drive) ...@@ -1263,20 +1222,21 @@ static void ide_remove_drive_from_hwgroup(ide_drive_t *drive)
static void drive_release_dev (struct device *dev) static void drive_release_dev (struct device *dev)
{ {
ide_drive_t *drive = container_of(dev, ide_drive_t, gendev); ide_drive_t *drive = container_of(dev, ide_drive_t, gendev);
ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
ide_proc_unregister_device(drive); ide_proc_unregister_device(drive);
spin_lock_irq(&ide_lock); spin_lock_irq(&hwgroup->lock);
ide_remove_drive_from_hwgroup(drive); ide_remove_drive_from_hwgroup(drive);
kfree(drive->id); kfree(drive->id);
drive->id = NULL; drive->id = NULL;
drive->dev_flags &= ~IDE_DFLAG_PRESENT; drive->dev_flags &= ~IDE_DFLAG_PRESENT;
/* Messed up locking ... */ /* Messed up locking ... */
spin_unlock_irq(&ide_lock); spin_unlock_irq(&hwgroup->lock);
blk_cleanup_queue(drive->queue); blk_cleanup_queue(drive->queue);
spin_lock_irq(&ide_lock); spin_lock_irq(&hwgroup->lock);
drive->queue = NULL; drive->queue = NULL;
spin_unlock_irq(&ide_lock); spin_unlock_irq(&hwgroup->lock);
complete(&drive->gendev_rel_comp); complete(&drive->gendev_rel_comp);
} }
...@@ -1352,7 +1312,7 @@ static void hwif_register_devices(ide_hwif_t *hwif) ...@@ -1352,7 +1312,7 @@ static void hwif_register_devices(ide_hwif_t *hwif)
if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
continue; continue;
snprintf(dev->bus_id, BUS_ID_SIZE, "%u.%u", hwif->index, i); dev_set_name(dev, "%u.%u", hwif->index, i);
dev->parent = &hwif->gendev; dev->parent = &hwif->gendev;
dev->bus = &ide_bus_type; dev->bus = &ide_bus_type;
dev->driver_data = drive; dev->driver_data = drive;
...@@ -1436,13 +1396,11 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port, ...@@ -1436,13 +1396,11 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
} }
if ((d->host_flags & IDE_HFLAG_SERIALIZE) || if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base)) { ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base))
if (hwif->mate) hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;
hwif->mate->serialized = hwif->serialized = 1;
}
if (d->host_flags & IDE_HFLAG_RQSIZE_256) if (d->max_sectors)
hwif->rqsize = 256; hwif->rqsize = d->max_sectors;
/* call chipset specific routine for each enabled port */ /* call chipset specific routine for each enabled port */
if (d->init_hwif) if (d->init_hwif)
...@@ -1794,59 +1752,3 @@ void ide_port_scan(ide_hwif_t *hwif) ...@@ -1794,59 +1752,3 @@ void ide_port_scan(ide_hwif_t *hwif)
ide_proc_port_register_devices(hwif); ide_proc_port_register_devices(hwif);
} }
EXPORT_SYMBOL_GPL(ide_port_scan); EXPORT_SYMBOL_GPL(ide_port_scan);
static void ide_legacy_init_one(hw_regs_t **hws, hw_regs_t *hw,
u8 port_no, const struct ide_port_info *d,
unsigned long config)
{
unsigned long base, ctl;
int irq;
if (port_no == 0) {
base = 0x1f0;
ctl = 0x3f6;
irq = 14;
} else {
base = 0x170;
ctl = 0x376;
irq = 15;
}
if (!request_region(base, 8, d->name)) {
printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
d->name, base, base + 7);
return;
}
if (!request_region(ctl, 1, d->name)) {
printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
d->name, ctl);
release_region(base, 8);
return;
}
ide_std_init_ports(hw, base, ctl);
hw->irq = irq;
hw->chipset = d->chipset;
hw->config = config;
hws[port_no] = hw;
}
int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
{
hw_regs_t hw[2], *hws[] = { NULL, NULL, NULL, NULL };
memset(&hw, 0, sizeof(hw));
if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
ide_legacy_init_one(hws, &hw[0], 0, d, config);
ide_legacy_init_one(hws, &hw[1], 1, d, config);
if (hws[0] == NULL && hws[1] == NULL &&
(d->host_flags & IDE_HFLAG_SINGLE))
return -ENOENT;
return ide_host_add(d, hws, NULL);
}
EXPORT_SYMBOL_GPL(ide_legacy_device_add);
...@@ -46,10 +46,6 @@ static int proc_ide_read_imodel ...@@ -46,10 +46,6 @@ static int proc_ide_read_imodel
case ide_qd65xx: name = "qd65xx"; break; case ide_qd65xx: name = "qd65xx"; break;
case ide_umc8672: name = "umc8672"; break; case ide_umc8672: name = "umc8672"; break;
case ide_ht6560b: name = "ht6560b"; break; case ide_ht6560b: name = "ht6560b"; break;
case ide_rz1000: name = "rz1000"; break;
case ide_trm290: name = "trm290"; break;
case ide_cmd646: name = "cmd646"; break;
case ide_cy82c693: name = "cy82c693"; break;
case ide_4drives: name = "4drives"; break; case ide_4drives: name = "4drives"; break;
case ide_pmac: name = "mac-io"; break; case ide_pmac: name = "mac-io"; break;
case ide_au1xxx: name = "au1xxx"; break; case ide_au1xxx: name = "au1xxx"; break;
...@@ -155,13 +151,8 @@ static int ide_read_setting(ide_drive_t *drive, ...@@ -155,13 +151,8 @@ static int ide_read_setting(ide_drive_t *drive,
const struct ide_devset *ds = setting->setting; const struct ide_devset *ds = setting->setting;
int val = -EINVAL; int val = -EINVAL;
if (ds->get) { if (ds->get)
unsigned long flags;
spin_lock_irqsave(&ide_lock, flags);
val = ds->get(drive); val = ds->get(drive);
spin_unlock_irqrestore(&ide_lock, flags);
}
return val; return val;
} }
...@@ -583,31 +574,19 @@ EXPORT_SYMBOL(ide_proc_register_driver); ...@@ -583,31 +574,19 @@ EXPORT_SYMBOL(ide_proc_register_driver);
* Clean up the driver specific /proc files and IDE settings * Clean up the driver specific /proc files and IDE settings
* for a given drive. * for a given drive.
* *
* Takes ide_setting_mtx and ide_lock. * Takes ide_setting_mtx.
* Caller must hold none of the locks.
*/ */
void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver) void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver)
{ {
unsigned long flags;
ide_remove_proc_entries(drive->proc, driver->proc_entries(drive)); ide_remove_proc_entries(drive->proc, driver->proc_entries(drive));
mutex_lock(&ide_setting_mtx); mutex_lock(&ide_setting_mtx);
spin_lock_irqsave(&ide_lock, flags);
/* /*
* ide_setting_mtx protects the settings list * ide_setting_mtx protects both the settings list and the use
* ide_lock protects the use of settings * of settings (we cannot take a setting out that is being used).
*
* so we need to hold both, ide_settings_sem because we want to
* modify the settings list, and ide_lock because we cannot take
* a setting out that is being used.
*
* OTOH both ide_{read,write}_setting are only ever used under
* ide_setting_mtx.
*/ */
drive->settings = NULL; drive->settings = NULL;
spin_unlock_irqrestore(&ide_lock, flags);
mutex_unlock(&ide_setting_mtx); mutex_unlock(&ide_setting_mtx);
} }
EXPORT_SYMBOL(ide_proc_unregister_driver); EXPORT_SYMBOL(ide_proc_unregister_driver);
......
...@@ -74,9 +74,6 @@ static const u8 ide_hwif_to_major[] = { IDE0_MAJOR, IDE1_MAJOR, ...@@ -74,9 +74,6 @@ static const u8 ide_hwif_to_major[] = { IDE0_MAJOR, IDE1_MAJOR,
DEFINE_MUTEX(ide_cfg_mtx); DEFINE_MUTEX(ide_cfg_mtx);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock);
EXPORT_SYMBOL(ide_lock);
static void ide_port_init_devices_data(ide_hwif_t *); static void ide_port_init_devices_data(ide_hwif_t *);
/* /*
...@@ -130,7 +127,6 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif) ...@@ -130,7 +127,6 @@ static void ide_port_init_devices_data(ide_hwif_t *hwif)
} }
} }
/* Called with ide_lock held. */
static void __ide_port_unregister_devices(ide_hwif_t *hwif) static void __ide_port_unregister_devices(ide_hwif_t *hwif)
{ {
int i; int i;
...@@ -139,10 +135,8 @@ static void __ide_port_unregister_devices(ide_hwif_t *hwif) ...@@ -139,10 +135,8 @@ static void __ide_port_unregister_devices(ide_hwif_t *hwif)
ide_drive_t *drive = &hwif->drives[i]; ide_drive_t *drive = &hwif->drives[i];
if (drive->dev_flags & IDE_DFLAG_PRESENT) { if (drive->dev_flags & IDE_DFLAG_PRESENT) {
spin_unlock_irq(&ide_lock);
device_unregister(&drive->gendev); device_unregister(&drive->gendev);
wait_for_completion(&drive->gendev_rel_comp); wait_for_completion(&drive->gendev_rel_comp);
spin_lock_irq(&ide_lock);
} }
} }
} }
...@@ -150,11 +144,9 @@ static void __ide_port_unregister_devices(ide_hwif_t *hwif) ...@@ -150,11 +144,9 @@ static void __ide_port_unregister_devices(ide_hwif_t *hwif)
void ide_port_unregister_devices(ide_hwif_t *hwif) void ide_port_unregister_devices(ide_hwif_t *hwif)
{ {
mutex_lock(&ide_cfg_mtx); mutex_lock(&ide_cfg_mtx);
spin_lock_irq(&ide_lock);
__ide_port_unregister_devices(hwif); __ide_port_unregister_devices(hwif);
hwif->present = 0; hwif->present = 0;
ide_port_init_devices_data(hwif); ide_port_init_devices_data(hwif);
spin_unlock_irq(&ide_lock);
mutex_unlock(&ide_cfg_mtx); mutex_unlock(&ide_cfg_mtx);
} }
EXPORT_SYMBOL_GPL(ide_port_unregister_devices); EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
...@@ -192,12 +184,10 @@ void ide_unregister(ide_hwif_t *hwif) ...@@ -192,12 +184,10 @@ void ide_unregister(ide_hwif_t *hwif)
mutex_lock(&ide_cfg_mtx); mutex_lock(&ide_cfg_mtx);
spin_lock_irq(&ide_lock);
if (hwif->present) { if (hwif->present) {
__ide_port_unregister_devices(hwif); __ide_port_unregister_devices(hwif);
hwif->present = 0; hwif->present = 0;
} }
spin_unlock_irq(&ide_lock);
ide_proc_unregister_port(hwif); ide_proc_unregister_port(hwif);
...@@ -340,6 +330,7 @@ static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio) ...@@ -340,6 +330,7 @@ static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
static int set_pio_mode(ide_drive_t *drive, int arg) static int set_pio_mode(ide_drive_t *drive, int arg)
{ {
ide_hwif_t *hwif = drive->hwif; ide_hwif_t *hwif = drive->hwif;
ide_hwgroup_t *hwgroup = hwif->hwgroup;
const struct ide_port_ops *port_ops = hwif->port_ops; const struct ide_port_ops *port_ops = hwif->port_ops;
if (arg < 0 || arg > 255) if (arg < 0 || arg > 255)
...@@ -354,9 +345,9 @@ static int set_pio_mode(ide_drive_t *drive, int arg) ...@@ -354,9 +345,9 @@ static int set_pio_mode(ide_drive_t *drive, int arg)
unsigned long flags; unsigned long flags;
/* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */ /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */
spin_lock_irqsave(&ide_lock, flags); spin_lock_irqsave(&hwgroup->lock, flags);
port_ops->set_pio_mode(drive, arg); port_ops->set_pio_mode(drive, arg);
spin_unlock_irqrestore(&ide_lock, flags); spin_unlock_irqrestore(&hwgroup->lock, flags);
} else } else
port_ops->set_pio_mode(drive, arg); port_ops->set_pio_mode(drive, arg);
} else { } else {
...@@ -397,80 +388,6 @@ ide_ext_devset_rw_sync(unmaskirq, unmaskirq); ...@@ -397,80 +388,6 @@ ide_ext_devset_rw_sync(unmaskirq, unmaskirq);
ide_ext_devset_rw_sync(using_dma, using_dma); ide_ext_devset_rw_sync(using_dma, using_dma);
__IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode); __IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode);
static int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
ide_hwif_t *hwif = HWIF(drive);
struct request *rq;
struct request_pm_state rqpm;
ide_task_t args;
int ret;
/* call ACPI _GTM only once */
if ((drive->dn & 1) == 0 || pair == NULL)
ide_acpi_get_timing(hwif);
memset(&rqpm, 0, sizeof(rqpm));
memset(&args, 0, sizeof(args));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_PM_SUSPEND;
rq->special = &args;
rq->data = &rqpm;
rqpm.pm_step = IDE_PM_START_SUSPEND;
if (mesg.event == PM_EVENT_PRETHAW)
mesg.event = PM_EVENT_FREEZE;
rqpm.pm_state = mesg.event;
ret = blk_execute_rq(drive->queue, NULL, rq, 0);
blk_put_request(rq);
/* call ACPI _PS3 only after both devices are suspended */
if (ret == 0 && ((drive->dn & 1) || pair == NULL))
ide_acpi_set_state(hwif, 0);
return ret;
}
static int generic_ide_resume(struct device *dev)
{
ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
ide_hwif_t *hwif = HWIF(drive);
struct request *rq;
struct request_pm_state rqpm;
ide_task_t args;
int err;
/* call ACPI _PS0 / _STM only once */
if ((drive->dn & 1) == 0 || pair == NULL) {
ide_acpi_set_state(hwif, 1);
ide_acpi_push_timing(hwif);
}
ide_acpi_exec_tfs(drive);
memset(&rqpm, 0, sizeof(rqpm));
memset(&args, 0, sizeof(args));
rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_PM_RESUME;
rq->cmd_flags |= REQ_PREEMPT;
rq->special = &args;
rq->data = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
rqpm.pm_state = PM_EVENT_ON;
err = blk_execute_rq(drive->queue, NULL, rq, 1);
blk_put_request(rq);
if (err == 0 && dev->driver) {
ide_driver_t *drv = to_ide_driver(dev->driver);
if (drv->resume)
drv->resume(drive);
}
return err;
}
/** /**
* ide_device_get - get an additional reference to a ide_drive_t * ide_device_get - get an additional reference to a ide_drive_t
* @drive: device to get a reference to * @drive: device to get a reference to
......
...@@ -350,16 +350,17 @@ static const struct ide_dma_ops pdc2026x_dma_ops = { ...@@ -350,16 +350,17 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
.dma_timeout = pdc202xx_dma_timeout, .dma_timeout = pdc202xx_dma_timeout,
}; };
#define DECLARE_PDC2026X_DEV(udma, extra_flags) \ #define DECLARE_PDC2026X_DEV(udma, sectors) \
{ \ { \
.name = DRV_NAME, \ .name = DRV_NAME, \
.init_chipset = init_chipset_pdc202xx, \ .init_chipset = init_chipset_pdc202xx, \
.port_ops = &pdc2026x_port_ops, \ .port_ops = &pdc2026x_port_ops, \
.dma_ops = &pdc2026x_dma_ops, \ .dma_ops = &pdc2026x_dma_ops, \
.host_flags = IDE_HFLAGS_PDC202XX | extra_flags, \ .host_flags = IDE_HFLAGS_PDC202XX, \
.pio_mask = ATA_PIO4, \ .pio_mask = ATA_PIO4, \
.mwdma_mask = ATA_MWDMA2, \ .mwdma_mask = ATA_MWDMA2, \
.udma_mask = udma, \ .udma_mask = udma, \
.max_sectors = sectors, \
} }
static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = { static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
...@@ -376,8 +377,8 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = { ...@@ -376,8 +377,8 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
/* 1: PDC2026{2,3} */ /* 1: PDC2026{2,3} */
DECLARE_PDC2026X_DEV(ATA_UDMA4, 0), DECLARE_PDC2026X_DEV(ATA_UDMA4, 0),
/* 2: PDC2026{5,7} */ /* 2: PDC2026{5,7}: UDMA5, limit LBA48 requests to 256 sectors */
DECLARE_PDC2026X_DEV(ATA_UDMA5, IDE_HFLAG_RQSIZE_256), DECLARE_PDC2026X_DEV(ATA_UDMA5, 256),
}; };
/** /**
......
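The same conversion applies to any other host that previously used IDE_HFLAG_RQSIZE_256: the limit is now expressed through ->max_sectors, which ide_init_port() copies into hwif->rqsize. A hypothetical (not in this series) port_info would look like:

/* Hypothetical example only. */
static const struct ide_port_info example_chipset __devinitdata = {
	.name		= "example",
	.host_flags	= IDE_HFLAG_NO_DMA,
	.max_sectors	= 256,	/* limit LBA48 requests to 256 sectors */
};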
...@@ -22,34 +22,48 @@ ...@@ -22,34 +22,48 @@
#define DRV_NAME "rz1000" #define DRV_NAME "rz1000"
static void __devinit init_hwif_rz1000 (ide_hwif_t *hwif) static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
{ {
struct pci_dev *dev = to_pci_dev(hwif->dev);
u16 reg; u16 reg;
if (!pci_read_config_word (dev, 0x40, &reg) && if (!pci_read_config_word (dev, 0x40, &reg) &&
!pci_write_config_word(dev, 0x40, reg & 0xdfff)) { !pci_write_config_word(dev, 0x40, reg & 0xdfff)) {
printk(KERN_INFO "%s: disabled chipset read-ahead " printk(KERN_INFO "%s: disabled chipset read-ahead "
"(buggy RZ1000/RZ1001)\n", hwif->name); "(buggy RZ1000/RZ1001)\n", pci_name(dev));
return 0;
} else { } else {
if (hwif->mate)
hwif->mate->serialized = hwif->serialized = 1;
hwif->host_flags |= IDE_HFLAG_NO_UNMASK_IRQS;
printk(KERN_INFO "%s: serialized, disabled unmasking " printk(KERN_INFO "%s: serialized, disabled unmasking "
"(buggy RZ1000/RZ1001)\n", hwif->name); "(buggy RZ1000/RZ1001)\n", pci_name(dev));
return 1;
} }
} }
static const struct ide_port_info rz1000_chipset __devinitdata = { static const struct ide_port_info rz1000_chipset __devinitdata = {
.name = DRV_NAME, .name = DRV_NAME,
.init_hwif = init_hwif_rz1000,
.chipset = ide_rz1000,
.host_flags = IDE_HFLAG_NO_DMA, .host_flags = IDE_HFLAG_NO_DMA,
}; };
static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id) static int __devinit rz1000_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{ {
return ide_pci_init_one(dev, &rz1000_chipset, NULL); struct ide_port_info d = rz1000_chipset;
int rc;
rc = pci_enable_device(dev);
if (rc)
return rc;
if (rz1000_disable_readahead(dev)) {
d.host_flags |= IDE_HFLAG_SERIALIZE;
d.host_flags |= IDE_HFLAG_NO_UNMASK_IRQS;
}
return ide_pci_init_one(dev, &d, NULL);
}
static void rz1000_remove(struct pci_dev *dev)
{
ide_pci_remove(dev);
pci_disable_device(dev);
} }
static const struct pci_device_id rz1000_pci_tbl[] = { static const struct pci_device_id rz1000_pci_tbl[] = {
...@@ -63,7 +77,7 @@ static struct pci_driver rz1000_pci_driver = { ...@@ -63,7 +77,7 @@ static struct pci_driver rz1000_pci_driver = {
.name = "RZ1000_IDE", .name = "RZ1000_IDE",
.id_table = rz1000_pci_tbl, .id_table = rz1000_pci_tbl,
.probe = rz1000_init_one, .probe = rz1000_init_one,
.remove = ide_pci_remove, .remove = rz1000_remove,
}; };
static int __init rz1000_ide_init(void) static int __init rz1000_ide_init(void)
......
...@@ -328,10 +328,10 @@ static struct ide_dma_ops trm290_dma_ops = { ...@@ -328,10 +328,10 @@ static struct ide_dma_ops trm290_dma_ops = {
static const struct ide_port_info trm290_chipset __devinitdata = { static const struct ide_port_info trm290_chipset __devinitdata = {
.name = DRV_NAME, .name = DRV_NAME,
.init_hwif = init_hwif_trm290, .init_hwif = init_hwif_trm290,
.chipset = ide_trm290,
.port_ops = &trm290_port_ops, .port_ops = &trm290_port_ops,
.dma_ops = &trm290_dma_ops, .dma_ops = &trm290_dma_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA | .host_flags = IDE_HFLAG_TRM290 |
IDE_HFLAG_NO_ATAPI_DMA |
#if 0 /* play it safe for now */ #if 0 /* play it safe for now */
IDE_HFLAG_TRUST_BIOS_FOR_DMA | IDE_HFLAG_TRUST_BIOS_FOR_DMA |
#endif #endif
......
...@@ -181,7 +181,7 @@ static void tx4938ide_input_data_swap(ide_drive_t *drive, struct request *rq, ...@@ -181,7 +181,7 @@ static void tx4938ide_input_data_swap(ide_drive_t *drive, struct request *rq,
while (count--) while (count--)
*ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port)); *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port));
__ide_flush_dcache_range((unsigned long)buf, count * 2); __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
} }
static void tx4938ide_output_data_swap(ide_drive_t *drive, struct request *rq, static void tx4938ide_output_data_swap(ide_drive_t *drive, struct request *rq,
...@@ -195,7 +195,7 @@ static void tx4938ide_output_data_swap(ide_drive_t *drive, struct request *rq, ...@@ -195,7 +195,7 @@ static void tx4938ide_output_data_swap(ide_drive_t *drive, struct request *rq,
__raw_writew(le16_to_cpu(*ptr), (void __iomem *)port); __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port);
ptr++; ptr++;
} }
__ide_flush_dcache_range((unsigned long)buf, count * 2); __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
} }
static const struct ide_tp_ops tx4938ide_tp_ops = { static const struct ide_tp_ops tx4938ide_tp_ops = {
......
...@@ -259,6 +259,12 @@ static int tx4939ide_build_dmatable(ide_drive_t *drive, struct request *rq) ...@@ -259,6 +259,12 @@ static int tx4939ide_build_dmatable(ide_drive_t *drive, struct request *rq)
bcount = 0x10000 - (cur_addr & 0xffff); bcount = 0x10000 - (cur_addr & 0xffff);
if (bcount > cur_len) if (bcount > cur_len)
bcount = cur_len; bcount = cur_len;
/*
 * This workaround for a zero count seems to be required
 * (the standard ide_build_dmatable() does it too).
*/
if ((bcount & 0xffff) == 0x0000)
bcount = 0x8000;
*table++ = bcount & 0xffff; *table++ = bcount & 0xffff;
*table++ = cur_addr; *table++ = cur_addr;
cur_addr += bcount; cur_addr += bcount;
...@@ -558,7 +564,7 @@ static void tx4939ide_input_data_swap(ide_drive_t *drive, struct request *rq, ...@@ -558,7 +564,7 @@ static void tx4939ide_input_data_swap(ide_drive_t *drive, struct request *rq,
while (count--) while (count--)
*ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port)); *ptr++ = cpu_to_le16(__raw_readw((void __iomem *)port));
__ide_flush_dcache_range((unsigned long)buf, count * 2); __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
} }
static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq, static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq,
...@@ -572,7 +578,7 @@ static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq, ...@@ -572,7 +578,7 @@ static void tx4939ide_output_data_swap(ide_drive_t *drive, struct request *rq,
__raw_writew(le16_to_cpu(*ptr), (void __iomem *)port); __raw_writew(le16_to_cpu(*ptr), (void __iomem *)port);
ptr++; ptr++;
} }
__ide_flush_dcache_range((unsigned long)buf, count * 2); __ide_flush_dcache_range((unsigned long)buf, roundup(len, 2));
} }
static const struct ide_tp_ops tx4939ide_tp_ops = { static const struct ide_tp_ops tx4939ide_tp_ops = {
......
...@@ -107,18 +107,21 @@ static void umc_set_speeds(u8 speeds[]) ...@@ -107,18 +107,21 @@ static void umc_set_speeds(u8 speeds[])
static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio) static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio)
{ {
ide_hwif_t *hwif = drive->hwif; ide_hwif_t *hwif = drive->hwif;
unsigned long flags; ide_hwgroup_t *mate_hwgroup = hwif->mate ? hwif->mate->hwgroup : NULL;
unsigned long uninitialized_var(flags);
printk("%s: setting umc8672 to PIO mode%d (speed %d)\n", printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
drive->name, pio, pio_to_umc[pio]); drive->name, pio, pio_to_umc[pio]);
spin_lock_irqsave(&ide_lock, flags); if (mate_hwgroup)
if (hwif->mate && hwif->mate->hwgroup->handler) { spin_lock_irqsave(&mate_hwgroup->lock, flags);
if (mate_hwgroup && mate_hwgroup->handler) {
printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n"); printk(KERN_ERR "umc8672: other interface is busy: exiting tune_umc()\n");
} else { } else {
current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio]; current_speeds[drive->name[2] - 'a'] = pio_to_umc[pio];
umc_set_speeds(current_speeds); umc_set_speeds(current_speeds);
} }
spin_unlock_irqrestore(&ide_lock, flags); if (mate_hwgroup)
spin_unlock_irqrestore(&mate_hwgroup->lock, flags);
} }
static const struct ide_port_ops umc8672_port_ops = { static const struct ide_port_ops umc8672_port_ops = {
......
...@@ -578,6 +578,8 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd) ...@@ -578,6 +578,8 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
{ {
idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host); idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host);
ide_drive_t *drive = scsi->drive; ide_drive_t *drive = scsi->drive;
ide_hwif_t *hwif;
ide_hwgroup_t *hwgroup;
int busy; int busy;
int ret = FAILED; int ret = FAILED;
...@@ -594,13 +596,16 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd) ...@@ -594,13 +596,16 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
goto no_drive; goto no_drive;
} }
/* First give it some more time, how much is "right" is hard to say :-( */ hwif = drive->hwif;
hwgroup = hwif->hwgroup;
busy = ide_wait_not_busy(HWIF(drive), 100); /* FIXME - uses mdelay which causes latency? */ /* First give it some more time, how much is "right" is hard to say :-(
FIXME - uses mdelay which causes latency? */
busy = ide_wait_not_busy(hwif, 100);
if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
printk (KERN_WARNING "ide-scsi: drive did%s become ready\n", busy?" not":""); printk (KERN_WARNING "ide-scsi: drive did%s become ready\n", busy?" not":"");
spin_lock_irq(&ide_lock); spin_lock_irq(&hwgroup->lock);
/* If there is no pc running we're done (our interrupt took care of it) */ /* If there is no pc running we're done (our interrupt took care of it) */
pc = drive->pc; pc = drive->pc;
...@@ -629,7 +634,7 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd) ...@@ -629,7 +634,7 @@ static int idescsi_eh_abort (struct scsi_cmnd *cmd)
} }
ide_unlock: ide_unlock:
spin_unlock_irq(&ide_lock); spin_unlock_irq(&hwgroup->lock);
no_drive: no_drive:
if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) if (test_bit(IDESCSI_LOG_CMD, &scsi->log))
printk (KERN_WARNING "ide-scsi: abort returns %s\n", ret == SUCCESS?"success":"failed"); printk (KERN_WARNING "ide-scsi: abort returns %s\n", ret == SUCCESS?"success":"failed");
...@@ -642,6 +647,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd) ...@@ -642,6 +647,7 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
struct request *req; struct request *req;
idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host); idescsi_scsi_t *scsi = scsihost_to_idescsi(cmd->device->host);
ide_drive_t *drive = scsi->drive; ide_drive_t *drive = scsi->drive;
ide_hwgroup_t *hwgroup;
int ready = 0; int ready = 0;
int ret = SUCCESS; int ret = SUCCESS;
...@@ -658,14 +664,18 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd) ...@@ -658,14 +664,18 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
return FAILED; return FAILED;
} }
hwgroup = drive->hwif->hwgroup;
spin_lock_irq(cmd->device->host->host_lock); spin_lock_irq(cmd->device->host->host_lock);
spin_lock(&ide_lock); spin_lock(&hwgroup->lock);
pc = drive->pc; pc = drive->pc;
if (pc)
req = pc->rq;
if (pc == NULL || (req = pc->rq) != HWGROUP(drive)->rq || !HWGROUP(drive)->handler) { if (pc == NULL || req != hwgroup->rq || hwgroup->handler == NULL) {
printk (KERN_WARNING "ide-scsi: No active request in idescsi_eh_reset\n"); printk (KERN_WARNING "ide-scsi: No active request in idescsi_eh_reset\n");
spin_unlock(&ide_lock); spin_unlock(&hwgroup->lock);
spin_unlock_irq(cmd->device->host->host_lock); spin_unlock_irq(cmd->device->host->host_lock);
return FAILED; return FAILED;
} }
...@@ -685,10 +695,10 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd) ...@@ -685,10 +695,10 @@ static int idescsi_eh_reset (struct scsi_cmnd *cmd)
BUG(); BUG();
} }
HWGROUP(drive)->rq = NULL; hwgroup->rq = NULL;
HWGROUP(drive)->handler = NULL; hwgroup->handler = NULL;
HWGROUP(drive)->busy = 1; /* will set this to zero when ide reset finished */ hwgroup->busy = 1; /* will set this to zero when ide reset finished */
spin_unlock(&ide_lock); spin_unlock(&hwgroup->lock);
ide_do_reset(drive); ide_do_reset(drive);
......
...@@ -122,8 +122,6 @@ struct ide_io_ports { ...@@ -122,8 +122,6 @@ struct ide_io_ports {
#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */ #define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
#define SECTOR_SIZE 512 #define SECTOR_SIZE 512
#define IDE_LARGE_SEEK(b1,b2,t) (((b1) > (b2) + (t)) || ((b2) > (b1) + (t)))
/* /*
* Timeouts for various operations: * Timeouts for various operations:
*/ */
...@@ -172,9 +170,7 @@ typedef int (ide_ack_intr_t)(struct hwif_s *); ...@@ -172,9 +170,7 @@ typedef int (ide_ack_intr_t)(struct hwif_s *);
enum { ide_unknown, ide_generic, ide_pci, enum { ide_unknown, ide_generic, ide_pci,
ide_cmd640, ide_dtc2278, ide_ali14xx, ide_cmd640, ide_dtc2278, ide_ali14xx,
ide_qd65xx, ide_umc8672, ide_ht6560b, ide_qd65xx, ide_umc8672, ide_ht6560b,
ide_rz1000, ide_trm290, ide_4drives, ide_pmac, ide_acorn,
ide_cmd646, ide_cy82c693, ide_4drives,
ide_pmac, ide_acorn,
ide_au1xxx, ide_palm3710 ide_au1xxx, ide_palm3710
}; };
...@@ -496,8 +492,6 @@ enum { ...@@ -496,8 +492,6 @@ enum {
* when more than one interrupt is needed. * when more than one interrupt is needed.
*/ */
IDE_AFLAG_LIMIT_NFRAMES = (1 << 7), IDE_AFLAG_LIMIT_NFRAMES = (1 << 7),
/* Seeking in progress. */
IDE_AFLAG_SEEKING = (1 << 8),
/* Saved TOC information is current. */ /* Saved TOC information is current. */
IDE_AFLAG_TOC_VALID = (1 << 9), IDE_AFLAG_TOC_VALID = (1 << 9),
/* We think that the drive door is locked. */ /* We think that the drive door is locked. */
...@@ -845,8 +839,6 @@ typedef struct hwif_s { ...@@ -845,8 +839,6 @@ typedef struct hwif_s {
unsigned extra_ports; /* number of extra dma ports */ unsigned extra_ports; /* number of extra dma ports */
unsigned present : 1; /* this interface exists */ unsigned present : 1; /* this interface exists */
unsigned serialized : 1; /* serialized all channel operation */
unsigned sharing_irq: 1; /* 1 = sharing irq with another hwif */
unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */ unsigned sg_mapped : 1; /* sg_table and sg_nents are ready */
struct device gendev; struct device gendev;
...@@ -909,6 +901,8 @@ typedef struct hwgroup_s { ...@@ -909,6 +901,8 @@ typedef struct hwgroup_s {
int req_gen; int req_gen;
int req_gen_timer; int req_gen_timer;
spinlock_t lock;
} ide_hwgroup_t; } ide_hwgroup_t;
typedef struct ide_driver_s ide_driver_t; typedef struct ide_driver_s ide_driver_t;
...@@ -1122,6 +1116,14 @@ enum { ...@@ -1122,6 +1116,14 @@ enum {
IDE_PM_COMPLETED, IDE_PM_COMPLETED,
}; };
int generic_ide_suspend(struct device *, pm_message_t);
int generic_ide_resume(struct device *);
void ide_complete_power_step(ide_drive_t *, struct request *);
ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
void ide_complete_pm_request(ide_drive_t *, struct request *);
void ide_check_pm_state(ide_drive_t *, struct request *);
/* /*
* Subdrivers support. * Subdrivers support.
* *
...@@ -1376,8 +1378,8 @@ enum { ...@@ -1376,8 +1378,8 @@ enum {
IDE_HFLAG_LEGACY_IRQS = (1 << 21), IDE_HFLAG_LEGACY_IRQS = (1 << 21),
/* force use of legacy IRQs */ /* force use of legacy IRQs */
IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22), IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22),
/* limit LBA48 requests to 256 sectors */ /* host is TRM290 */
IDE_HFLAG_RQSIZE_256 = (1 << 23), IDE_HFLAG_TRM290 = (1 << 23),
/* use 32-bit I/O ops */ /* use 32-bit I/O ops */
IDE_HFLAG_IO_32BIT = (1 << 24), IDE_HFLAG_IO_32BIT = (1 << 24),
/* unmask IRQs */ /* unmask IRQs */
...@@ -1415,6 +1417,9 @@ struct ide_port_info { ...@@ -1415,6 +1417,9 @@ struct ide_port_info {
ide_pci_enablebit_t enablebits[2]; ide_pci_enablebit_t enablebits[2];
hwif_chipset_t chipset; hwif_chipset_t chipset;
u16 max_sectors; /* if < than the default one */
u32 host_flags; u32 host_flags;
u8 pio_mask; u8 pio_mask;
u8 swdma_mask; u8 swdma_mask;
...@@ -1610,13 +1615,13 @@ extern struct mutex ide_cfg_mtx; ...@@ -1610,13 +1615,13 @@ extern struct mutex ide_cfg_mtx;
/* /*
* Structure locking: * Structure locking:
* *
* ide_cfg_mtx and ide_lock together protect changes to * ide_cfg_mtx and hwgroup->lock together protect changes to
* ide_hwif_t->{next,hwgroup} * ide_hwif_t->next
* ide_drive_t->next * ide_drive_t->next
* *
* ide_hwgroup_t->busy: ide_lock * ide_hwgroup_t->busy: hwgroup->lock
* ide_hwgroup_t->hwif: ide_lock * ide_hwgroup_t->hwif: hwgroup->lock
* ide_hwif_t->mate: constant, no locking * ide_hwif_t->{hwgroup,mate}: constant, no locking
* ide_drive_t->hwif: constant, no locking * ide_drive_t->hwif: constant, no locking
*/ */
......
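In practice the conversion from the global ide_lock to the per-hwgroup lock follows one idiom throughout this series; a representative, purely illustrative helper would look like:

#include <linux/ide.h>

/* Illustrative only: take the lock of the hwgroup a drive belongs to. */
static void example_touch_hwgroup(ide_drive_t *drive)
{
	ide_hwgroup_t *hwgroup = drive->hwif->hwgroup;
	unsigned long flags;

	spin_lock_irqsave(&hwgroup->lock, flags);
	/* ... inspect or update hwgroup->rq, hwgroup->busy, ... */
	spin_unlock_irqrestore(&hwgroup->lock, flags);
}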