ide: add struct ide_dma_ops (take 3)

Add struct ide_dma_ops and convert core code + drivers to use it.
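
For context, the conversion follows the usual kernel "ops structure" pattern: a host driver declares a struct ide_dma_ops containing only the hooks it overrides, ide_setup_dma() fills the remaining slots with the generic SFF helpers, and call sites go through hwif->dma_ops instead of individual hwif method pointers. The following is a minimal, self-contained sketch of that pattern in plain userspace C with hypothetical names (illustration only, not kernel code); the real structure layout and fallback logic are in the include/linux/ide.h and ide-dma.c hunks below.

  #include <stdio.h>

  struct dma_ops {
          int  (*dma_setup)(void);
          void (*dma_start)(void);
          int  (*dma_end)(void);
  };

  static int  generic_setup(void) { puts("generic setup"); return 0; }
  static void generic_start(void) { puts("generic start"); }
  static int  generic_end(void)   { puts("generic end");   return 0; }

  /* A driver that only customizes ->dma_end, like several drivers in the diff. */
  static int  quirky_end(void)    { puts("driver-specific end"); return 0; }
  static struct dma_ops quirky_dma_ops = { .dma_end = quirky_end };

  /* Analogue of ide_setup_dma(): plug in defaults for hooks the driver left NULL. */
  static void setup_dma(struct dma_ops *ops)
  {
          if (ops->dma_setup == NULL)
                  ops->dma_setup = generic_setup;
          if (ops->dma_start == NULL)
                  ops->dma_start = generic_start;
          if (ops->dma_end == NULL)
                  ops->dma_end = generic_end;
  }

  int main(void)
  {
          setup_dma(&quirky_dma_ops);
          quirky_dma_ops.dma_setup();     /* falls back to the generic helper */
          quirky_dma_ops.dma_start();     /* falls back to the generic helper */
          quirky_dma_ops.dma_end();       /* uses the driver-specific override */
          return 0;
  }

In the patch itself the same fallback happens in ide_setup_dma(): d->dma_ops defaults to &sff_dma_ops when the host driver supplies none, and any NULL hook in a supplied ops structure is pointed at the generic ide_dma_*() / __ide_dma_*() helpers.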

While at it:

* Drop "ide_" prefix from ->ide_dma_end and ->ide_dma_test_irq methods.

* Drop "ide_" "infixes" from DMA methods.

* au1xxx-ide.c:
  - use auide_dma_{test_irq,end}() directly in auide_dma_timeout()

* pdc202xx_old.c:
  - drop "old_" "infixes" from DMA methods

* siimage.c:
  - add siimage_dma_test_irq() helper
  - print SATA warning in siimage_init_one()

* Remove no longer needed ->init_hwif implementations.

v2:
* Changes based on review from Sergei:
  - s/siimage_ide_dma_test_irq/siimage_dma_test_irq/
  - s/drive->hwif/hwif/ in idefloppy_pc_intr().
  - fix patch description w.r.t. au1xxx-ide changes
  - fix au1xxx-ide build
  - fix naming for cmd64*_dma_ops
  - drop "ide_" and "old_" infixes
  - s/hpt3xxx_dma_ops/hpt37x_dma_ops/
  - s/hpt370x_dma_ops/hpt370_dma_ops/
  - use correct DMA ops for HPT302/N, HPT371/N and HPT374
  - s/it821x_smart_dma_ops/it821x_pass_through_dma_ops/

v3:
* Fix two bugs that slipped into v2 (noticed by Sergei):
  - use correct DMA ops for HPT374 (for real this time)
  - handle HPT370/HPT370A properly

Acked-by: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
......@@ -389,17 +389,21 @@ static int icside_dma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
hwif->dmatable_cpu = NULL;
hwif->dmatable_dma = 0;
hwif->dma_host_set = icside_dma_host_set;
hwif->dma_setup = icside_dma_setup;
hwif->dma_exec_cmd = icside_dma_exec_cmd;
hwif->dma_start = icside_dma_start;
hwif->ide_dma_end = icside_dma_end;
hwif->ide_dma_test_irq = icside_dma_test_irq;
hwif->dma_timeout = icside_dma_timeout;
hwif->dma_lost_irq = icside_dma_lost_irq;
return 0;
}
static struct ide_dma_ops icside_v6_dma_ops = {
.dma_host_set = icside_dma_host_set,
.dma_setup = icside_dma_setup,
.dma_exec_cmd = icside_dma_exec_cmd,
.dma_start = icside_dma_start,
.dma_end = icside_dma_end,
.dma_test_irq = icside_dma_test_irq,
.dma_timeout = icside_dma_timeout,
.dma_lost_irq = icside_dma_lost_irq,
};
#else
#define icside_v6_dma_ops NULL
#endif
static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
......@@ -475,6 +479,7 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
static const struct ide_port_info icside_v6_port_info __initdata = {
.init_dma = icside_dma_off_init,
.port_ops = &icside_v6_no_dma_port_ops,
.dma_ops = &icside_v6_dma_ops,
.host_flags = IDE_HFLAG_SERIALIZE |
IDE_HFLAG_NO_AUTOTUNE,
.mwdma_mask = ATA_MWDMA2,
......@@ -550,6 +555,7 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) {
d.init_dma = icside_dma_init;
d.port_ops = &icside_v6_dma_port_ops;
d.dma_ops = NULL;
}
idx[0] = hwif->index;
......
......@@ -328,7 +328,7 @@ static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
if (ide_allocate_dma_engine(hwif))
return -1;
ide_setup_dma(hwif, base);
ide_setup_dma(hwif, base, d);
return 0;
}
......
......@@ -673,11 +673,6 @@ cris_ide_inb(unsigned long reg)
return (unsigned char)cris_ide_inw(reg);
}
static int cris_dma_end (ide_drive_t *drive);
static int cris_dma_setup (ide_drive_t *drive);
static void cris_dma_exec_cmd (ide_drive_t *drive, u8 command);
static int cris_dma_test_irq(ide_drive_t *drive);
static void cris_dma_start(ide_drive_t *drive);
static void cris_ide_input_data (ide_drive_t *drive, void *, unsigned int);
static void cris_ide_output_data (ide_drive_t *drive, void *, unsigned int);
static void cris_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int);
......@@ -787,9 +782,12 @@ static const struct ide_port_ops cris_port_ops = {
.set_dma_mode = cris_set_dma_mode,
};
static struct ide_dma_ops cris_dma_ops;
static const struct ide_port_info cris_port_info __initdata = {
.chipset = ide_etrax100,
.port_ops = &cris_port_ops,
.dma_ops = &cris_dma_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
IDE_HFLAG_NO_DMA, /* no SFF-style DMA */
.pio_mask = ATA_PIO4,
......@@ -820,12 +818,6 @@ static int __init init_e100_ide(void)
hwif->ata_output_data = &cris_ide_output_data;
hwif->atapi_input_bytes = &cris_atapi_input_bytes;
hwif->atapi_output_bytes = &cris_atapi_output_bytes;
hwif->dma_host_set = &cris_dma_host_set;
hwif->ide_dma_end = &cris_dma_end;
hwif->dma_setup = &cris_dma_setup;
hwif->dma_exec_cmd = &cris_dma_exec_cmd;
hwif->ide_dma_test_irq = &cris_dma_test_irq;
hwif->dma_start = &cris_dma_start;
hwif->OUTB = &cris_ide_outb;
hwif->OUTW = &cris_ide_outw;
hwif->OUTBSYNC = &cris_ide_outbsync;
......@@ -1080,6 +1072,15 @@ static void cris_dma_start(ide_drive_t *drive)
}
}
static struct ide_dma_ops cris_dma_ops = {
.dma_host_set = cris_dma_host_set,
.dma_setup = cris_dma_setup,
.dma_exec_cmd = cris_dma_exec_cmd,
.dma_start = cris_dma_start,
.dma_end = cris_dma_end,
.dma_test_irq = cris_dma_test_irq,
};
module_init(init_e100_ide);
MODULE_LICENSE("GPL");
......@@ -539,7 +539,7 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
/* FIXME: for Virtual DMA we must check harder */
if (info->dma)
info->dma = !hwif->dma_setup(drive);
info->dma = !hwif->dma_ops->dma_setup(drive);
/* set up the controller registers */
ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL |
......@@ -617,7 +617,7 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
/* start the DMA if need be */
if (info->dma)
hwif->dma_start(drive);
hwif->dma_ops->dma_start(drive);
return ide_started;
}
......@@ -929,7 +929,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
dma = info->dma;
if (dma) {
info->dma = 0;
dma_error = HWIF(drive)->ide_dma_end(drive);
dma_error = hwif->dma_ops->dma_end(drive);
if (dma_error) {
printk(KERN_ERR "%s: DMA %s error\n", drive->name,
write ? "write" : "read");
......
......@@ -102,7 +102,7 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
{
u8 stat = 0, dma_stat = 0;
dma_stat = HWIF(drive)->ide_dma_end(drive);
dma_stat = drive->hwif->dma_ops->dma_end(drive);
stat = ide_read_status(drive);
if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
......@@ -394,7 +394,7 @@ void ide_dma_off_quietly(ide_drive_t *drive)
drive->using_dma = 0;
ide_toggle_bounce(drive, 0);
drive->hwif->dma_host_set(drive, 0);
drive->hwif->dma_ops->dma_host_set(drive, 0);
}
EXPORT_SYMBOL(ide_dma_off_quietly);
......@@ -427,7 +427,7 @@ void ide_dma_on(ide_drive_t *drive)
drive->using_dma = 1;
ide_toggle_bounce(drive, 1);
drive->hwif->dma_host_set(drive, 1);
drive->hwif->dma_ops->dma_host_set(drive, 1);
}
#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
......@@ -802,10 +802,10 @@ void ide_dma_timeout (ide_drive_t *drive)
printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
if (hwif->ide_dma_test_irq(drive))
if (hwif->dma_ops->dma_test_irq(drive))
return;
hwif->ide_dma_end(drive);
hwif->dma_ops->dma_end(drive);
}
EXPORT_SYMBOL(ide_dma_timeout);
......@@ -839,8 +839,21 @@ int ide_allocate_dma_engine(ide_hwif_t *hwif)
}
EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
static struct ide_dma_ops sff_dma_ops = {
.dma_host_set = ide_dma_host_set,
.dma_setup = ide_dma_setup,
.dma_exec_cmd = ide_dma_exec_cmd,
.dma_start = ide_dma_start,
.dma_end = __ide_dma_end,
.dma_test_irq = __ide_dma_test_irq,
.dma_timeout = ide_dma_timeout,
.dma_lost_irq = ide_dma_lost_irq,
};
void ide_setup_dma(ide_hwif_t *hwif, unsigned long base,
const struct ide_port_info *d)
{
struct ide_dma_ops *dma_ops = d->dma_ops ? d->dma_ops : &sff_dma_ops;
hwif->dma_base = base;
if (!hwif->dma_command)
......@@ -854,22 +867,24 @@ void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
if (!hwif->dma_prdtable)
hwif->dma_prdtable = hwif->dma_base + 4;
if (!hwif->dma_host_set)
hwif->dma_host_set = &ide_dma_host_set;
if (!hwif->dma_setup)
hwif->dma_setup = &ide_dma_setup;
if (!hwif->dma_exec_cmd)
hwif->dma_exec_cmd = &ide_dma_exec_cmd;
if (!hwif->dma_start)
hwif->dma_start = &ide_dma_start;
if (!hwif->ide_dma_end)
hwif->ide_dma_end = &__ide_dma_end;
if (!hwif->ide_dma_test_irq)
hwif->ide_dma_test_irq = &__ide_dma_test_irq;
if (!hwif->dma_timeout)
hwif->dma_timeout = &ide_dma_timeout;
if (!hwif->dma_lost_irq)
hwif->dma_lost_irq = &ide_dma_lost_irq;
hwif->dma_ops = dma_ops;
if (dma_ops->dma_host_set == NULL)
dma_ops->dma_host_set = ide_dma_host_set;
if (dma_ops->dma_setup == NULL)
dma_ops->dma_setup = ide_dma_setup;
if (dma_ops->dma_exec_cmd == NULL)
dma_ops->dma_exec_cmd = ide_dma_exec_cmd;
if (dma_ops->dma_start == NULL)
dma_ops->dma_start = ide_dma_start;
if (dma_ops->dma_end == NULL)
dma_ops->dma_end = __ide_dma_end;
if (dma_ops->dma_test_irq == NULL)
dma_ops->dma_test_irq = __ide_dma_test_irq;
if (dma_ops->dma_timeout == NULL)
dma_ops->dma_timeout = ide_dma_timeout;
if (dma_ops->dma_lost_irq == NULL)
dma_ops->dma_lost_irq = ide_dma_lost_irq;
}
EXPORT_SYMBOL_GPL(ide_setup_dma);
......
......@@ -411,7 +411,7 @@ static ide_startstop_t idefloppy_pc_intr(ide_drive_t *drive)
debug_log("Reached %s interrupt handler\n", __func__);
if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
dma_error = hwif->ide_dma_end(drive);
dma_error = hwif->dma_ops->dma_end(drive);
if (dma_error) {
printk(KERN_ERR "%s: DMA %s error\n", drive->name,
rq_data_dir(rq) ? "write" : "read");
......@@ -663,7 +663,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
dma = 0;
if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
dma = !hwif->dma_setup(drive);
dma = !hwif->dma_ops->dma_setup(drive);
ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
IDE_TFLAG_OUT_DEVICE, bcount, dma);
......@@ -671,7 +671,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
if (dma) {
/* Begin DMA, if necessary */
pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
hwif->dma_start(drive);
hwif->dma_ops->dma_start(drive);
}
/* Can we transfer the packet when we get the interrupt or wait? */
......
......@@ -218,7 +218,7 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
* we could be smarter and check for current xfer_speed
* in struct drive etc...
*/
if (drive->hwif->dma_host_set == NULL)
if (drive->hwif->dma_ops == NULL)
break;
/*
* TODO: respect ->using_dma setting
......@@ -1238,12 +1238,12 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
if (error < 0) {
printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
(void)HWIF(drive)->ide_dma_end(drive);
(void)hwif->dma_ops->dma_end(drive);
ret = ide_error(drive, "dma timeout error",
ide_read_status(drive));
} else {
printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
hwif->dma_timeout(drive);
hwif->dma_ops->dma_timeout(drive);
}
/*
......@@ -1355,7 +1355,7 @@ void ide_timer_expiry (unsigned long data)
startstop = handler(drive);
} else if (drive_is_ready(drive)) {
if (drive->waiting_for_dma)
hwgroup->hwif->dma_lost_irq(drive);
hwif->dma_ops->dma_lost_irq(drive);
(void)ide_ack_intr(hwif);
printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
startstop = handler(drive);
......
......@@ -432,7 +432,7 @@ int drive_is_ready (ide_drive_t *drive)
u8 stat = 0;
if (drive->waiting_for_dma)
return hwif->ide_dma_test_irq(drive);
return hwif->dma_ops->dma_test_irq(drive);
#if 0
/* need to guarantee 400ns since last command was issued */
......@@ -703,8 +703,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
// msleep(50);
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_host_set) /* check if host supports DMA */
hwif->dma_host_set(drive, 0);
if (hwif->dma_ops) /* check if host supports DMA */
hwif->dma_ops->dma_host_set(drive, 0);
#endif
/* Skip setting PIO flow-control modes on pre-EIDE drives */
......@@ -762,8 +762,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
#ifdef CONFIG_BLK_DEV_IDEDMA
if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) &&
drive->using_dma)
hwif->dma_host_set(drive, 1);
else if (hwif->dma_host_set) /* check if host supports DMA */
hwif->dma_ops->dma_host_set(drive, 1);
else if (hwif->dma_ops) /* check if host supports DMA */
ide_dma_off_quietly(drive);
#endif
......
......@@ -843,7 +843,7 @@ static void ide_port_tune_devices(ide_hwif_t *hwif)
drive->nice1 = 1;
if (hwif->dma_host_set)
if (hwif->dma_ops)
ide_set_dma(drive);
}
}
......@@ -1390,7 +1390,8 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
hwif->swdma_mask = 0;
hwif->mwdma_mask = 0;
hwif->ultra_mask = 0;
}
} else if (d->dma_ops)
hwif->dma_ops = d->dma_ops;
}
if (d->host_flags & IDE_HFLAG_RQSIZE_256)
......
......@@ -993,7 +993,7 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
stat = ide_read_status(drive);
if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
if (hwif->ide_dma_end(drive) || (stat & ERR_STAT)) {
if (hwif->dma_ops->dma_end(drive) || (stat & ERR_STAT)) {
/*
* A DMA error is sometimes expected. For example,
* if the tape is crossing a filemark during a
......@@ -1213,7 +1213,7 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
#ifdef CONFIG_BLK_DEV_IDEDMA
/* Begin DMA, if necessary */
if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
hwif->dma_start(drive);
hwif->dma_ops->dma_start(drive);
#endif
/* Send the actual packet */
HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
......@@ -1279,7 +1279,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
ide_dma_off(drive);
}
if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
dma_ok = !hwif->dma_setup(drive);
dma_ok = !hwif->dma_ops->dma_setup(drive);
ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
......
......@@ -135,6 +135,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
ide_hwif_t *hwif = HWIF(drive);
struct ide_taskfile *tf = &task->tf;
ide_handler_t *handler = NULL;
struct ide_dma_ops *dma_ops = hwif->dma_ops;
if (task->data_phase == TASKFILE_MULTI_IN ||
task->data_phase == TASKFILE_MULTI_OUT) {
......@@ -178,10 +179,10 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
return ide_started;
default:
if (task_dma_ok(task) == 0 || drive->using_dma == 0 ||
hwif->dma_setup(drive))
dma_ops->dma_setup(drive))
return ide_stopped;
hwif->dma_exec_cmd(drive, tf->command);
hwif->dma_start(drive);
dma_ops->dma_exec_cmd(drive, tf->command);
dma_ops->dma_start(drive);
return ide_started;
}
}
......
......@@ -469,7 +469,7 @@ int set_using_dma(ide_drive_t *drive, int arg)
if (!drive->id || !(drive->id->capability & 1))
goto out;
if (hwif->dma_host_set == NULL)
if (hwif->dma_ops == NULL)
goto out;
err = -EBUSY;
......
......@@ -367,20 +367,30 @@ static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 de
dev->dev_flags = flags;
}
#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
static void auide_dma_timeout(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
if (hwif->ide_dma_test_irq(drive))
if (auide_dma_test_irq(drive))
return;
hwif->ide_dma_end(drive);
auide_dma_end(drive);
}
static struct ide_dma_ops au1xxx_dma_ops = {
.dma_host_set = auide_dma_host_set,
.dma_setup = auide_dma_setup,
.dma_exec_cmd = auide_dma_exec_cmd,
.dma_start = auide_dma_start,
.dma_end = auide_dma_end,
.dma_test_irq = auide_dma_test_irq,
.dma_lost_irq = auide_dma_lost_irq,
.dma_timeout = auide_dma_timeout,
};
static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
{
_auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
......@@ -511,6 +521,9 @@ static const struct ide_port_ops au1xxx_port_ops = {
static const struct ide_port_info au1xxx_port_info = {
.init_dma = auide_ddma_init,
.port_ops = &au1xxx_port_ops,
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
.dma_ops = &au1xxx_dma_ops,
#endif
.host_flags = IDE_HFLAG_POST_SET_MODE |
IDE_HFLAG_NO_IO_32BIT |
IDE_HFLAG_UNMASK_IRQS,
......@@ -588,16 +601,6 @@ static int au_ide_probe(struct device *dev)
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA
hwif->INSW = auide_insw;
hwif->OUTSW = auide_outsw;
#endif
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
hwif->dma_timeout = &auide_dma_timeout;
hwif->dma_host_set = &auide_dma_host_set;
hwif->dma_exec_cmd = &auide_dma_exec_cmd;
hwif->dma_start = &auide_dma_start;
hwif->ide_dma_end = &auide_dma_end;
hwif->dma_setup = &auide_dma_setup;
hwif->ide_dma_test_irq = &auide_dma_test_irq;
hwif->dma_lost_irq = &auide_dma_lost_irq;
#endif
hwif->select_data = 0; /* no chipset-specific code */
hwif->config_data = 0; /* no chipset-specific code */
......
......@@ -652,21 +652,7 @@ static u8 __devinit ali_cable_detect(ide_hwif_t *hwif)
return cbl;
}
/**
* init_hwif_common_ali15x3 - Set up ALI IDE hardware
* @hwif: IDE interface
*
* Initialize the IDE structure side of the ALi 15x3 driver.
*/
static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
{
if (hwif->dma_base == 0)
return;
hwif->dma_setup = &ali15x3_dma_setup;
}
#ifndef CONFIG_SPARC64
/**
* init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff
* @hwif: interface to configure
......@@ -716,9 +702,8 @@ static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
if(irq >= 0)
hwif->irq = irq;
}
init_hwif_common_ali15x3(hwif);
}
#endif
/**
* init_dma_ali15x3 - set up DMA on ALi15x3
......@@ -746,7 +731,7 @@ static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
if (ide_allocate_dma_engine(hwif))
return -1;
ide_setup_dma(hwif, base);
ide_setup_dma(hwif, base, d);
return 0;
}
......@@ -758,10 +743,16 @@ static const struct ide_port_ops ali_port_ops = {
.cable_detect = ali_cable_detect,
};
static struct ide_dma_ops ali_dma_ops = {
.dma_setup = ali15x3_dma_setup,
};
static const struct ide_port_info ali15x3_chipset __devinitdata = {
.name = "ALI15X3",
.init_chipset = init_chipset_ali15x3,
#ifndef CONFIG_SPARC64
.init_hwif = init_hwif_ali15x3,
#endif
.init_dma = init_dma_ali15x3,
.port_ops = &ali_port_ops,
.pio_mask = ATA_PIO5,
......@@ -806,6 +797,8 @@ static int __devinit alim15x3_init_one(struct pci_dev *dev, const struct pci_dev
d.udma_mask = ATA_UDMA5;
else
d.udma_mask = ATA_UDMA6;
d.dma_ops = &ali_dma_ops;
} else {
d.host_flags |= IDE_HFLAG_NO_DMA;
......@@ -815,9 +808,6 @@ static int __devinit alim15x3_init_one(struct pci_dev *dev, const struct pci_dev
if (idx == 0)
d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;
#if defined(CONFIG_SPARC64)
d.init_hwif = init_hwif_common_ali15x3;
#endif /* CONFIG_SPARC64 */
return ide_setup_pci_device(dev, &d);
}
......
......@@ -223,7 +223,7 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
(void) pci_write_config_byte(dev, pciU, regU);
}
static int cmd648_ide_dma_end (ide_drive_t *drive)
static int cmd648_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long base = hwif->dma_base - (hwif->channel * 8);
......@@ -239,7 +239,7 @@ static int cmd648_ide_dma_end (ide_drive_t *drive)
return err;
}
static int cmd64x_ide_dma_end (ide_drive_t *drive)
static int cmd64x_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = to_pci_dev(hwif->dev);
......@@ -256,7 +256,7 @@ static int cmd64x_ide_dma_end (ide_drive_t *drive)
return err;
}
static int cmd648_ide_dma_test_irq (ide_drive_t *drive)
static int cmd648_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long base = hwif->dma_base - (hwif->channel * 8);
......@@ -279,7 +279,7 @@ static int cmd648_ide_dma_test_irq (ide_drive_t *drive)
return 0;
}
static int cmd64x_ide_dma_test_irq (ide_drive_t *drive)
static int cmd64x_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = to_pci_dev(hwif->dev);
......@@ -310,7 +310,7 @@ static int cmd64x_ide_dma_test_irq (ide_drive_t *drive)
* event order for DMA transfers.
*/
static int cmd646_1_ide_dma_end (ide_drive_t *drive)
static int cmd646_1_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
u8 dma_stat = 0, dma_cmd = 0;
......@@ -385,62 +385,33 @@ static u8 __devinit cmd64x_cable_detect(ide_hwif_t *hwif)
}
}
static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
if (!hwif->dma_base)
return;
/*
* UltraDMA only supported on PCI646U and PCI646U2, which
* correspond to revisions 0x03, 0x05 and 0x07 respectively.
* Actually, although the CMD tech support people won't
* tell me the details, the 0x03 revision cannot support
* UDMA correctly without hardware modifications, and even
* then it only works with Quantum disks due to some
* hold time assumptions in the 646U part which are fixed
* in the 646U2.
*
* So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
*/
if (dev->device == PCI_DEVICE_ID_CMD_646 && dev->revision < 5)
hwif->ultra_mask = 0x00;
switch (dev->device) {
case PCI_DEVICE_ID_CMD_648:
case PCI_DEVICE_ID_CMD_649:
alt_irq_bits:
hwif->ide_dma_end = &cmd648_ide_dma_end;
hwif->ide_dma_test_irq = &cmd648_ide_dma_test_irq;
break;
case PCI_DEVICE_ID_CMD_646:
if (dev->revision == 0x01) {
hwif->ide_dma_end = &cmd646_1_ide_dma_end;
break;
} else if (dev->revision >= 0x03)
goto alt_irq_bits;
/* fall thru */
default:
hwif->ide_dma_end = &cmd64x_ide_dma_end;
hwif->ide_dma_test_irq = &cmd64x_ide_dma_test_irq;
break;
}
}
static const struct ide_port_ops cmd64x_port_ops = {
.set_pio_mode = cmd64x_set_pio_mode,
.set_dma_mode = cmd64x_set_dma_mode,
.cable_detect = cmd64x_cable_detect,
};
static struct ide_dma_ops cmd64x_dma_ops = {
.dma_end = cmd64x_dma_end,
.dma_test_irq = cmd64x_dma_test_irq,
};
static struct ide_dma_ops cmd646_rev1_dma_ops = {
.dma_end = cmd646_1_dma_end,
};
static struct ide_dma_ops cmd648_dma_ops = {
.dma_end = cmd648_dma_end,
.dma_test_irq = cmd648_dma_test_irq,
};
static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
{ /* 0 */
.name = "CMD643",
.init_chipset = init_chipset_cmd64x,
.init_hwif = init_hwif_cmd64x,
.enablebits = {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
.port_ops = &cmd64x_port_ops,
.dma_ops = &cmd64x_dma_ops,
.host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
IDE_HFLAG_ABUSE_PREFETCH,
.pio_mask = ATA_PIO5,
......@@ -449,10 +420,10 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
},{ /* 1 */
.name = "CMD646",
.init_chipset = init_chipset_cmd64x,
.init_hwif = init_hwif_cmd64x,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
.chipset = ide_cmd646,
.port_ops = &cmd64x_port_ops,
.dma_ops = &cmd648_dma_ops,
.host_flags = IDE_HFLAG_ABUSE_PREFETCH,
.pio_mask = ATA_PIO5,
.mwdma_mask = ATA_MWDMA2,
......@@ -460,9 +431,9 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
},{ /* 2 */
.name = "CMD648",
.init_chipset = init_chipset_cmd64x,
.init_hwif = init_hwif_cmd64x,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
.port_ops = &cmd64x_port_ops,
.dma_ops = &cmd648_dma_ops,
.host_flags = IDE_HFLAG_ABUSE_PREFETCH,
.pio_mask = ATA_PIO5,
.mwdma_mask = ATA_MWDMA2,
......@@ -470,9 +441,9 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
},{ /* 3 */
.name = "CMD649",
.init_chipset = init_chipset_cmd64x,
.init_hwif = init_hwif_cmd64x,
.enablebits = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
.port_ops = &cmd64x_port_ops,
.dma_ops = &cmd648_dma_ops,
.host_flags = IDE_HFLAG_ABUSE_PREFETCH,
.pio_mask = ATA_PIO5,
.mwdma_mask = ATA_MWDMA2,
......@@ -487,12 +458,35 @@ static int __devinit cmd64x_init_one(struct pci_dev *dev, const struct pci_devic
d = cmd64x_chipsets[idx];
if (idx == 1) {
/*
* UltraDMA only supported on PCI646U and PCI646U2, which
* correspond to revisions 0x03, 0x05 and 0x07 respectively.
* Actually, although the CMD tech support people won't
* tell me the details, the 0x03 revision cannot support
* UDMA correctly without hardware modifications, and even
* then it only works with Quantum disks due to some
* hold time assumptions in the 646U part which are fixed
* in the 646U2.
*
* So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
*/
if (dev->revision < 5) {
d.udma_mask = 0x00;
/*
* The original PCI0646 didn't have the primary channel enable bit,
* it appeared starting with PCI0646U (i.e. revision ID 3).
* The original PCI0646 didn't have the primary
* channel enable bit, it appeared starting with
* PCI0646U (i.e. revision ID 3).
*/
if (idx == 1 && dev->revision < 3)
if (dev->revision < 3) {
d.enablebits[0].reg = 0;
if (dev->revision == 1)
d.dma_ops = &cmd646_rev1_dma_ops;
else
d.dma_ops = &cmd64x_dma_ops;
}
}
}
return ide_setup_pci_device(dev, &d);
}
......
......@@ -103,24 +103,20 @@ static void cs5520_dma_host_set(ide_drive_t *drive, int on)
ide_dma_host_set(drive, on);
}
static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
{
if (hwif->dma_base == 0)
return;
hwif->dma_host_set = &cs5520_dma_host_set;
}
static const struct ide_port_ops cs5520_port_ops = {
.set_pio_mode = cs5520_set_pio_mode,
.set_dma_mode = cs5520_set_dma_mode,
};
static struct ide_dma_ops cs5520_dma_ops = {
.dma_host_set = cs5520_dma_host_set,
};
#define DECLARE_CS_DEV(name_str) \
{ \
.name = name_str, \
.init_hwif = init_hwif_cs5520, \
.port_ops = &cs5520_port_ops, \
.dma_ops = &cs5520_dma_ops, \
.host_flags = IDE_HFLAG_ISA_PORTS | \
IDE_HFLAG_CS5520 | \
IDE_HFLAG_VDMA | \
......
......@@ -808,7 +808,7 @@ static void hpt370_irq_timeout(ide_drive_t *drive)
hpt370_clear_engine(drive);
}
static void hpt370_ide_dma_start(ide_drive_t *drive)
static void hpt370_dma_start(ide_drive_t *drive)
{
#ifdef HPT_RESET_STATE_ENGINE
hpt370_clear_engine(drive);
......@@ -816,7 +816,7 @@ static void hpt370_ide_dma_start(ide_drive_t *drive)
ide_dma_start(drive);
}
static int hpt370_ide_dma_end(ide_drive_t *drive)
static int hpt370_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
u8 dma_stat = inb(hwif->dma_status);
......@@ -838,7 +838,7 @@ static void hpt370_dma_timeout(ide_drive_t *drive)
}
/* returns 1 if DMA IRQ issued, 0 otherwise */
static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
static int hpt374_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = to_pci_dev(hwif->dev);
......@@ -862,7 +862,7 @@ static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
return 0;
}
static int hpt374_ide_dma_end(ide_drive_t *drive)
static int hpt374_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = to_pci_dev(hwif->dev);
......@@ -1312,19 +1312,6 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
if (new_mcr != old_mcr)
pci_write_config_byte(dev, hwif->select_data + 1, new_mcr);
if (hwif->dma_base == 0)
return;
if (chip_type >= HPT374) {
hwif->ide_dma_test_irq = &hpt374_ide_dma_test_irq;
hwif->ide_dma_end = &hpt374_ide_dma_end;
} else if (chip_type >= HPT370) {
hwif->dma_start = &hpt370_ide_dma_start;
hwif->ide_dma_end = &hpt370_ide_dma_end;
hwif->dma_timeout = &hpt370_dma_timeout;
} else
hwif->dma_lost_irq = &hpt366_dma_lost_irq;
}
static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
......@@ -1360,7 +1347,7 @@ static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
if (ide_allocate_dma_engine(hwif))
return -1;
ide_setup_dma(hwif, base);
ide_setup_dma(hwif, base, d);
return 0;
}
......@@ -1428,6 +1415,21 @@ static const struct ide_port_ops hpt3xx_port_ops = {
.cable_detect = hpt3xx_cable_detect,
};
static struct ide_dma_ops hpt37x_dma_ops = {
.dma_end = hpt374_dma_end,
.dma_test_irq = hpt374_dma_test_irq,
};
static struct ide_dma_ops hpt370_dma_ops = {
.dma_start = hpt370_dma_start,
.dma_end = hpt370_dma_end,
.dma_timeout = hpt370_dma_timeout,
};
static struct ide_dma_ops hpt36x_dma_ops = {
.dma_lost_irq = hpt366_dma_lost_irq,
};
static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
{ /* 0 */
.name = "HPT36x",
......@@ -1442,6 +1444,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
*/
.enablebits = {{0x50,0x10,0x10}, {0x54,0x04,0x04}},
.port_ops = &hpt3xx_port_ops,
.dma_ops = &hpt36x_dma_ops,
.host_flags = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
......@@ -1452,6 +1455,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
.init_dma = init_dma_hpt366,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
.port_ops = &hpt3xx_port_ops,
.dma_ops = &hpt37x_dma_ops,
.host_flags = IDE_HFLAGS_HPT3XX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
......@@ -1462,6 +1466,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
.init_dma = init_dma_hpt366,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
.port_ops = &hpt3xx_port_ops,
.dma_ops = &hpt37x_dma_ops,
.host_flags = IDE_HFLAGS_HPT3XX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
......@@ -1472,6 +1477,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
.init_dma = init_dma_hpt366,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
.port_ops = &hpt3xx_port_ops,
.dma_ops = &hpt37x_dma_ops,
.host_flags = IDE_HFLAGS_HPT3XX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
......@@ -1483,6 +1489,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
.udma_mask = ATA_UDMA5,
.port_ops = &hpt3xx_port_ops,
.dma_ops = &hpt37x_dma_ops,
.host_flags = IDE_HFLAGS_HPT3XX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
......@@ -1493,6 +1500,7 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
.init_dma = init_dma_hpt366,
.enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
.port_ops = &hpt3xx_port_ops,
.dma_ops = &hpt37x_dma_ops,
.host_flags = IDE_HFLAGS_HPT3XX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
......@@ -1555,6 +1563,10 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
d.name = info->chip_name;
d.udma_mask = info->udma_mask;
/* fixup ->dma_ops for HPT370/HPT370A */
if (info == &hpt370 || info == &hpt370a)
d.dma_ops = &hpt370_dma_ops;
pci_set_drvdata(dev, (void *)info);
if (info == &hpt36x || info == &hpt374)
......
......@@ -511,6 +511,11 @@ static void __devinit it821x_quirkproc(ide_drive_t *drive)
}
static struct ide_dma_ops it821x_pass_through_dma_ops = {
.dma_start = it821x_dma_start,
.dma_end = it821x_dma_end,
};
/**
* init_hwif_it821x - set up hwif structs
* @hwif: interface to set up
......@@ -562,8 +567,7 @@ static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
if (idev->smart == 0) {
/* MWDMA/PIO clock switching for pass through mode */
hwif->dma_start = &it821x_dma_start;
hwif->ide_dma_end = &it821x_dma_end;
hwif->dma_ops = &it821x_pass_through_dma_ops;
} else
hwif->host_flags |= IDE_HFLAG_NO_SET_MODE;
......
......@@ -150,7 +150,7 @@ static void ns87415_selectproc (ide_drive_t *drive)
ns87415_prepare_drive (drive, drive->using_dma);
}
static int ns87415_ide_dma_end (ide_drive_t *drive)
static int ns87415_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
u8 dma_stat = 0, dma_cmd = 0;
......@@ -170,7 +170,7 @@ static int ns87415_ide_dma_end (ide_drive_t *drive)
return (dma_stat & 7) != 4;
}
static int ns87415_ide_dma_setup(ide_drive_t *drive)
static int ns87415_dma_setup(ide_drive_t *drive)
{
/* select DMA xfer */
ns87415_prepare_drive(drive, 1);
......@@ -252,14 +252,17 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
return;
outb(0x60, hwif->dma_status);
hwif->dma_setup = &ns87415_ide_dma_setup;
hwif->ide_dma_end = &ns87415_ide_dma_end;
}
static const struct ide_port_ops ns87415_port_ops = {
.selectproc = ns87415_selectproc,
};
static struct ide_dma_ops ns87415_dma_ops = {
.dma_setup = ns87415_dma_setup,
.dma_end = ns87415_dma_end,
};
static const struct ide_port_info ns87415_chipset __devinitdata = {
.name = "NS87415",
#ifdef CONFIG_SUPERIO
......@@ -267,6 +270,7 @@ static const struct ide_port_info ns87415_chipset __devinitdata = {
#endif
.init_hwif = init_hwif_ns87415,
.port_ops = &ns87415_port_ops,
.dma_ops = &ns87415_dma_ops,
.host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
IDE_HFLAG_NO_ATAPI_DMA,
};
......
......@@ -163,7 +163,7 @@ static void pdc202xx_quirkproc(ide_drive_t *drive)
drive->quirk_list = 0;
}
static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
static void pdc202xx_dma_start(ide_drive_t *drive)
{
if (drive->current_speed > XFER_UDMA_2)
pdc_old_enable_66MHz_clock(drive->hwif);
......@@ -185,7 +185,7 @@ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
ide_dma_start(drive);
}
static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
static int pdc202xx_dma_end(ide_drive_t *drive)
{
if (drive->media != ide_disk || drive->addressing == 1) {
ide_hwif_t *hwif = HWIF(drive);
......@@ -202,7 +202,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
return __ide_dma_end(drive);
}
static int pdc202xx_old_ide_dma_test_irq(ide_drive_t *drive)
static int pdc202xx_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long high_16 = hwif->extra_base - 16;
......@@ -263,23 +263,6 @@ static void pdc202xx_dma_timeout(ide_drive_t *drive)
ide_dma_timeout(drive);
}
static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
if (hwif->dma_base == 0)
return;
hwif->dma_lost_irq = &pdc202xx_dma_lost_irq;
hwif->dma_timeout = &pdc202xx_dma_timeout;
if (dev->device != PCI_DEVICE_ID_PROMISE_20246) {
hwif->dma_start = &pdc202xx_old_ide_dma_start;
hwif->ide_dma_end = &pdc202xx_old_ide_dma_end;
}
hwif->ide_dma_test_irq = &pdc202xx_old_ide_dma_test_irq;
}
static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev,
const char *name)
{
......@@ -346,12 +329,26 @@ static const struct ide_port_ops pdc2026x_port_ops = {
.cable_detect = pdc2026x_cable_detect,
};
static struct ide_dma_ops pdc20246_dma_ops = {
.dma_test_irq = pdc202xx_dma_test_irq,
.dma_lost_irq = pdc202xx_dma_lost_irq,
.dma_timeout = pdc202xx_dma_timeout,
};
static struct ide_dma_ops pdc2026x_dma_ops = {
.dma_start = pdc202xx_dma_start,
.dma_end = pdc202xx_dma_end,
.dma_test_irq = pdc202xx_dma_test_irq,
.dma_lost_irq = pdc202xx_dma_lost_irq,
.dma_timeout = pdc202xx_dma_timeout,
};
#define DECLARE_PDC2026X_DEV(name_str, udma, extra_flags) \
{ \
.name = name_str, \
.init_chipset = init_chipset_pdc202xx, \
.init_hwif = init_hwif_pdc202xx, \
.port_ops = &pdc2026x_port_ops, \
.dma_ops = &pdc2026x_dma_ops, \
.host_flags = IDE_HFLAGS_PDC202XX | extra_flags, \
.pio_mask = ATA_PIO4, \
.mwdma_mask = ATA_MWDMA2, \
......@@ -362,8 +359,8 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
{ /* 0 */
.name = "PDC20246",
.init_chipset = init_chipset_pdc202xx,
.init_hwif = init_hwif_pdc202xx,
.port_ops = &pdc20246_port_ops,
.dma_ops = &pdc20246_dma_ops,
.host_flags = IDE_HFLAGS_PDC202XX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
......
......@@ -165,7 +165,7 @@ static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode)
*
* returns 1 on error, 0 otherwise
*/
static int sc1200_ide_dma_end (ide_drive_t *drive)
static int sc1200_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long dma_base = hwif->dma_base;
......@@ -214,7 +214,7 @@ static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio)
printk("SC1200: %s: changing (U)DMA mode\n", drive->name);
ide_dma_off_quietly(drive);
if (ide_set_dma_mode(drive, mode) == 0 && drive->using_dma)
hwif->dma_host_set(drive, 1);
hwif->dma_ops->dma_host_set(drive, 1);
return;
}
......@@ -286,28 +286,20 @@ static int sc1200_resume (struct pci_dev *dev)
}
#endif
/*
* This gets invoked by the IDE driver once for each channel,
* and performs channel-specific pre-initialization before drive probing.
*/
static void __devinit init_hwif_sc1200 (ide_hwif_t *hwif)
{
if (hwif->dma_base == 0)
return;
hwif->ide_dma_end = &sc1200_ide_dma_end;
}
static const struct ide_port_ops sc1200_port_ops = {
.set_pio_mode = sc1200_set_pio_mode,
.set_dma_mode = sc1200_set_dma_mode,
.udma_filter = sc1200_udma_filter,
};
static struct ide_dma_ops sc1200_dma_ops = {
.dma_end = sc1200_dma_end,
};
static const struct ide_port_info sc1200_chipset __devinitdata = {
.name = "SC1200",
.init_hwif = init_hwif_sc1200,
.port_ops = &sc1200_port_ops,
.dma_ops = &sc1200_dma_ops,
.host_flags = IDE_HFLAG_SERIALIZE |
IDE_HFLAG_POST_SET_MODE |
IDE_HFLAG_ABUSE_DMA_MODES,
......
......@@ -317,14 +317,14 @@ static int scc_dma_setup(ide_drive_t *drive)
/**
* scc_ide_dma_end - Stop DMA
* scc_dma_end - Stop DMA
* @drive: IDE drive
*
* Check and clear INT Status register.
* Then call __ide_dma_end().
*/
static int scc_ide_dma_end(ide_drive_t * drive)
static int scc_dma_end(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long intsts_port = hwif->dma_base + 0x014;
......@@ -692,10 +692,6 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
/* PTERADD */
out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
hwif->dma_setup = scc_dma_setup;
hwif->ide_dma_end = scc_ide_dma_end;
hwif->ide_dma_test_irq = scc_dma_test_irq;
if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
else
......@@ -709,12 +705,19 @@ static const struct ide_port_ops scc_port_ops = {
.cable_detect = scc_cable_detect,
};
static struct ide_dma_ops scc_dma_ops = {
.dma_setup = scc_dma_setup,
.dma_end = scc_dma_end,
.dma_test_irq = scc_dma_test_irq,
};
#define DECLARE_SCC_DEV(name_str) \
{ \
.name = name_str, \
.init_iops = init_iops_scc, \
.init_hwif = init_hwif_scc, \
.port_ops = &scc_port_ops, \
.dma_ops = &scc_dma_ops, \
.host_flags = IDE_HFLAG_SINGLE, \
.pio_mask = ATA_PIO4, \
}
......
......@@ -188,7 +188,7 @@ sgiioc4_clearirq(ide_drive_t * drive)
return intr_reg & 3;
}
static void sgiioc4_ide_dma_start(ide_drive_t * drive)
static void sgiioc4_dma_start(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long ioc4_dma_addr = hwif->dma_base + IOC4_DMA_CTRL * 4;
......@@ -215,8 +215,7 @@ sgiioc4_ide_dma_stop(ide_hwif_t *hwif, u64 dma_base)
}
/* Stops the IOC4 DMA Engine */
static int
sgiioc4_ide_dma_end(ide_drive_t * drive)
static int sgiioc4_dma_end(ide_drive_t *drive)
{
u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0;
ide_hwif_t *hwif = HWIF(drive);
......@@ -279,8 +278,7 @@ static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed)
}
/* returns 1 if dma irq issued, 0 otherwise */
static int
sgiioc4_ide_dma_test_irq(ide_drive_t * drive)
static int sgiioc4_dma_test_irq(ide_drive_t *drive)
{
return sgiioc4_checkirq(HWIF(drive));
}
......@@ -294,7 +292,7 @@ static void sgiioc4_dma_host_set(ide_drive_t *drive, int on)
static void
sgiioc4_resetproc(ide_drive_t * drive)
{
sgiioc4_ide_dma_end(drive);
sgiioc4_dma_end(drive);
sgiioc4_clearirq(drive);
}
......@@ -327,8 +325,6 @@ sgiioc4_INB(unsigned long port)
return reg;
}
static void __devinit ide_init_sgiioc4(ide_hwif_t *);
/* Creates a dma map for the scatter-gather list entries */
static int __devinit
ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
......@@ -377,7 +373,6 @@ ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
if (pad) {
ide_set_hwifdata(hwif, pad);
ide_init_sgiioc4(hwif);
return 0;
}
......@@ -526,7 +521,7 @@ sgiioc4_build_dma_table(ide_drive_t * drive, struct request *rq, int ddir)
return 0; /* revert to PIO for this request */
}
static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
static int sgiioc4_dma_setup(ide_drive_t *drive)
{
struct request *rq = HWGROUP(drive)->rq;
unsigned int count = 0;
......@@ -555,18 +550,6 @@ static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
return 0;
}
static void __devinit
ide_init_sgiioc4(ide_hwif_t * hwif)
{
hwif->dma_host_set = &sgiioc4_dma_host_set;
hwif->dma_setup = &sgiioc4_ide_dma_setup;
hwif->dma_start = &sgiioc4_ide_dma_start;
hwif->ide_dma_end = &sgiioc4_ide_dma_end;
hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq;
hwif->dma_lost_irq = &sgiioc4_dma_lost_irq;
hwif->dma_timeout = &ide_dma_timeout;
}
static const struct ide_port_ops sgiioc4_port_ops = {
.set_dma_mode = sgiioc4_set_dma_mode,
/* reset DMA engine, clear IRQs */
......@@ -575,10 +558,21 @@ static const struct ide_port_ops sgiioc4_port_ops = {
.maskproc = sgiioc4_maskproc,
};
static struct ide_dma_ops sgiioc4_dma_ops = {
.dma_host_set = sgiioc4_dma_host_set,
.dma_setup = sgiioc4_dma_setup,
.dma_start = sgiioc4_dma_start,
.dma_end = sgiioc4_dma_end,
.dma_test_irq = sgiioc4_dma_test_irq,
.dma_lost_irq = sgiioc4_dma_lost_irq,
.dma_timeout = ide_dma_timeout,
};
static const struct ide_port_info sgiioc4_port_info __devinitdata = {
.chipset = ide_pci,
.init_dma = ide_dma_sgiioc4,
.port_ops = &sgiioc4_port_ops,
.dma_ops = &sgiioc4_dma_ops,
.host_flags = IDE_HFLAG_NO_AUTOTUNE,
.mwdma_mask = ATA_MWDMA2_ONLY,
};
......
......@@ -301,7 +301,7 @@ static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
}
/* returns 1 if dma irq issued, 0 otherwise */
static int siimage_io_ide_dma_test_irq (ide_drive_t *drive)
static int siimage_io_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
struct pci_dev *dev = to_pci_dev(hwif->dev);
......@@ -320,14 +320,14 @@ static int siimage_io_ide_dma_test_irq (ide_drive_t *drive)
}
/**
* siimage_mmio_ide_dma_test_irq - check we caused an IRQ
* siimage_mmio_dma_test_irq - check we caused an IRQ
* @drive: drive we are testing
*
* Check if we caused an IDE DMA interrupt. We may also have caused
* SATA status interrupts, if so we clean them up and continue.
*/
static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive)
static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
{
ide_hwif_t *hwif = HWIF(drive);
unsigned long addr = siimage_selreg(hwif, 0x1);
......@@ -369,6 +369,14 @@ static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive)
return 0;
}
static int siimage_dma_test_irq(ide_drive_t *drive)
{
if (drive->hwif->mmio)
return siimage_mmio_dma_test_irq(drive);
else
return siimage_io_dma_test_irq(drive);
}
/**
* sil_sata_reset_poll - wait for SATA reset
* @drive: drive we are resetting
......@@ -756,41 +764,6 @@ static u8 __devinit sil_cable_detect(ide_hwif_t *hwif)
return (ata66 & 0x01) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
}
/**
* init_hwif_siimage - set up hwif structs
* @hwif: interface to set up
*
* We do the basic set up of the interface structure. The SIIMAGE
* requires several custom handlers so we override the default
* ide DMA handlers appropriately
*/
static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
{
u8 sata = is_sata(hwif);
if (sata) {
static int first = 1;
if (first) {
printk(KERN_INFO "siimage: For full SATA support you should use the libata sata_sil module.\n");
first = 0;
}
}
if (hwif->dma_base == 0)
return;
if (sata)
hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
if (hwif->mmio) {
hwif->ide_dma_test_irq = &siimage_mmio_ide_dma_test_irq;
} else {
hwif->ide_dma_test_irq = & siimage_io_ide_dma_test_irq;
}
}
static const struct ide_port_ops sil_pata_port_ops = {
.set_pio_mode = sil_set_pio_mode,
.set_dma_mode = sil_set_dma_mode,
......@@ -809,13 +782,17 @@ static const struct ide_port_ops sil_sata_port_ops = {
.cable_detect = sil_cable_detect,
};
static struct ide_dma_ops sil_dma_ops = {
.dma_test_irq = siimage_dma_test_irq,
};
#define DECLARE_SII_DEV(name_str, p_ops) \
{ \
.name = name_str, \
.init_chipset = init_chipset_siimage, \
.init_iops = init_iops_siimage, \
.init_hwif = init_hwif_siimage, \
.port_ops = p_ops, \
.dma_ops = &sil_dma_ops, \
.pio_mask = ATA_PIO4, \
.mwdma_mask = ATA_MWDMA2, \
.udma_mask = ATA_UDMA6, \
......@@ -838,7 +815,24 @@ static const struct ide_port_info siimage_chipsets[] __devinitdata = {
static int __devinit siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
return ide_setup_pci_device(dev, &siimage_chipsets[id->driver_data]);
struct ide_port_info d;
u8 idx = id->driver_data;
d = siimage_chipsets[idx];
if (idx) {
static int first = 1;
if (first) {
printk(KERN_INFO "siimage: For full SATA support you "
"should use the libata sata_sil module.\n");
first = 0;
}
d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
}
return ide_setup_pci_device(dev, &d);
}
static const struct pci_device_id siimage_pci_tbl[] = {
......
......@@ -282,34 +282,25 @@ static unsigned int __devinit init_chipset_sl82c105(struct pci_dev *dev, const c
return dev->irq;
}
/*
* Initialise IDE channel
*/
static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif)
{
DBG(("init_hwif_sl82c105(hwif: ide%d)\n", hwif->index));
if (!hwif->dma_base)
return;
hwif->dma_lost_irq = &sl82c105_dma_lost_irq;
hwif->dma_start = &sl82c105_dma_start;
hwif->ide_dma_end = &sl82c105_dma_end;
hwif->dma_timeout = &sl82c105_dma_timeout;
}
static const struct ide_port_ops sl82c105_port_ops = {
.set_pio_mode = sl82c105_set_pio_mode,
.set_dma_mode = sl82c105_set_dma_mode,
.resetproc = sl82c105_resetproc,
};
static struct ide_dma_ops sl82c105_dma_ops = {
.dma_start = sl82c105_dma_start,
.dma_end = sl82c105_dma_end,
.dma_lost_irq = sl82c105_dma_lost_irq,
.dma_timeout = sl82c105_dma_timeout,
};
static const struct ide_port_info sl82c105_chipset __devinitdata = {
.name = "W82C105",
.init_chipset = init_chipset_sl82c105,
.init_hwif = init_hwif_sl82c105,
.enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
.port_ops = &sl82c105_port_ops,
.dma_ops = &sl82c105_dma_ops,
.host_flags = IDE_HFLAG_IO_32BIT |
IDE_HFLAG_UNMASK_IRQS |
/* FIXME: check for Compatibility mode in generic IDE PCI code */
......@@ -334,7 +325,7 @@ static int __devinit sl82c105_init_one(struct pci_dev *dev, const struct pci_dev
*/
printk(KERN_INFO "W82C105_IDE: Winbond W83C553 bridge "
"revision %d, BM-DMA disabled\n", rev);
d.init_hwif = NULL;
d.dma_ops = NULL;
d.mwdma_mask = 0;
d.host_flags &= ~IDE_HFLAG_SERIALIZE_DMA;
}
......
......@@ -168,8 +168,6 @@ static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
/* Sector Count Register limit */
hwif->rqsize = 0xffff;
hwif->dma_start = &tc86c001_dma_start;
}
static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev,
......@@ -188,11 +186,16 @@ static const struct ide_port_ops tc86c001_port_ops = {
.cable_detect = tc86c001_cable_detect,
};
static struct ide_dma_ops tc86c001_dma_ops = {
.dma_start = tc86c001_dma_start,
};
static const struct ide_port_info tc86c001_chipset __devinitdata = {
.name = "TC86C001",
.init_chipset = init_chipset_tc86c001,
.init_hwif = init_hwif_tc86c001,
.port_ops = &tc86c001_port_ops,
.dma_ops = &tc86c001_dma_ops,
.host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD |
IDE_HFLAG_ABUSE_SET_DMA_MODE,
.pio_mask = ATA_PIO4,
......
......@@ -214,7 +214,7 @@ static void trm290_dma_start(ide_drive_t *drive)
{
}
static int trm290_ide_dma_end (ide_drive_t *drive)
static int trm290_dma_end(ide_drive_t *drive)
{
u16 status;
......@@ -225,7 +225,7 @@ static int trm290_ide_dma_end (ide_drive_t *drive)
return status != 0x00ff;
}
static int trm290_ide_dma_test_irq (ide_drive_t *drive)
static int trm290_dma_test_irq(ide_drive_t *drive)
{
u16 status;
......@@ -280,12 +280,6 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
/* sharing IRQ with mate */
hwif->irq = hwif->mate->irq;
hwif->dma_host_set = &trm290_dma_host_set;
hwif->dma_setup = &trm290_dma_setup;
hwif->dma_exec_cmd = &trm290_dma_exec_cmd;
hwif->dma_start = &trm290_dma_start;
hwif->ide_dma_end = &trm290_ide_dma_end;
hwif->ide_dma_test_irq = &trm290_ide_dma_test_irq;
#if 1
{
/*
......@@ -319,11 +313,21 @@ static const struct ide_port_ops trm290_port_ops = {
.selectproc = trm290_selectproc,
};
static struct ide_dma_ops trm290_dma_ops = {
.dma_host_set = trm290_dma_host_set,
.dma_setup = trm290_dma_setup,
.dma_exec_cmd = trm290_dma_exec_cmd,
.dma_start = trm290_dma_start,
.dma_end = trm290_dma_end,
.dma_test_irq = trm290_dma_test_irq,
};
static const struct ide_port_info trm290_chipset __devinitdata = {
.name = "TRM290",
.init_hwif = init_hwif_trm290,
.chipset = ide_trm290,
.port_ops = &trm290_port_ops,
.dma_ops = &trm290_dma_ops,
.host_flags = IDE_HFLAG_NO_ATAPI_DMA |
#if 0 /* play it safe for now */
IDE_HFLAG_TRUST_BIOS_FOR_DMA |
......
......@@ -930,9 +930,14 @@ static const struct ide_port_ops pmac_ide_port_ops = {
.selectproc = pmac_ide_selectproc,
};
static struct ide_dma_ops pmac_dma_ops;
static const struct ide_port_info pmac_port_info = {
.init_dma = pmac_ide_init_dma,
.chipset = ide_pmac,
#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
.dma_ops = &pmac_dma_ops,
#endif
.port_ops = &pmac_ide_port_ops,
.host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
IDE_HFLAG_POST_SET_MODE |
......@@ -1670,6 +1675,17 @@ pmac_ide_dma_lost_irq (ide_drive_t *drive)
printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status);
}
static struct ide_dma_ops pmac_dma_ops = {
.dma_host_set = pmac_ide_dma_host_set,
.dma_setup = pmac_ide_dma_setup,
.dma_exec_cmd = pmac_ide_dma_exec_cmd,
.dma_start = pmac_ide_dma_start,
.dma_end = pmac_ide_dma_end,
.dma_test_irq = pmac_ide_dma_test_irq,
.dma_timeout = ide_dma_timeout,
.dma_lost_irq = pmac_ide_dma_lost_irq,
};
/*
* Allocate the data structures needed for using DMA with an interface
* and fill the proper list of functions pointers
......@@ -1702,15 +1718,6 @@ static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
hwif->sg_max_nents = MAX_DCMDS;
hwif->dma_host_set = &pmac_ide_dma_host_set;
hwif->dma_setup = &pmac_ide_dma_setup;
hwif->dma_exec_cmd = &pmac_ide_dma_exec_cmd;
hwif->dma_start = &pmac_ide_dma_start;
hwif->ide_dma_end = &pmac_ide_dma_end;
hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq;
hwif->dma_timeout = &ide_dma_timeout;
hwif->dma_lost_irq = &pmac_ide_dma_lost_irq;
return 0;
}
#else
......
......@@ -385,7 +385,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
if (ide_allocate_dma_engine(hwif))
return -1;
ide_setup_dma(hwif, base);
ide_setup_dma(hwif, base, d);
}
return 0;
......
......@@ -393,7 +393,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
printk ("ide-scsi: %s: DMA complete\n", drive->name);
#endif /* IDESCSI_DEBUG_LOG */
pc->xferred = pc->req_xfer;
(void) HWIF(drive)->ide_dma_end(drive);
(void)hwif->dma_ops->dma_end(drive);
}
/* Clear the interrupt */
......@@ -498,7 +498,7 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
drive->hwif->atapi_output_bytes(drive, scsi->pc->c, 12);
if (pc->flags & PC_FLAG_DMA_OK) {
pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
hwif->dma_start(drive);
hwif->dma_ops->dma_start(drive);
}
return ide_started;
}
......@@ -560,7 +560,7 @@ static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
if (drive->using_dma && !idescsi_map_sg(drive, pc)) {
hwif->sg_mapped = 1;
dma = !hwif->dma_setup(drive);
dma = !hwif->dma_ops->dma_setup(drive);
hwif->sg_mapped = 0;
}
......
......@@ -413,6 +413,17 @@ struct ide_port_ops {
u8 (*cable_detect)(struct hwif_s *);
};
struct ide_dma_ops {
void (*dma_host_set)(struct ide_drive_s *, int);
int (*dma_setup)(struct ide_drive_s *);
void (*dma_exec_cmd)(struct ide_drive_s *, u8);
void (*dma_start)(struct ide_drive_s *);
int (*dma_end)(struct ide_drive_s *);
int (*dma_test_irq)(struct ide_drive_s *);
void (*dma_lost_irq)(struct ide_drive_s *);
void (*dma_timeout)(struct ide_drive_s *);
};
typedef struct hwif_s {
struct hwif_s *next; /* for linked-list in ide_hwgroup_t */
struct hwif_s *mate; /* other hwif from same PCI chip */
......@@ -451,6 +462,7 @@ typedef struct hwif_s {
void (*rw_disk)(ide_drive_t *, struct request *);
const struct ide_port_ops *port_ops;
struct ide_dma_ops *dma_ops;
void (*ata_input_data)(ide_drive_t *, void *, u32);
void (*ata_output_data)(ide_drive_t *, void *, u32);
......@@ -458,15 +470,7 @@ typedef struct hwif_s {
void (*atapi_input_bytes)(ide_drive_t *, void *, u32);
void (*atapi_output_bytes)(ide_drive_t *, void *, u32);
void (*dma_host_set)(ide_drive_t *, int);
int (*dma_setup)(ide_drive_t *);
void (*dma_exec_cmd)(ide_drive_t *, u8);
void (*dma_start)(ide_drive_t *);
int (*ide_dma_end)(ide_drive_t *drive);
int (*ide_dma_test_irq)(ide_drive_t *drive);
void (*ide_dma_clear_irq)(ide_drive_t *drive);
void (*dma_lost_irq)(ide_drive_t *drive);
void (*dma_timeout)(ide_drive_t *drive);
void (*OUTB)(u8 addr, unsigned long port);
void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port);
......@@ -1114,6 +1118,7 @@ struct ide_port_info {
const struct ide_port_info *);
const struct ide_port_ops *port_ops;
struct ide_dma_ops *dma_ops;
ide_pci_enablebit_t enablebits[2];
hwif_chipset_t chipset;
......@@ -1165,7 +1170,7 @@ void ide_destroy_dmatable(ide_drive_t *);
extern int ide_build_dmatable(ide_drive_t *, struct request *);
int ide_allocate_dma_engine(ide_hwif_t *);
void ide_release_dma_engine(ide_hwif_t *);
extern void ide_setup_dma(ide_hwif_t *, unsigned long);
void ide_setup_dma(ide_hwif_t *, unsigned long, const struct ide_port_info *);
void ide_dma_host_set(ide_drive_t *, int);
extern int ide_dma_setup(ide_drive_t *);
......