Commit 2dbd1502 authored by Martin Dalecki's avatar Martin Dalecki Committed by Linus Torvalds

[PATCH] IDE 98

Synchronize with 2.5.25.

Incorporate IDE-94, as well as 95, 96, 97 and 98-pre as announced by Bartek and
unfortunately still not included in 2.5.25, which admittedly still makes things
fall apart:

Missing changelog for 98-pre by Bartlomiej Zolnierkiewicz (BTW.  Handling
Unicode should be essential, at least to make proper crediting of many, many
people possible!) follows here:

 - add missing channel->lock unlocking/locking and fix some comments
   in ide_timer_expiry()

 - allow PCI drivers to disable autodma in ->init_dma()
   (bug introduced in IDE 97, affects sl82c105.c only)

   noticed by Russell King

 - alim15x3.c: disable autodma if revision is <= 0x20

 - remove unneeded checks (drive.dn > 3) from pdc202xx.c and sis5513.c

 - use block layer wrappers
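
   For reference, the diff below also adds non-locking variants of two block
   layer helpers, __blk_get_request() and __blk_attempt_remerge(), for callers
   which serialize queue access themselves.  A minimal usage sketch, assuming
   the caller holds the queue lock around the call (the call site itself is
   hypothetical, only the helpers come from this patch):

	spin_lock_irqsave(q->queue_lock, flags);
	rq = __blk_get_request(q, READ);	/* lock is already held */
	if (rq) {
		/* fill in the special request and hand it to the driver */
	}
	spin_unlock_irqrestore(q->queue_lock, flags);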

And my additions follow:

 - Fix TCQ code. Patch based on work by Alexander Atanasov.
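
   The tagged-queueing start path in the disk request handler now looks
   roughly like this (condensed from the idedisk_do_request() hunk further
   down; the queue-depth statistics and the BUG_ON() are omitted):

	if (drive->using_tcq) {
		/* try to assign a tag; if none is available the request
		 * stays queued and we simply wait for a completion IRQ */
		if (blk_queue_start_tag(&drive->queue, rq))
			return ATA_OP_CONTINUES;
	}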

 - Use the FreeBSD-derived request handler return values:

	ATA_OP_FINISHED
	ATA_OP_CONTINUES
	ATA_OP_RELEASED
	ATA_OP_READY	/* for status ready reporting during poll */
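
   A handler now simply returns one of these codes; a compressed sketch of the
   convention, modelled on task_out_intr() from the ide-disk hunk below
   (my_intr() is a hypothetical name):

	static ide_startstop_t my_intr(struct ata_device *drive, struct request *rq)
	{
		if (!ata_status(drive, DRIVE_READY, drive->bad_wstat))
			return ata_error(drive, rq, __FUNCTION__);	/* error path */

		if (!rq->current_nr_sectors && !__ata_end_request(drive, rq, 1, 0))
			return ATA_OP_FINISHED;		/* whole request completed */

		/* more data to move: re-arm ourselves and wait for the next IRQ */
		ata_set_handler(drive, my_intr, WAIT_CMD, NULL);
		return ATA_OP_CONTINUES;
	}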

 - PMAC compilation fix by Paul Mackerras.

 - Simplify the ata_status_poll function significantly.

 - Fix the logic used to prevent IRQ assertion by drives on channels sharing
   our interrupt.

NOTE: We will soon move this to the point where a request is really finished.

 - Don't use ata_busy_poll(); use ata_status_poll() instead. This increases
   code unification.
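
   Callers now check the returned operation code directly instead of passing a
   startstop pointer, e.g. (taken from the ide-cd hunk below):

	ret = ata_status_poll(drive, 0, BUSY_STAT, WAIT_READY, rq);
	if (ret != ATA_OP_READY)
		return ret;	/* timeout, status error or max_failures exceeded */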

NOTE: We should maybe invent some way to prevent the error recovery path from
being taken at all, especially to prevent ata_error() from trying to reissue
commands.
parent 5b2a1577
......@@ -703,11 +703,11 @@ static ide_startstop_t etrax_dma_intr(struct ata_device *drive, struct request *
i -= rq->current_nr_sectors;
ide_end_request(drive, rq, 1);
}
return ide_stopped;
return ATA_OP_FINISHED;
}
printk("%s: bad DMA status\n", drive->name);
}
return ata_error(drive, __FUNCTION__);
return ata_error(drive, rq, __FUNCTION__);
}
/*
......
......@@ -1202,6 +1202,26 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
return rq;
}
/*
* Non-locking blk_get_request variant, for special requests from drivers.
*/
struct request *__blk_get_request(request_queue_t *q, int rw)
{
struct request *rq;
BUG_ON(rw != READ && rw != WRITE);
rq = get_request(q, rw);
if (rq) {
rq->flags = 0;
rq->buffer = NULL;
rq->bio = rq->biotail = NULL;
rq->waiting = NULL;
}
return rq;
}
void blk_put_request(struct request *rq)
{
blkdev_release_request(rq);
......@@ -1381,6 +1401,14 @@ void blk_attempt_remerge(request_queue_t *q, struct request *rq)
spin_unlock_irqrestore(q->queue_lock, flags);
}
/*
* Non-locking blk_attempt_remerge variant.
*/
void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
{
attempt_back_merge(q, rq);
}
static int __make_request(request_queue_t *q, struct bio *bio)
{
struct request *req, *freereq = NULL;
......@@ -2039,6 +2067,7 @@ EXPORT_SYMBOL(generic_unplug_device);
EXPORT_SYMBOL(blk_plug_device);
EXPORT_SYMBOL(blk_remove_plug);
EXPORT_SYMBOL(blk_attempt_remerge);
EXPORT_SYMBOL(__blk_attempt_remerge);
EXPORT_SYMBOL(blk_max_low_pfn);
EXPORT_SYMBOL(blk_max_pfn);
EXPORT_SYMBOL(blk_queue_max_sectors);
......@@ -2055,6 +2084,7 @@ EXPORT_SYMBOL(blk_queue_assign_lock);
EXPORT_SYMBOL(blk_phys_contig_segment);
EXPORT_SYMBOL(blk_hw_contig_segment);
EXPORT_SYMBOL(blk_get_request);
EXPORT_SYMBOL(__blk_get_request);
EXPORT_SYMBOL(blk_put_request);
EXPORT_SYMBOL(blk_queue_prep_rq);
......
......@@ -160,16 +160,15 @@ static void aec62xx_tune_drive(struct ata_device *drive, unsigned char pio)
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int aec62xx_udma_setup(struct ata_device *drive)
static int __init aec62xx_modes_map(struct ata_channel *ch)
{
u32 bmide = pci_resource_start(drive->channel->pci_dev, 4);
short speed;
u32 bmide = pci_resource_start(ch->pci_dev, 4);
int map;
map = XFER_PIO | XFER_EPIO | XFER_MWDMA | XFER_UDMA | XFER_SWDMA | XFER_UDMA;
map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA | XFER_UDMA;
if (drive->channel->udma_four)
switch (drive->channel->pci_dev->device) {
if (ch->udma_four)
switch (ch->pci_dev->device) {
case PCI_DEVICE_ID_ARTOP_ATP865R:
case PCI_DEVICE_ID_ARTOP_ATP865:
/* Can't use these modes simultaneously,
......@@ -180,11 +179,7 @@ static int aec62xx_udma_setup(struct ata_device *drive)
map |= XFER_UDMA_66;
}
speed = ata_timing_mode(drive, map);
aec_set_drive(drive, speed);
udma_enable(drive, drive->channel->autodma && (speed & XFER_MODE) != XFER_PIO, 0);
return 0;
return map;
}
#endif
......@@ -256,11 +251,12 @@ static void __init aec62xx_init_channel(struct ata_channel *ch)
ch->tuneproc = aec62xx_tune_drive;
ch->speedproc = aec_set_drive;
ch->autodma = 0;
ch->io_32bit = 1;
ch->unmask = 1;
ch->udma_four = aec62xx_ata66_check(ch);
for (i = 0; i < 2; i++) {
ch->drives[i].autotune = 1;
ch->drives[i].dn = ch->unit * 2 + i;
......@@ -269,11 +265,8 @@ static void __init aec62xx_init_channel(struct ata_channel *ch)
#ifdef CONFIG_BLK_DEV_IDEDMA
if (ch->dma_base) {
ch->highmem = 1;
ch->udma_setup = aec62xx_udma_setup;
#ifdef CONFIG_IDEDMA_AUTO
if (!noautodma)
ch->autodma = 1;
#endif
ch->modes_map = aec62xx_modes_map(ch);
ch->udma_setup = udma_generic_setup;
}
#endif
}
......@@ -306,17 +299,15 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_ARTOP,
device: PCI_DEVICE_ID_ARTOP_ATP860,
init_chipset: aec62xx_init_chipset,
ata66_check: aec62xx_ata66_check,
init_channel: aec62xx_init_channel,
enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} },
bootable: NEVER_BOARD,
flags: ATA_F_IRQ | ATA_F_NOADMA | ATA_F_DMA
flags: ATA_F_IRQ | ATA_F_DMA
},
{
vendor: PCI_VENDOR_ID_ARTOP,
device: PCI_DEVICE_ID_ARTOP_ATP860R,
init_chipset: aec62xx_init_chipset,
ata66_check: aec62xx_ata66_check,
init_channel: aec62xx_init_channel,
enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} },
bootable: OFF_BOARD,
......@@ -326,7 +317,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_ARTOP,
device: PCI_DEVICE_ID_ARTOP_ATP865,
init_chipset: aec62xx_init_chipset,
ata66_check: aec62xx_ata66_check,
init_channel: aec62xx_init_channel,
enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} },
bootable: NEVER_BOARD,
......@@ -336,7 +326,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_ARTOP,
device: PCI_DEVICE_ID_ARTOP_ATP865R,
init_chipset: aec62xx_init_chipset,
ata66_check: aec62xx_ata66_check,
init_channel: aec62xx_init_channel,
enablebits: { {0x4a,0x02,0x02}, {0x4a,0x04,0x04} },
bootable: OFF_BOARD,
......
......@@ -99,43 +99,6 @@ static void ali15x3_tune_drive(struct ata_device *drive, byte pio)
__restore_flags(flags);
}
static byte ali15x3_can_ultra(struct ata_device *drive)
{
if (m5229_revision <= 0x20) {
return 0;
} else if ((m5229_revision < 0xC2) &&
#ifndef CONFIG_WDC_ALI15X3
((chip_is_1543c_e && strstr(drive->id->model, "WDC ")) ||
(drive->type != ATA_DISK))) {
#else
(drive->type != ATA_DISK)) {
#endif
return 0;
} else {
return 1;
}
}
static int ali15x3_ratemask(struct ata_device *drive)
{
int map = 0;
if (!ali15x3_can_ultra(drive))
return 0;
map |= XFER_UDMA;
if (!eighty_ninty_three(drive))
return map;
if (m5229_revision >= 0xC4)
map |= XFER_UDMA_100;
if (m5229_revision >= 0xC2)
map |= XFER_UDMA_66;
return map;
}
static int ali15x3_tune_chipset(struct ata_device *drive, byte speed)
{
struct pci_dev *dev = drive->channel->pci_dev;
......@@ -156,6 +119,7 @@ static int ali15x3_tune_chipset(struct ata_device *drive, byte speed)
if (speed < XFER_SW_DMA_0)
ali15x3_tune_drive(drive, speed);
#ifdef CONFIG_BLK_DEV_IDEDMA
/* FIXME: no support for MWDMA and SWDMA modes --bkz */
else if (speed >= XFER_UDMA_0) {
pci_read_config_byte(dev, m5229_udma, &tmpbyte);
tmpbyte &= (0x0f << ((1-unit) << 2));
......@@ -176,91 +140,40 @@ static int ali15x3_tune_chipset(struct ata_device *drive, byte speed)
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int config_chipset_for_dma(struct ata_device *drive, u8 udma)
static int ali15x3_udma_setup(struct ata_device *drive, int map)
{
int map;
u8 mode;
if (udma)
map = ali15x3_ratemask(drive);
else
map = XFER_SWDMA | XFER_MWDMA;
mode = ata_timing_mode(drive, map);
if (mode < XFER_SW_DMA_0)
return 0;
return !ali15x3_tune_chipset(drive, mode);
#ifndef CONFIG_WDC_ALI15X3
if ((m5229_revision < 0xC2) && chip_is_1543c_e &&
strstr(drive->id->model, "WDC "))
map &= ~XFER_UDMA_ALL;
#endif
return udma_generic_setup(drive, map);
}
static int ali15x3_udma_setup(struct ata_device *drive)
static int ali15x3_udma_init(struct ata_device *drive, struct request *rq)
{
struct hd_driveid *id = drive->id;
struct ata_channel *hwif = drive->channel;
int on = 1;
int verbose = 1;
byte can_ultra_dma = ali15x3_can_ultra(drive);
if ((m5229_revision < 0xC2) && (drive->type != ATA_DISK))
return ATA_OP_FINISHED; /* try PIO instead of DMA */
if ((m5229_revision<=0x20) && (drive->type != ATA_DISK)) {
udma_enable(drive, 0, 0);
return 0;
}
return udma_pci_init(drive, rq);
}
if ((id != NULL) && ((id->capability & 1) != 0) && hwif->autodma) {
/* Consult the list of known "bad" drives */
if (udma_black_list(drive)) {
on = 0;
goto fast_ata_pio;
}
on = 0;
verbose = 0;
if ((id->field_valid & 4) && (m5229_revision >= 0xC2)) {
if (id->dma_ultra & 0x003F) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive, can_ultra_dma);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if ((id->dma_mword & 0x0007) ||
(id->dma_1word & 0x0007)) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive, can_ultra_dma);
if (!on)
goto no_dma_set;
}
} else if (udma_white_list(drive)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive, can_ultra_dma);
if (!on)
goto no_dma_set;
} else {
goto fast_ata_pio;
}
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
on = 0;
verbose = 0;
no_dma_set:
ali15x3_tune_drive(drive, 255);
}
static int __init ali15x3_modes_map(struct ata_channel *ch)
{
int map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA;
udma_enable(drive, on, verbose);
if (m5229_revision <= 0x20)
return map;
return 0;
}
map |= XFER_UDMA;
static int ali15x3_udma_init(struct ata_device *drive, struct request *rq)
{
if ((m5229_revision < 0xC2) && (drive->type != ATA_DISK))
return ide_stopped; /* try PIO instead of DMA */
if (m5229_revision >= 0xC2) {
map |= XFER_UDMA_66;
if (m5229_revision >= 0xC4)
map |= XFER_UDMA_100;
}
return udma_pci_init(drive, rq);
return map;
}
#endif
......@@ -426,6 +339,8 @@ static void __init ali15x3_init_channel(struct ata_channel *hwif)
}
#endif /* CONFIG_SPARC64 */
hwif->udma_four = ali15x3_ata66_check(hwif);
hwif->tuneproc = &ali15x3_tune_drive;
hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1;
......@@ -436,22 +351,21 @@ static void __init ali15x3_init_channel(struct ata_channel *hwif)
/*
* M1543C or newer for DMAing
*/
hwif->udma_init = ali15x3_udma_init;
hwif->modes_map = ali15x3_modes_map(hwif);
if (m5229_revision < 0xC2)
hwif->no_atapi_autodma = 1;
hwif->udma_setup = ali15x3_udma_setup;
hwif->autodma = 1;
hwif->udma_init = ali15x3_udma_init;
}
if (noautodma)
hwif->autodma = 0;
#else
hwif->autodma = 0;
#endif
}
static void __init ali15x3_init_dma(struct ata_channel *ch, unsigned long dmabase)
{
if ((dmabase) && (m5229_revision < 0x20))
if (dmabase && (m5229_revision < 0x20)) {
ch->autodma = 0;
return;
}
ata_init_dma(ch, dmabase);
}
......@@ -472,7 +386,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AL,
device: PCI_DEVICE_ID_AL_M5229,
init_chipset: ali15x3_init_chipset,
ata66_check: ali15x3_ata66_check,
init_channel: ali15x3_init_channel,
init_dma: ali15x3_init_dma,
enablebits: { {0x00,0x00,0x00}, {0x00,0x00,0x00} },
......
......@@ -175,21 +175,15 @@ static void amd74xx_tune_drive(struct ata_device *drive, u8 pio)
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int amd74xx_udma_setup(struct ata_device *drive)
static int __init amd_modes_map(struct ata_channel *ch)
{
short w80 = drive->channel->udma_four;
short w80 = ch->udma_four;
int map = XFER_EPIO | XFER_MWDMA | XFER_UDMA |
((amd_config->flags & AMD_BAD_SWDMA) ? 0 : XFER_SWDMA) |
(w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_66 ? XFER_UDMA_66 : 0) |
(w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_100 ? XFER_UDMA_100 : 0);
short speed = ata_timing_mode(drive,
XFER_PIO | XFER_EPIO | XFER_MWDMA | XFER_UDMA |
((amd_config->flags & AMD_BAD_SWDMA) ? 0 : XFER_SWDMA) |
(w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_66 ? XFER_UDMA_66 : 0) |
(w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_100 ? XFER_UDMA_100 : 0));
amd_set_drive(drive, speed);
udma_enable(drive, drive->channel->autodma && (speed & XFER_MODE) != XFER_PIO, 0);
return 0;
return map;
}
#endif
......@@ -274,9 +268,10 @@ static void __init amd74xx_init_channel(struct ata_channel *hwif)
{
int i;
hwif->udma_four = amd74xx_ata66_check(hwif);
hwif->tuneproc = &amd74xx_tune_drive;
hwif->speedproc = &amd_set_drive;
hwif->autodma = 0;
hwif->io_32bit = 1;
hwif->unmask = 1;
......@@ -289,11 +284,8 @@ static void __init amd74xx_init_channel(struct ata_channel *hwif)
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) {
hwif->highmem = 1;
hwif->udma_setup = amd74xx_udma_setup;
# ifdef CONFIG_IDEDMA_AUTO
if (!noautodma)
hwif->autodma = 1;
# endif
hwif->modes_map = amd_modes_map(hwif);
hwif->udma_setup = udma_generic_setup;
}
#endif
}
......@@ -314,7 +306,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_COBRA_7401,
init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
......@@ -324,7 +315,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_VIPER_7409,
init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
......@@ -335,7 +325,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_VIPER_7411,
init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
......@@ -345,7 +334,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_OPUS_7441,
init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
......@@ -355,7 +343,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_AMD,
device: PCI_DEVICE_ID_AMD_8111_IDE,
init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma,
enablebits: {{0x40,0x01,0x01}, {0x40,0x02,0x02}},
......@@ -365,7 +352,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_NVIDIA,
device: PCI_DEVICE_ID_NVIDIA_NFORCE_IDE,
init_chipset: amd74xx_init_chipset,
ata66_check: amd74xx_ata66_check,
init_channel: amd74xx_init_channel,
init_dma: amd74xx_init_dma,
enablebits: {{0x50,0x01,0x01}, {0x50,0x02,0x02}},
......
......@@ -86,9 +86,11 @@ short ata_timing_mode(struct ata_device *drive, int map)
if ((map & XFER_UDMA_100) == XFER_UDMA_100)
if ((best = (id->dma_ultra & 0x0020) ? XFER_UDMA_5 : 0)) return best;
if ((map & XFER_UDMA_66) == XFER_UDMA_66)
if ((best = (id->dma_ultra & 0x0010) ? XFER_UDMA_4 :
(id->dma_ultra & 0x0008) ? XFER_UDMA_3 : 0)) return best;
if ((map & XFER_UDMA_66_4) == XFER_UDMA_66_4)
if ((best = (id->dma_ultra & 0x0010) ? XFER_UDMA_4 : 0)) return best;
if ((map & XFER_UDMA_66_3) == XFER_UDMA_66_3)
if ((best = (id->dma_ultra & 0x0008) ? XFER_UDMA_3 : 0)) return best;
if ((best = (id->dma_ultra & 0x0004) ? XFER_UDMA_2 :
(id->dma_ultra & 0x0002) ? XFER_UDMA_1 :
......
......@@ -59,15 +59,22 @@ extern struct ata_timing ata_timing[];
#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
#define XFER_MODE 0xf0
#define XFER_UDMA_133 0x48
#define XFER_UDMA_100 0x44
#define XFER_UDMA_66 0x42
#define XFER_UDMA 0x40
#define XFER_MWDMA 0x20
#define XFER_SWDMA 0x10
#define XFER_EPIO 0x01
#define XFER_PIO 0x00
/* see hpt366.c for details */
#define XFER_UDMA_66_3 0x100
#define XFER_UDMA_66_4 0x200
#define XFER_MODE 0xff0
#define XFER_UDMA_133 0x800
#define XFER_UDMA_100 0x400
#define XFER_UDMA_66 0x300
#define XFER_UDMA 0x040
#define XFER_MWDMA 0x020
#define XFER_SWDMA 0x010
#define XFER_EPIO 0x001
#define XFER_PIO 0x000
#define XFER_UDMA_ALL 0xf40
#define XFER_UDMA_80W 0xf00
/* External interface to host chips channel timing setup.
*
......
......@@ -217,10 +217,10 @@ static void cmd64x_tuneproc(struct ata_device *drive, u8 pio)
ide_config_drive_speed(drive, speed);
}
static int cmd64x_ratemask(struct ata_device *drive)
static int __init cmd6xx_modes_map(struct ata_channel *ch)
{
struct pci_dev *dev = drive->channel->pci_dev;
int map = 0;
struct pci_dev *dev = ch->pci_dev;
int map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA;
switch(dev->device) {
case PCI_DEVICE_ID_CMD_680:
......@@ -234,10 +234,9 @@ static int cmd64x_ratemask(struct ata_device *drive)
break;
case PCI_DEVICE_ID_CMD_646:
{
u32 class_rev;
pci_read_config_dword(dev,
PCI_CLASS_REVISION, &class_rev);
class_rev &= 0xff;
u32 rev;
pci_read_config_dword(dev, PCI_CLASS_REVISION, &rev);
rev &= 0xff;
/*
* UltraDMA only supported on PCI646U and PCI646U2, which
* correspond to revisions 0x03, 0x05 and 0x07 respectively.
......@@ -250,7 +249,7 @@ static int cmd64x_ratemask(struct ata_device *drive)
*
* So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
*/
switch(class_rev) {
switch(rev) {
case 0x07:
case 0x05:
map |= XFER_UDMA;
......@@ -260,11 +259,6 @@ static int cmd64x_ratemask(struct ata_device *drive)
}
}
if (!eighty_ninty_three(drive)) {
if (map & XFER_UDMA)
return XFER_UDMA;
return 0;
}
return map;
}
......@@ -515,80 +509,6 @@ speed_break :
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int config_chipset_for_dma(struct ata_device *drive, u8 udma)
{
int map;
u8 mode;
if (udma)
map = cmd64x_ratemask(drive);
else
map = XFER_SWDMA | XFER_MWDMA;
mode = ata_timing_mode(drive, map);
return !drive->channel->speedproc(drive, mode);
}
static int cmd6xx_udma_setup(struct ata_device *drive)
{
struct hd_driveid *id = drive->id;
struct ata_channel *hwif = drive->channel;
int on = 1;
int verbose = 1;
hwif->tuneproc(drive, 255);
if ((id != NULL) && ((id->capability & 1) != 0) &&
hwif->autodma && (drive->type == ATA_DISK)) {
/* Consult the list of known "bad" drives */
if (udma_black_list(drive)) {
on = 0;
goto fast_ata_pio;
}
on = 0;
verbose = 0;
if ((id->field_valid & 4)) {
if (id->dma_ultra & 0x007F) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive, 1);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if ((id->dma_mword & 0x0007) ||
(id->dma_1word & 0x0007)) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
}
} else if (udma_white_list(drive)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
} else {
goto fast_ata_pio;
}
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
on = 0;
verbose = 0;
no_dma_set:
hwif->tuneproc(drive, 255);
}
udma_enable(drive, on, verbose);
return 0;
}
static int cmd64x_udma_stop(struct ata_device *drive)
{
struct ata_channel *ch = drive->channel;
......@@ -822,13 +742,6 @@ static unsigned int cmd64x_ata66(struct ata_channel *hwif)
return (ata66 & mask) ? 1 : 0;
}
static unsigned int __init cmd64x_ata66_check(struct ata_channel *hwif)
{
if (hwif->pci_dev->device == PCI_DEVICE_ID_CMD_680)
return cmd680_ata66(hwif);
return cmd64x_ata66(hwif);
}
static void __init cmd64x_init_channel(struct ata_channel *hwif)
{
struct pci_dev *dev = hwif->pci_dev;
......@@ -843,32 +756,28 @@ static void __init cmd64x_init_channel(struct ata_channel *hwif)
switch(dev->device) {
case PCI_DEVICE_ID_CMD_680:
hwif->busproc = cmd680_busproc;
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base)
hwif->udma_setup = cmd6xx_udma_setup;
#endif
hwif->resetproc = cmd680_reset;
hwif->speedproc = cmd680_tune_chipset;
hwif->tuneproc = cmd680_tuneproc;
hwif->udma_four = cmd680_ata66(hwif);
break;
case PCI_DEVICE_ID_CMD_649:
case PCI_DEVICE_ID_CMD_648:
case PCI_DEVICE_ID_CMD_643:
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) {
hwif->udma_setup = cmd6xx_udma_setup;
hwif->udma_stop = cmd64x_udma_stop;
hwif->udma_irq_status = cmd64x_udma_irq_status;
}
#endif
hwif->tuneproc = cmd64x_tuneproc;
hwif->speedproc = cmd64x_tune_chipset;
hwif->udma_four = cmd64x_ata66(hwif);
break;
case PCI_DEVICE_ID_CMD_646:
hwif->chipset = ide_cmd646;
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) {
hwif->udma_setup = cmd6xx_udma_setup;
if (class_rev == 0x01) {
hwif->udma_stop = cmd646_1_udma_stop;
} else {
......@@ -879,6 +788,7 @@ static void __init cmd64x_init_channel(struct ata_channel *hwif)
#endif
hwif->tuneproc = cmd64x_tuneproc;
hwif->speedproc = cmd64x_tune_chipset;
hwif->udma_four = cmd64x_ata66(hwif);
break;
default:
break;
......@@ -887,10 +797,9 @@ static void __init cmd64x_init_channel(struct ata_channel *hwif)
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) {
hwif->highmem = 1;
# ifdef CONFIG_IDEDMA_AUTO
if (!noautodma)
hwif->autodma = 1;
# endif
hwif->modes_map = cmd6xx_modes_map(hwif);
hwif->no_atapi_autodma = 1;
hwif->udma_setup = udma_generic_setup;
}
#endif
}
......@@ -919,7 +828,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_CMD,
device: PCI_DEVICE_ID_CMD_648,
init_chipset: cmd64x_init_chipset,
ata66_check: cmd64x_ata66_check,
init_channel: cmd64x_init_channel,
bootable: ON_BOARD,
flags: ATA_F_DMA
......@@ -928,7 +836,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_CMD,
device: PCI_DEVICE_ID_CMD_649,
init_chipset: cmd64x_init_chipset,
ata66_check: cmd64x_ata66_check,
init_channel: cmd64x_init_channel,
bootable: ON_BOARD,
flags: ATA_F_DMA
......@@ -937,7 +844,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_CMD,
device: PCI_DEVICE_ID_CMD_680,
init_chipset: cmd64x_init_chipset,
ata66_check: cmd64x_ata66_check,
init_channel: cmd64x_init_channel,
bootable: ON_BOARD,
flags: ATA_F_DMA
......
......@@ -191,7 +191,7 @@ static int cs5530_config_dma(struct ata_device *drive)
return 0;
}
static int cs5530_udma_setup(struct ata_device *drive)
static int cs5530_udma_setup(struct ata_device *drive, int map)
{
return cs5530_config_dma(drive);
}
......@@ -285,17 +285,15 @@ static unsigned int __init pci_init_cs5530(struct pci_dev *dev)
*/
static void __init ide_init_cs5530(struct ata_channel *hwif)
{
u32 basereg, d0_timings;
hwif->serialized = 1;
if (!hwif->dma_base) {
hwif->autodma = 0;
} else {
unsigned int basereg, d0_timings;
#ifdef CONFIG_BLK_DEV_IDEDMA
hwif->udma_setup = cs5530_udma_setup;
hwif->highmem = 1;
#else
hwif->autodma = 0;
if (hwif->dma_base) {
hwif->highmem = 1;
hwif->udma_setup = cs5530_udma_setup;
}
#endif
hwif->tuneproc = &cs5530_tuneproc;
......@@ -311,7 +309,6 @@ static void __init ide_init_cs5530(struct ata_channel *hwif)
if (!hwif->drives[1].autotune)
hwif->drives[1].autotune = 1; /* needs autotuning later */
}
}
}
......
......@@ -237,7 +237,7 @@ static void cy82c693_dma_enable(struct ata_device *drive, int mode, int single)
/*
* used to set DMA mode for CY82C693 (single and multi modes)
*/
static int cy82c693_udma_setup(struct ata_device *drive)
static int cy82c693_udma_setup(struct ata_device *drive, int map)
{
/*
* Set dma mode for drive everything else is done by the defaul func.
......@@ -414,14 +414,11 @@ static void __init ide_init_cy82c693(struct ata_channel *hwif)
hwif->tuneproc = cy82c693_tune_drive;
hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1;
hwif->autodma = 0;
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) {
hwif->highmem = 1;
hwif->udma_setup = cy82c693_udma_setup;
if (!noautodma)
hwif->autodma = 1;
}
#endif
}
......
......@@ -79,30 +79,8 @@ void ata_mask(struct ata_device *drive)
ch->maskproc(drive);
}
/*
* Spin until the drive is no longer busy.
*
* Not exported, since it's not used within any modules.
*/
int ata_busy_poll(struct ata_device *drive, unsigned long timeout)
{
/* spec allows drive 400ns to assert "BUSY" */
udelay(1);
if (!ata_status(drive, 0, BUSY_STAT)) {
timeout += jiffies;
while (!ata_status(drive, 0, BUSY_STAT)) {
if (time_after(jiffies, timeout))
return 1;
}
}
return 0;
}
/*
* Check the state of the status register.
*
* FIXME: Channel lock should be held.
*/
int ata_status(struct ata_device *drive, u8 good, u8 bad)
{
......@@ -120,31 +98,33 @@ EXPORT_SYMBOL(ata_status);
* all of the "good" bits and none of the "bad" bits, and if all is okay it
* returns 0. All other cases return 1 after invoking error handler -- caller
* should just return.
*
* This routine should get fixed to not hog the cpu during extra long waits..
* That could be done by busy-waiting for the first jiffy or two, and then
* setting a timer to wake up at half second intervals thereafter, until
* timeout is achieved, before timing out.
*
* Channel lock should be held.
*/
int ata_status_poll(struct ata_device *drive, u8 good, u8 bad,
unsigned long timeout,
struct request *rq, ide_startstop_t *startstop)
unsigned long timeout, struct request *rq)
{
int i;
/* bail early if we've exceeded max_failures */
if (drive->max_failures && (drive->failures > drive->max_failures)) {
*startstop = ide_stopped;
return 1;
}
if (ata_busy_poll(drive, timeout)) {
*startstop = ata_error(drive, rq, "status timeout");
if (drive->max_failures && (drive->failures > drive->max_failures))
return ATA_OP_FINISHED;
/*
* Spin until the drive is no longer busy.
* Spec allows drive 400ns to assert "BUSY"
*/
udelay(1);
if (!ata_status(drive, 0, BUSY_STAT)) {
unsigned long flags;
return 1;
__save_flags(flags);
ide__sti();
timeout += jiffies;
while (!ata_status(drive, 0, BUSY_STAT)) {
if (time_after(jiffies, timeout)) {
__restore_flags(flags);
return ata_error(drive, rq, "status timeout");
}
}
__restore_flags(flags);
}
/*
......@@ -156,12 +136,10 @@ int ata_status_poll(struct ata_device *drive, u8 good, u8 bad,
for (i = 0; i < 10; i++) {
udelay(1);
if (ata_status(drive, good, bad))
return 0;
return ATA_OP_READY;
}
*startstop = ata_error(drive, rq, "status error");
return 1;
return ata_error(drive, rq, "status error");
}
EXPORT_SYMBOL(ata_status_poll);
......
......@@ -72,83 +72,13 @@ static void hpt34x_tune_drive(struct ata_device *drive, u8 pio)
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int config_chipset_for_dma(struct ata_device *drive, u8 udma)
static int hpt34x_udma_setup(struct ata_device *drive, int map)
{
int map;
u8 mode;
if (drive->type != ATA_DISK)
return 0;
if (udma)
map = XFER_UDMA;
else
map = XFER_SWDMA | XFER_MWDMA;
mode = ata_timing_mode(drive, map);
if (mode < XFER_SW_DMA_0)
return 0;
return !hpt34x_tune_chipset(drive, mode);
}
static int hpt34x_udma_setup(struct ata_device *drive)
{
struct hd_driveid *id = drive->id;
int on = 1;
int verbose = 1;
if (id && (id->capability & 1) && drive->channel->autodma) {
/* Consult the list of known "bad" drives */
if (udma_black_list(drive)) {
on = 0;
goto fast_ata_pio;
}
on = 0;
verbose = 0;
if (id->field_valid & 4) {
if (id->dma_ultra & 0x0007) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive, 1);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if ((id->dma_mword & 0x0007) ||
(id->dma_1word & 0x0007)) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
}
} else if (udma_white_list(drive)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
} else {
goto fast_ata_pio;
}
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
on = 0;
verbose = 0;
no_dma_set:
hpt34x_tune_chipset(drive, ata_best_pio_mode(drive));
}
#ifndef CONFIG_HPT34X_AUTODMA
if (on)
on = 0;
#endif
udma_enable(drive, on, verbose);
#ifdef CONFIG_HPT34X_AUTODMA
return udma_generic_setup(drive, map);
#else
return 0;
#endif
}
static int hpt34x_udma_stop(struct ata_device *drive)
......@@ -173,7 +103,7 @@ static int hpt34x_udma_init(struct ata_device *drive, struct request *rq)
u8 cmd;
if (!(count = udma_new_table(drive, rq)))
return ide_stopped; /* try PIO instead of DMA */
return ATA_OP_FINISHED; /* try PIO instead of DMA */
if (rq_data_dir(rq) == READ)
cmd = 0x09;
......@@ -189,7 +119,7 @@ static int hpt34x_udma_init(struct ata_device *drive, struct request *rq)
OUT_BYTE((cmd == 0x09) ? WIN_READDMA : WIN_WRITEDMA, IDE_COMMAND_REG);
}
return ide_started;
return ATA_OP_CONTINUES;
}
#endif
......@@ -252,24 +182,21 @@ static void __init ide_init_hpt34x(struct ata_channel *hwif)
unsigned short pcicmd = 0;
pci_read_config_word(hwif->pci_dev, PCI_COMMAND, &pcicmd);
if (!noautodma)
hwif->autodma = (pcicmd & PCI_COMMAND_MEMORY) ? 1 : 0;
else
hwif->autodma = 0;
#ifdef CONFIG_IDEDMA_AUTO
hwif->autodma = (pcicmd & PCI_COMMAND_MEMORY) ? 1 : 0;
#endif
hwif->udma_stop = hpt34x_udma_stop;
hwif->udma_init = hpt34x_udma_init;
hwif->modes_map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA | XFER_UDMA;
hwif->no_atapi_autodma = 1;
hwif->udma_setup = hpt34x_udma_setup;
hwif->highmem = 1;
} else {
} else
#endif
{
hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1;
}
#else
hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1;
hwif->autodma = 0;
#endif
}
......@@ -281,7 +208,7 @@ static struct ata_pci_device chipset __initdata = {
init_channel: ide_init_hpt34x,
bootable: NEVER_BOARD,
extra: 16,
flags: ATA_F_NOADMA | ATA_F_DMA
flags: ATA_F_DMA
};
int __init init_hpt34x(void)
......
......@@ -493,37 +493,23 @@ static unsigned int hpt_revision(struct pci_dev *dev)
return class_rev;
}
static int hpt3xx_ratemask(struct ata_device *drive)
static int __init hpt3xx_modes_map(struct ata_channel *ch)
{
u32 rev = hpt_revision(drive->channel->pci_dev);
int map = XFER_UDMA;
u32 rev = hpt_revision(ch->pci_dev);
int map = XFER_EPIO | XFER_MWDMA | XFER_UDMA | XFER_UDMA_66;
if (rev >= 8) { /* HPT374 */
if (HPT374_ALLOW_ATA133_6)
map |= XFER_UDMA_133;
map |= (XFER_UDMA_100 | XFER_UDMA_66);
map |= XFER_UDMA_100;
} else if (rev >= 5) { /* HPT372 */
if (HPT372_ALLOW_ATA133_6)
map |= XFER_UDMA_133;
map |= (XFER_UDMA_100 | XFER_UDMA_66);
} else if (rev >= 4) { /* HPT370A */
map |= XFER_UDMA_100;
} else if (rev >= 3) { /* HPT370A / HPT370 */
if (HPT370_ALLOW_ATA100_5)
map |= XFER_UDMA_100;
map |= XFER_UDMA_66;
} else if (rev >= 3) { /* HPT370 */
if (HPT370_ALLOW_ATA100_5)
map |= XFER_UDMA_100;
map |= XFER_UDMA_66;
if (check_in_drive_lists(drive, bad_ata33))
return 0;
} else { /* HPT366 and HPT368 */
map |= XFER_UDMA_66;
if (check_in_drive_lists(drive, bad_ata33))
return 0;
}
if (!eighty_ninty_three(drive))
return XFER_UDMA;
} /* HPT366 / HPT368 */
return map;
}
......@@ -662,62 +648,42 @@ static int hpt3xx_tune_chipset(struct ata_device *drive, u8 speed)
return ide_config_drive_speed(drive, speed);
}
/* FIXME: pio == 255 -> ata_best_pio_mode(drive) --bkz */
static void hpt3xx_tune_drive(struct ata_device *drive, u8 pio)
{
(void) hpt3xx_tune_chipset(drive, XFER_PIO_0 + min_t(u8, pio, 4));
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int config_chipset_for_dma(struct ata_device *drive)
static int hpt3xx_udma_setup(struct ata_device *drive, int map)
{
int map;
u32 rev;
u8 mode;
if (drive->type != ATA_DISK)
return 0;
rev = hpt_revision(drive->channel->pci_dev);
/* FIXME: check SWDMA modes --bkz */
map = hpt3xx_ratemask(drive) | XFER_MWDMA;
mode = ata_timing_mode(drive, map);
/* FIXME: badlists need futher investigation --bkz */
/* FIXME: badlists need futher investigation --bkz
bad_ata100_5 is for HPT370/370A,
bad_ata66_4, bad_ata66_3 and bad_ata33 are for HPT366/368
*/
if (mode == XFER_UDMA_5 && rev < 5) {
if (check_in_drive_lists(drive, bad_ata100_5)) {
/* FIXME: make XFER_UDMA_66/100/133
independent of XFER_UDMA --bkz */
map &= ~XFER_UDMA_100;
map |= XFER_UDMA;
mode = ata_timing_mode(drive, map);
}
}
if (mode == XFER_UDMA_4 && rev < 3) {
if (check_in_drive_lists(drive, bad_ata66_4)) {
if (drive->id->dma_ultra & 0x0008) {
mode = XFER_UDMA_3;
} else {
map &= ~XFER_UDMA_66;
map |= XFER_UDMA;
mode = ata_timing_mode(drive, map);
}
}
}
if (mode == XFER_UDMA_3 && rev < 3) {
if (check_in_drive_lists(drive, bad_ata66_3)) {
map &= ~XFER_UDMA_66;
map |= XFER_UDMA;
mode = ata_timing_mode(drive, map);
}
/* bad_ata100_5 is for HPT370/370A,
bad_ata66_4, bad_ata66_3 and bad_ata33 are for HPT366/368 */
if (rev < 5 && check_in_drive_lists(drive, bad_ata100_5))
map &= ~XFER_UDMA_100;
if (rev < 3) {
if (check_in_drive_lists(drive, bad_ata66_4))
map &= ~XFER_UDMA_66_4;
if (check_in_drive_lists(drive, bad_ata66_3))
map &= ~XFER_UDMA_66_3;
if (check_in_drive_lists(drive, bad_ata33))
map &= ~XFER_UDMA_ALL;
}
if (check_in_drive_lists(drive, bad_ata33) && rev < 3)
mode = ata_timing_mode(drive, XFER_MWDMA);
return !hpt3xx_tune_chipset(drive, mode);
return udma_generic_setup(drive, map);
}
static int hpt3xx_quirkproc(struct ata_device *drive)
......@@ -754,59 +720,6 @@ static void hpt3xx_maskproc(struct ata_device *drive)
}
}
static int hpt3xx_udma_setup(struct ata_device *drive)
{
struct hd_driveid *id = drive->id;
int on = 1;
int verbose = 1;
if (id && (id->capability & 1) && drive->channel->autodma) {
/* Consult the list of known "bad" drives */
if (udma_black_list(drive)) {
on = 0;
goto fast_ata_pio;
}
on = 0;
verbose = 0;
if (id->field_valid & 4) {
if (id->dma_ultra & 0x007F) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if (id->dma_mword & 0x0007) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive);
if (!on)
goto no_dma_set;
}
} else if (udma_white_list(drive)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive);
if (!on)
goto no_dma_set;
} else {
goto fast_ata_pio;
}
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
on = 0;
verbose = 0;
no_dma_set:
hpt3xx_tune_chipset(drive, ata_best_pio_mode(drive));
}
udma_enable(drive, on, verbose);
return 0;
}
static void hpt366_udma_irq_lost(struct ata_device *drive)
{
struct pci_dev *dev = drive->channel->pci_dev;
......@@ -1232,6 +1145,8 @@ static void __init hpt366_init_channel(struct ata_channel *ch)
struct pci_dev *dev = ch->pci_dev;
u32 rev = hpt_revision(dev);
ch->udma_four = hpt366_ata66_check(ch);
ch->tuneproc = hpt3xx_tune_drive;
ch->speedproc = hpt3xx_tune_chipset;
ch->quirkproc = hpt3xx_quirkproc;
......@@ -1272,17 +1187,12 @@ static void __init hpt366_init_channel(struct ata_channel *ch)
// ch->resetproc = hpt3xx_reset;
// ch->busproc = hpt3xx_tristate;
}
ch->modes_map = hpt3xx_modes_map(ch);
ch->udma_setup = hpt3xx_udma_setup;
if (!noautodma)
ch->autodma = 1;
else
ch->autodma = 0;
ch->highmem = 1;
} else
#endif
{
ch->autodma = 0;
ch->drives[0].autotune = 1;
ch->drives[1].autotune = 1;
}
......@@ -1315,7 +1225,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_TTI,
device: PCI_DEVICE_ID_TTI_HPT366,
init_chipset: hpt366_init_chipset,
ata66_check: hpt366_ata66_check,
init_channel: hpt366_init_channel,
init_dma: hpt366_init_dma,
bootable: OFF_BOARD,
......@@ -1326,7 +1235,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_TTI,
device: PCI_DEVICE_ID_TTI_HPT372,
init_chipset: hpt366_init_chipset,
ata66_check: hpt366_ata66_check,
init_channel: hpt366_init_channel,
init_dma: hpt366_init_dma,
bootable: OFF_BOARD,
......@@ -1337,7 +1245,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_TTI,
device: PCI_DEVICE_ID_TTI_HPT374,
init_chipset: hpt366_init_chipset,
ata66_check: hpt366_ata66_check,
init_channel: hpt366_init_channel,
init_dma: hpt366_init_dma,
bootable: OFF_BOARD,
......
......@@ -405,7 +405,7 @@ static void icside_dma_enable(struct ata_device *drive, int on, int verbose)
#endif
}
static int icside_dma_check(struct ata_device *drive)
static int icside_dma_check(struct ata_device *drive, int map)
{
struct hd_driveid *id = drive->id;
struct ata_channel *ch = drive->channel;
......@@ -466,7 +466,7 @@ static ide_startstop_t icside_dmaintr(struct ata_device *drive, struct request *
if (ata_status(drive, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) {
if (!dma_stat) {
__ide_end_request(drive, rq, 1, rq->nr_sectors);
return ide_stopped;
return ATA_OP_FINISHED;
}
printk("%s: dma_intr: bad DMA status (dma_stat=%x)\n",
drive->name, dma_stat);
......@@ -516,10 +516,10 @@ static int icside_dma_init(struct ata_device *drive, struct request *rq)
u8 int cmd;
if (icside_dma_common(drive, rq, DMA_MODE_WRITE))
return ide_stopped;
return ATA_OP_FINISHED;
if (drive->type != ATA_DISK)
return ide_started;
return ATA_OP_CONTINUES;
ata_set_handler(drive, icside_dmaintr, WAIT_CMD, NULL);
......@@ -535,7 +535,7 @@ static int icside_dma_init(struct ata_device *drive, struct request *rq)
enable_dma(ch->hw.dma);
return ide_started;
return ATA_OP_CONTINUES;
}
static int icside_irq_status(struct ata_device *drive)
......
......@@ -556,7 +556,7 @@ static void cdrom_end_request(struct ata_device *drive, struct request *rq, int
if ((rq->flags & REQ_CMD) && !rq->current_nr_sectors)
uptodate = 1;
ata_end_request(drive, rq, uptodate);
__ata_end_request(drive, rq, uptodate, 0);
}
......@@ -581,7 +581,7 @@ static int cdrom_decode_status(ide_startstop_t *startstop, struct ata_device *dr
if (rq == NULL) {
printk("%s: missing rq in %s\n", drive->name, __FUNCTION__);
*startstop = ide_stopped;
*startstop = ATA_OP_FINISHED;
return 1;
}
......@@ -688,14 +688,12 @@ static int cdrom_decode_status(ide_startstop_t *startstop, struct ata_device *dr
blk_dump_rq_flags(rq, "ide-cd bad flags");
/* Retry, or handle the next request. */
*startstop = ide_stopped;
*startstop = ATA_OP_FINISHED;
return 1;
}
static int cdrom_timer_expiry(struct ata_device *drive, struct request *rq)
static ide_startstop_t cdrom_timer_expiry(struct ata_device *drive, struct request *rq, unsigned long *wait)
{
unsigned long wait = 0;
/*
* Some commands are *slow* and normally take a long time to
* complete. Usually we can use the ATAPI "disconnect" to bypass
......@@ -706,14 +704,14 @@ static int cdrom_timer_expiry(struct ata_device *drive, struct request *rq)
case GPCMD_BLANK:
case GPCMD_FORMAT_UNIT:
case GPCMD_RESERVE_RZONE_TRACK:
wait = WAIT_CMD;
break;
*wait = WAIT_CMD;
return ATA_OP_CONTINUES;
default:
wait = 0;
*wait = 0;
break;
}
return wait;
return ATA_OP_FINISHED;
}
/* Set up the device registers for transferring a packet command on DEV,
......@@ -728,51 +726,40 @@ static ide_startstop_t cdrom_start_packet_command(struct ata_device *drive,
int xferlen,
ata_handler_t handler)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
ide_startstop_t startstop;
struct cdrom_info *info = drive->driver_data;
int ret;
spin_lock_irqsave(ch->lock, flags);
/* Wait for the controller to be idle. */
if (ata_status_poll(drive, 0, BUSY_STAT, WAIT_READY, rq, &startstop))
ret = startstop;
else {
if (info->dma) {
if (info->cmd == READ || info->cmd == WRITE)
info->dma = udma_init(drive, rq);
else
printk("ide-cd: DMA set, but not allowed\n");
}
ret = ata_status_poll(drive, 0, BUSY_STAT, WAIT_READY, rq);
if (ret != ATA_OP_READY)
return ret;
/* Set up the controller registers. */
OUT_BYTE(info->dma, IDE_FEATURE_REG);
OUT_BYTE(0, IDE_NSECTOR_REG);
OUT_BYTE(0, IDE_SECTOR_REG);
OUT_BYTE(xferlen & 0xff, IDE_LCYL_REG);
OUT_BYTE(xferlen >> 8 , IDE_HCYL_REG);
ata_irq_enable(drive, 1);
if (info->dma)
udma_start(drive, rq);
if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) {
ata_set_handler(drive, handler, WAIT_CMD, cdrom_timer_expiry);
OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG); /* packet command */
ret = ide_started;
} else {
OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG); /* packet command */
if (info->dma) {
if (info->cmd == READ || info->cmd == WRITE)
info->dma = udma_init(drive, rq);
else
printk("ide-cd: DMA set, but not allowed\n");
}
/* FIXME: Oj kurwa! We have to ungrab the lock before
* the IRQ handler gets called.
*/
spin_unlock_irqrestore(ch->lock, flags);
ret = handler(drive, rq);
spin_lock_irqsave(ch->lock, flags);
}
/* Set up the controller registers. */
OUT_BYTE(info->dma, IDE_FEATURE_REG);
OUT_BYTE(0, IDE_NSECTOR_REG);
OUT_BYTE(0, IDE_SECTOR_REG);
OUT_BYTE(xferlen & 0xff, IDE_LCYL_REG);
OUT_BYTE(xferlen >> 8 , IDE_HCYL_REG);
ata_irq_enable(drive, 1);
if (info->dma)
udma_start(drive, rq);
if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) {
ata_set_handler(drive, handler, WAIT_CMD, cdrom_timer_expiry);
OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG); /* packet command */
ret = ATA_OP_CONTINUES;
} else {
OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG); /* packet command */
ret = handler(drive, rq);
}
spin_unlock_irqrestore(ch->lock, flags);
return ret;
}
......@@ -787,8 +774,6 @@ static ide_startstop_t cdrom_transfer_packet_command(struct ata_device *drive,
unsigned char *cmd, unsigned long timeout,
ata_handler_t handler)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
ide_startstop_t startstop;
if (CDROM_CONFIG_FLAGS (drive)->drq_interrupt) {
......@@ -800,26 +785,18 @@ static ide_startstop_t cdrom_transfer_packet_command(struct ata_device *drive,
if (cdrom_decode_status(&startstop, drive, rq, DRQ_STAT, &stat_dum))
return startstop;
} else {
/* FIXME: make this locking go away */
spin_lock_irqsave(ch->lock, flags);
/* Otherwise, we must wait for DRQ to get set. */
if (ata_status_poll(drive, DRQ_STAT, BUSY_STAT,
WAIT_READY, rq, &startstop)) {
spin_unlock_irqrestore(ch->lock, flags);
startstop = ata_status_poll(drive, DRQ_STAT, BUSY_STAT,
WAIT_READY, rq);
if (startstop != ATA_OP_READY)
return startstop;
}
spin_unlock_irqrestore(ch->lock, flags);
}
/* Arm the interrupt handler and send the command to the device. */
/* FIXME: make this locking go away */
spin_lock_irqsave(ch->lock, flags);
ata_set_handler(drive, handler, timeout, cdrom_timer_expiry);
atapi_write(drive, cmd, CDROM_PACKET_SIZE);
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ATA_OP_CONTINUES;
}
/****************************************************************************
......@@ -917,8 +894,6 @@ int cdrom_read_check_ireason(struct ata_device *drive, struct request *rq, int l
*/
static ide_startstop_t cdrom_read_intr(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
int stat;
int ireason, len, sectors_to_transfer, nskip;
struct cdrom_info *info = drive->driver_data;
......@@ -937,15 +912,9 @@ static ide_startstop_t cdrom_read_intr(struct ata_device *drive, struct request
if (dma) {
if (!dma_error) {
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
__ata_end_request(drive, rq, 1, rq->nr_sectors);
spin_unlock_irqrestore(ch->lock, flags);
return ide_stopped;
return ATA_OP_FINISHED;
} else
return ata_error(drive, rq, "dma error");
}
......@@ -964,12 +933,12 @@ static ide_startstop_t cdrom_read_intr(struct ata_device *drive, struct request
cdrom_end_request(drive, rq, 0);
} else
cdrom_end_request(drive, rq, 1);
return ide_stopped;
return ATA_OP_FINISHED;
}
/* Check that the drive is expecting to do the same thing we are. */
if (cdrom_read_check_ireason(drive, rq, len, ireason))
return ide_stopped;
return ATA_OP_FINISHED;
/* Assume that the drive will always provide data in multiples
of at least SECTOR_SIZE, as it gets hairy to keep track
......@@ -984,7 +953,7 @@ static ide_startstop_t cdrom_read_intr(struct ata_device *drive, struct request
CDROM_CONFIG_FLAGS (drive)->limit_nframes = 1;
}
cdrom_end_request(drive, rq, 0);
return ide_stopped;
return ATA_OP_FINISHED;
}
/* The number of sectors we need to read from the drive. */
......@@ -1040,11 +1009,9 @@ static ide_startstop_t cdrom_read_intr(struct ata_device *drive, struct request
}
/* Done moving data! Wait for another interrupt. */
spin_lock_irqsave(ch->lock, flags);
ata_set_handler(drive, cdrom_read_intr, WAIT_CMD, NULL);
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ATA_OP_CONTINUES;
}
/*
......@@ -1132,7 +1099,7 @@ static ide_startstop_t cdrom_start_read_continuation(struct ata_device *drive, s
printk ("%s: %s: buffer botch (%u)\n",
drive->name, __FUNCTION__, rq->current_nr_sectors);
cdrom_end_request(drive, rq, 0);
return ide_stopped;
return ATA_OP_FINISHED;
}
sector -= nskip;
nsect += nskip;
......@@ -1181,7 +1148,7 @@ static ide_startstop_t cdrom_seek_intr(struct ata_device *drive, struct request
drive->dsc_overlap = 0;
}
}
return ide_stopped;
return ATA_OP_FINISHED;
}
static ide_startstop_t cdrom_start_seek_continuation(struct ata_device *drive, struct request *rq)
......@@ -1243,7 +1210,7 @@ static ide_startstop_t cdrom_start_read(struct ata_device *drive, struct request
/* Satisfy whatever we can of this request from our cached sector. */
if (cdrom_read_from_buffer(drive, rq))
return ide_stopped;
return ATA_OP_FINISHED;
blk_attempt_remerge(&drive->queue, rq);
......@@ -1269,8 +1236,6 @@ static ide_startstop_t cdrom_start_read(struct ata_device *drive, struct request
/* Interrupt routine for packet command completion. */
static ide_startstop_t cdrom_pc_intr(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
int ireason, len, stat, thislen;
/* FIXME --mdcki */
......@@ -1312,7 +1277,7 @@ static ide_startstop_t cdrom_pc_intr(struct ata_device *drive, struct request *r
pc->stat = 1;
cdrom_end_request(drive, rq, 1);
}
return ide_stopped;
return ATA_OP_FINISHED;
}
/* Figure out how much data to transfer. */
......@@ -1363,11 +1328,9 @@ static ide_startstop_t cdrom_pc_intr(struct ata_device *drive, struct request *r
}
/* Now we wait for another interrupt. */
spin_lock_irqsave(ch->lock, flags);
ata_set_handler(drive, cdrom_pc_intr, WAIT_CMD, cdrom_timer_expiry);
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ATA_OP_CONTINUES;
}
static ide_startstop_t cdrom_do_pc_continuation(struct ata_device *drive, struct request *rq)
......@@ -1508,8 +1471,6 @@ static inline int cdrom_write_check_ireason(struct ata_device *drive, struct req
static ide_startstop_t cdrom_write_intr(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
int stat, ireason, len, sectors_to_transfer, uptodate;
struct cdrom_info *info = drive->driver_data;
int dma_error = 0, dma = info->dma;
......@@ -1536,15 +1497,9 @@ static ide_startstop_t cdrom_write_intr(struct ata_device *drive, struct request
if (dma_error)
return ata_error(drive, rq, "dma error");
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
__ata_end_request(drive, rq, 1, rq->nr_sectors);
spin_unlock_irqrestore(ch->lock, flags);
return ide_stopped;
return ATA_OP_FINISHED;
}
/* Read the interrupt reason and the transfer length. */
......@@ -1563,13 +1518,13 @@ static ide_startstop_t cdrom_write_intr(struct ata_device *drive, struct request
uptodate = 0;
}
cdrom_end_request(drive, rq, uptodate);
return ide_stopped;
return ATA_OP_FINISHED;
}
/* Check that the drive is expecting to do the same thing we are. */
if (ireason & 3)
if (cdrom_write_check_ireason(drive, rq, len, ireason))
return ide_stopped;
return ATA_OP_FINISHED;
sectors_to_transfer = len / SECTOR_SIZE;
......@@ -1607,11 +1562,9 @@ static ide_startstop_t cdrom_write_intr(struct ata_device *drive, struct request
}
/* re-arm handler */
spin_lock_irqsave(ch->lock, flags);
ata_set_handler(drive, cdrom_write_intr, 5 * WAIT_CMD, NULL);
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ATA_OP_CONTINUES;
}
static ide_startstop_t cdrom_start_write_cont(struct ata_device *drive, struct request *rq)
......@@ -1628,7 +1581,7 @@ static ide_startstop_t cdrom_start_write(struct ata_device *drive, struct reques
*/
if ((rq->nr_sectors & 3) || (rq->sector & 3)) {
cdrom_end_request(drive, rq, 0);
return ide_stopped;
return ATA_OP_FINISHED;
}
/*
......@@ -1658,7 +1611,6 @@ static ide_startstop_t cdrom_start_write(struct ata_device *drive, struct reques
static ide_startstop_t
ide_cdrom_do_request(struct ata_device *drive, struct request *rq, sector_t block)
{
struct ata_channel *ch = drive->channel;
int ret;
struct cdrom_info *info = drive->driver_data;
......@@ -1669,14 +1621,12 @@ ide_cdrom_do_request(struct ata_device *drive, struct request *rq, sector_t bloc
if (!ata_status(drive, SEEK_STAT, 0)) {
if (elpased < IDECD_SEEK_TIMEOUT) {
ide_stall_queue(drive, IDECD_SEEK_TIMER);
return ide_stopped;
return ATA_OP_FINISHED;
}
printk ("%s: DSC timeout\n", drive->name);
}
CDROM_CONFIG_FLAGS(drive)->seeking = 0;
}
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
if (IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap) {
ret = cdrom_start_seek(drive, rq, block);
} else {
......@@ -1686,13 +1636,9 @@ ide_cdrom_do_request(struct ata_device *drive, struct request *rq, sector_t bloc
ret = cdrom_start_write(drive, rq);
}
info->last_block = block;
spin_lock_irq(ch->lock);
return ret;
} else if (rq->flags & (REQ_PC | REQ_SENSE)) {
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
ret = cdrom_do_packet_command(drive, rq);
spin_lock_irq(ch->lock);
return ret;
} else if (rq->flags & REQ_SPECIAL) {
......@@ -1703,12 +1649,9 @@ ide_cdrom_do_request(struct ata_device *drive, struct request *rq, sector_t bloc
* right now this can only be a reset...
*/
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
cdrom_end_request(drive, rq, 1);
spin_lock_irq(ch->lock);
return ide_stopped;
return ATA_OP_FINISHED;
} else if (rq->flags & REQ_BLOCK_PC) {
struct packet_command pc;
ide_startstop_t startstop;
......@@ -1720,10 +1663,7 @@ ide_cdrom_do_request(struct ata_device *drive, struct request *rq, sector_t bloc
/* FIXME --mdcki */
rq->special = (char *) &pc;
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
startstop = cdrom_do_packet_command(drive, rq);
spin_lock_irq(ch->lock);
if (pc.stat)
++rq->errors;
......@@ -1732,12 +1672,10 @@ ide_cdrom_do_request(struct ata_device *drive, struct request *rq, sector_t bloc
}
blk_dump_rq_flags(rq, "ide-cd bad flags");
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
cdrom_end_request(drive, rq, 0);
spin_lock_irq(ch->lock);
return ide_stopped;
return ATA_OP_FINISHED;
}
......
......@@ -11,8 +11,6 @@
* This is the ATA disk device driver, as evolved from hd.c and ide.c.
*/
#define IDEDISK_VERSION "1.14"
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
......@@ -40,6 +38,24 @@
# define IS_PDC4030_DRIVE (0) /* auto-NULLs out pdc4030 code */
#endif
/*
* for now, taskfile requests are special :/
*/
static inline char *ide_map_rq(struct request *rq, unsigned long *flags)
{
if (rq->bio)
return bio_kmap_irq(rq->bio, flags) + ide_rq_offset(rq);
else
return rq->buffer + ((rq)->nr_sectors - (rq)->current_nr_sectors) * SECTOR_SIZE;
}
static inline void ide_unmap_rq(struct request *rq, char *to,
unsigned long *flags)
{
if (rq->bio)
bio_kunmap_irq(to, flags);
}
/*
* Perform a sanity check on the claimed "lba_capacity"
* value for this drive (from its reported identification information).
......@@ -95,25 +111,17 @@ static int lba_capacity_is_ok(struct hd_driveid *id)
*/
static ide_startstop_t task_in_intr(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
int ret;
spin_lock_irqsave(ch->lock, flags);
if (!ata_status(drive, DATA_READY, BAD_R_STAT)) {
if (drive->status & (ERR_STAT | DRQ_STAT)) {
spin_unlock_irqrestore(ch->lock, flags);
if (drive->status & (ERR_STAT | DRQ_STAT))
return ata_error(drive, rq, __FUNCTION__);
}
/* no data yet, so wait for another interrupt */
ata_set_handler(drive, task_in_intr, WAIT_CMD, NULL);
ret = ide_started;
ret = ATA_OP_CONTINUES;
} else {
// printk("Read: %p, rq->current_nr_sectors: %d\n", buf, (int) rq->current_nr_sectors);
{
unsigned long flags;
......@@ -134,18 +142,16 @@ static ide_startstop_t task_in_intr(struct ata_device *drive, struct request *rq
if (rq->current_nr_sectors <= 0) {
if (!__ata_end_request(drive, rq, 1, 0)) {
// printk("Request Ended stat: %02x\n", drive->status);
spin_unlock_irqrestore(ch->lock, flags);
return ide_stopped;
return ATA_OP_FINISHED;
}
}
/* still data left to transfer */
ata_set_handler(drive, task_in_intr, WAIT_CMD, NULL);
ret = ide_started;
ret = ATA_OP_CONTINUES;
}
spin_unlock_irqrestore(ch->lock, flags);
return ret;
}
......@@ -155,19 +161,13 @@ static ide_startstop_t task_in_intr(struct ata_device *drive, struct request *rq
*/
static ide_startstop_t task_out_intr(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
int ret;
spin_lock_irqsave(ch->lock, flags);
if (!ata_status(drive, DRIVE_READY, drive->bad_wstat)) {
spin_unlock_irqrestore(ch->lock, flags);
if (!ata_status(drive, DRIVE_READY, drive->bad_wstat))
return ata_error(drive, rq, __FUNCTION__);
}
if (!rq->current_nr_sectors && !__ata_end_request(drive, rq, 1, 0)) {
ret = ide_stopped;
ret = ATA_OP_FINISHED;
} else {
if ((rq->nr_sectors == 1) != (drive->status & DRQ_STAT)) {
unsigned long flags;
......@@ -183,9 +183,8 @@ static ide_startstop_t task_out_intr(struct ata_device *drive, struct request *r
}
ata_set_handler(drive, task_out_intr, WAIT_CMD, NULL);
ret = ide_started;
ret = ATA_OP_CONTINUES;
}
spin_unlock_irqrestore(ch->lock, flags);
return ret;
}
......@@ -195,22 +194,16 @@ static ide_startstop_t task_out_intr(struct ata_device *drive, struct request *r
*/
static ide_startstop_t task_mulin_intr(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
int ret;
spin_lock_irqsave(ch->lock, flags);
if (!ata_status(drive, DATA_READY, BAD_R_STAT)) {
if (drive->status & (ERR_STAT | DRQ_STAT)) {
spin_unlock_irqrestore(ch->lock, flags);
if (drive->status & (ERR_STAT | DRQ_STAT))
return ata_error(drive, rq, __FUNCTION__);
}
/* no data yet, so wait for another interrupt */
ata_set_handler(drive, task_mulin_intr, WAIT_CMD, NULL);
ret = ide_started;
ret = ATA_OP_CONTINUES;
} else {
unsigned int msect;
......@@ -242,11 +235,8 @@ static ide_startstop_t task_mulin_intr(struct ata_device *drive, struct request
/* FIXME: this seems buggy */
if (rq->current_nr_sectors <= 0) {
if (!__ata_end_request(drive, rq, 1, 0)) {
spin_unlock_irqrestore(ch->lock, flags);
return ide_stopped;
}
if (!__ata_end_request(drive, rq, 1, 0))
return ATA_OP_FINISHED;
}
msect -= nsect;
} while (msect);
......@@ -254,22 +244,17 @@ static ide_startstop_t task_mulin_intr(struct ata_device *drive, struct request
/* more data left */
ata_set_handler(drive, task_mulin_intr, WAIT_CMD, NULL);
ret = ide_started;
ret = ATA_OP_CONTINUES;
}
spin_unlock_irqrestore(ch->lock, flags);
return ret;
}
static ide_startstop_t task_mulout_intr(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
int ok;
int ret;
spin_lock_irqsave(ch->lock, flags);
/*
* FIXME: the drive->status checks here seem to be messy.
*
......@@ -280,24 +265,24 @@ static ide_startstop_t task_mulout_intr(struct ata_device *drive, struct request
ok = ata_status(drive, DATA_READY, BAD_R_STAT);
if (!ok || !rq->nr_sectors) {
if (drive->status & (ERR_STAT | DRQ_STAT)) {
spin_unlock_irqrestore(ch->lock, flags);
if (drive->status & (ERR_STAT | DRQ_STAT))
return ata_error(drive, rq, __FUNCTION__);
}
}
if (!rq->nr_sectors) {
__ata_end_request(drive, rq, 1, rq->hard_nr_sectors);
rq->bio = NULL;
ret = ide_stopped;
ret = ATA_OP_FINISHED;
} else if (!ok) {
/* no data yet, so wait for another interrupt */
if (!ch->handler)
ata_set_handler(drive, task_mulout_intr, WAIT_CMD, NULL);
ret = ide_started;
/* not ready yet, so wait for next IRQ */
ata_set_handler(drive, task_mulout_intr, WAIT_CMD, NULL);
ret = ATA_OP_CONTINUES;
} else {
int mcount = drive->mult_count;
/* prepare for next IRQ */
ata_set_handler(drive, task_mulout_intr, WAIT_CMD, NULL);
do {
char *buf;
int nsect = rq->current_nr_sectors;
......@@ -325,6 +310,7 @@ static ide_startstop_t task_mulout_intr(struct ata_device *drive, struct request
rq->current_nr_sectors = bio_iovec(bio)->bv_len >> 9;
}
}
rq->errors = 0; /* FIXME: why? --bzolnier */
/*
* Ok, we're all setup for the interrupt re-entering us on the
......@@ -334,31 +320,209 @@ static ide_startstop_t task_mulout_intr(struct ata_device *drive, struct request
bio_kunmap_irq(buf, &flags);
} while (mcount);
rq->errors = 0;
if (!ch->handler)
ata_set_handler(drive, task_mulout_intr, WAIT_CMD, NULL);
ret = ide_started;
ret = ATA_OP_CONTINUES;
}
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ret;
}
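A small standalone sketch of the return-code convention the handlers above rely on: ATA_OP_FINISHED ends the request, ATA_OP_CONTINUES keeps the handler armed for the next interrupt, and ATA_OP_READY reports a successful status poll. The enum and toy driver loop below are illustrative stand-ins, not the kernel definitions.

#include <stdio.h>

/* Illustrative stand-ins for the ATA_OP_* codes used by the handlers above. */
enum op_status {
	OP_FINISHED,	/* request done, the queue can move on           */
	OP_CONTINUES,	/* handler re-armed, wait for the next interrupt */
	OP_READY,	/* status poll succeeded, caller may proceed     */
};

/* Toy status poll: report READY once the simulated BSY bit clears. */
static enum op_status status_poll(int busy)
{
	return busy ? OP_CONTINUES : OP_READY;
}

/* Toy interrupt handler: a few CONTINUES steps, then FINISHED. */
static enum op_status sample_handler(int *steps_left)
{
	return --(*steps_left) > 0 ? OP_CONTINUES : OP_FINISHED;
}

int main(void)
{
	int steps = 3;
	enum op_status st;

	if (status_poll(0) != OP_READY)
		return 1;		/* would be an error path in the driver */

	do {
		st = sample_handler(&steps);	/* one call per simulated IRQ */
		printf("handler returned %d\n", st);
	} while (st == OP_CONTINUES);

	return 0;
}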
/*
* Channel lock should be held on entry.
* Issue a READ or WRITE command to a disk, using LBA if supported, or CHS
* otherwise, to address sectors. It also takes care of issuing special
* DRIVE_CMDs.
*/
static ide_startstop_t __do_request(struct ata_device *drive,
struct ata_taskfile *ar, struct request *rq)
static ide_startstop_t idedisk_do_request(struct ata_device *drive, struct request *rq, sector_t block)
{
struct ata_taskfile args;
struct ata_taskfile *ar;
struct hd_driveid *id = drive->id;
u8 cmd;
/* Special drive commands don't need any kind of setup.
*/
if (rq->flags & REQ_SPECIAL) {
ar = rq->special;
cmd = ar->cmd;
} else {
unsigned int sectors;
/* FIXME: this check doesn't make sense */
if (!(rq->flags & REQ_CMD)) {
blk_dump_rq_flags(rq, "idedisk_do_request - bad command");
__ata_end_request(drive, rq, 0, 0);
return ATA_OP_FINISHED;
}
if (IS_PDC4030_DRIVE) {
extern ide_startstop_t promise_do_request(struct ata_device *, struct request *, sector_t);
return promise_do_request(drive, rq, block);
}
/*
* start a tagged operation
*/
if (drive->using_tcq) {
int st = blk_queue_start_tag(&drive->queue, rq);
if (ata_pending_commands(drive) > drive->max_depth)
drive->max_depth = ata_pending_commands(drive);
if (ata_pending_commands(drive) > drive->max_last_depth)
drive->max_last_depth = ata_pending_commands(drive);
if (st) {
BUG_ON(!ata_pending_commands(drive));
return ATA_OP_CONTINUES;
}
}
ar = &args;
memset(&args, 0, sizeof(args));
sectors = rq->nr_sectors;
		/* Dispatch depending on the drive access method. */
if ((drive->id->cfs_enable_2 & 0x0400) && (drive->addressing)) {
/* LBA 48 bit */
/*
* 268435455 == 137439 MB or 28bit limit
* 320173056 == 163929 MB or 48bit addressing
* 1073741822 == 549756 MB or 48bit addressing fake drive
*/
if (sectors == 65536)
sectors = 0;
if (blk_rq_tagged(rq)) {
args.taskfile.feature = sectors;
args.hobfile.feature = sectors >> 8;
args.taskfile.sector_count = rq->tag << 3;
} else {
args.taskfile.sector_count = sectors;
args.hobfile.sector_count = sectors >> 8;
}
args.taskfile.sector_number = block; /* low lba */
args.taskfile.low_cylinder = (block >>= 8); /* mid lba */
args.taskfile.high_cylinder = (block >>= 8); /* hi lba */
args.taskfile.device_head = drive->select.all;
args.hobfile.sector_number = (block >>= 8); /* low lba */
args.hobfile.low_cylinder = (block >>= 8); /* mid lba */
args.hobfile.high_cylinder = (block >>= 8); /* hi lba */
} else if (drive->select.b.lba) {
/* LBA 28 bit */
if (sectors == 256)
sectors = 0;
if (blk_rq_tagged(rq)) {
args.taskfile.feature = sectors;
args.taskfile.sector_count = rq->tag << 3;
} else
args.taskfile.sector_count = sectors;
args.taskfile.sector_number = block;
args.taskfile.low_cylinder = (block >>= 8);
args.taskfile.high_cylinder = (block >>= 8);
args.taskfile.device_head = ((block >> 8) & 0x0f);
} else {
/* CHS */
unsigned int track = (block / drive->sect);
unsigned int sect = (block % drive->sect) + 1;
unsigned int head = (track % drive->head);
unsigned int cyl = (track / drive->head);
if (sectors == 256)
sectors = 0;
if (blk_rq_tagged(rq)) {
args.taskfile.feature = sectors;
args.taskfile.sector_count = rq->tag << 3;
} else
args.taskfile.sector_count = sectors;
args.taskfile.sector_number = sect;
args.taskfile.low_cylinder = cyl;
args.taskfile.high_cylinder = (cyl>>8);
args.taskfile.device_head = head;
}
args.taskfile.device_head |= drive->select.all;
/*
		 * Decide which physical ATA command to use and set up the associated data.
*/
if (rq_data_dir(rq) == READ) {
args.command_type = IDE_DRIVE_TASK_IN;
if (drive->addressing) {
if (drive->using_tcq) {
cmd = WIN_READDMA_QUEUED_EXT;
} else if (drive->using_dma) {
cmd = WIN_READDMA_EXT;
} else if (drive->mult_count) {
args.XXX_handler = task_mulin_intr;
cmd = WIN_MULTREAD_EXT;
} else {
args.XXX_handler = task_in_intr;
cmd = WIN_READ_EXT;
}
} else {
if (drive->using_tcq) {
cmd = WIN_READDMA_QUEUED;
} else if (drive->using_dma) {
cmd = WIN_READDMA;
} else if (drive->mult_count) {
args.XXX_handler = task_mulin_intr;
cmd = WIN_MULTREAD;
} else {
args.XXX_handler = task_in_intr;
cmd = WIN_READ;
}
}
} else {
args.command_type = IDE_DRIVE_TASK_RAW_WRITE;
if (drive->addressing) {
if (drive->using_tcq) {
cmd = WIN_WRITEDMA_QUEUED_EXT;
} else if (drive->using_dma) {
cmd = WIN_WRITEDMA_EXT;
} else if (drive->mult_count) {
args.XXX_handler = task_mulout_intr;
cmd = WIN_MULTWRITE_EXT;
} else {
args.XXX_handler = task_out_intr;
cmd = WIN_WRITE_EXT;
}
} else {
if (drive->using_tcq) {
cmd = WIN_WRITEDMA_QUEUED;
} else if (drive->using_dma) {
cmd = WIN_WRITEDMA;
} else if (drive->mult_count) {
args.XXX_handler = task_mulout_intr;
cmd = WIN_MULTWRITE;
} else {
args.XXX_handler = task_out_intr;
cmd = WIN_WRITE;
}
}
}
#ifdef DEBUG
printk("%s: %sing: ", drive->name,
(rq_data_dir(rq)==READ) ? "read" : "writ");
if (lba) printk("LBAsect=%lld, ", block);
else printk("CHS=%d/%d/%d, ", cyl, head, sect);
printk("sectors=%ld, ", rq->nr_sectors);
printk("buffer=%p\n", rq->buffer);
#endif
ar->cmd = cmd;
rq->special = ar;
}
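The LBA48/LBA28/CHS setup above packs the same linear block number three different ways. Below is a self-contained sketch of that arithmetic, assuming 512-byte sectors and the per-drive sect/head geometry used above; the struct and field names are invented for the example and do not match the driver's taskfile layout.

#include <stdio.h>
#include <stdint.h>

struct chs { unsigned int cyl, head, sect; };

/* Classic CHS decomposition, mirroring the block/track/head/cyl math above. */
static struct chs block_to_chs(uint64_t block, unsigned int sects_per_track,
			       unsigned int heads)
{
	struct chs c;
	uint64_t track = block / sects_per_track;

	c.sect = (unsigned int)(block % sects_per_track) + 1;	/* 1-based */
	c.head = (unsigned int)(track % heads);
	c.cyl  = (unsigned int)(track / heads);
	return c;
}

/* LBA28: the low 24 bits go into sector/cyl-low/cyl-high, bits 24-27 into the
 * low nibble of the device/head register. */
static void block_to_lba28(uint32_t block, uint8_t out[4])
{
	out[0] = block & 0xff;		/* sector number          */
	out[1] = (block >> 8) & 0xff;	/* cylinder low           */
	out[2] = (block >> 16) & 0xff;	/* cylinder high          */
	out[3] = (block >> 24) & 0x0f;	/* device/head low nibble */
}

int main(void)
{
	struct chs c = block_to_chs(123456, 63, 16);
	uint8_t lba[4];

	block_to_lba28(123456, lba);
	printf("CHS %u/%u/%u, LBA28 bytes %02x %02x %02x %02x\n",
	       c.cyl, c.head, c.sect, lba[0], lba[1], lba[2], lba[3]);
	return 0;
}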
/* (ks/hs): Moved to start, do not use for multiple out commands.
* FIXME: why not?! */
if (!(ar->cmd == CFA_WRITE_MULTI_WO_ERASE ||
ar->cmd == WIN_MULTWRITE ||
ar->cmd == WIN_MULTWRITE_EXT)) {
if (!(cmd == CFA_WRITE_MULTI_WO_ERASE ||
cmd == WIN_MULTWRITE ||
cmd == WIN_MULTWRITE_EXT)) {
ata_irq_enable(drive, 1);
ata_mask(drive);
}
......@@ -372,57 +536,58 @@ static ide_startstop_t __do_request(struct ata_device *drive,
OUT_BYTE((ar->taskfile.device_head & (drive->addressing ? 0xE0 : 0xEF)) | drive->select.all,
IDE_SELECT_REG);
	/* FIXME: this is actually distinguishing between PIO and DMA requests.
*/
if (ar->XXX_handler) {
struct ata_channel *ch = drive->channel;
if (ar->command_type == IDE_DRIVE_TASK_IN ||
ar->command_type == IDE_DRIVE_TASK_NO_DATA) {
ata_set_handler(drive, ar->XXX_handler, WAIT_CMD, NULL);
OUT_BYTE(ar->cmd, IDE_COMMAND_REG);
ata_set_handler(drive, ar->XXX_handler, WAIT_CMD, NULL);
OUT_BYTE(cmd, IDE_COMMAND_REG);
return ATA_OP_CONTINUES;
}
/* FIXME: Warning check for race between handler and prehandler
* for writing first block of data. however since we are well
* inside the boundaries of the seek, we should be okay.
*
* FIXME: Replace the switch by using a proper command_type.
* FIXME: should be fixed --bzolnier
*/
if (ar->command_type == IDE_DRIVE_TASK_RAW_WRITE) {
ide_startstop_t ret;
OUT_BYTE(cmd, IDE_COMMAND_REG);
if (ar->cmd == CFA_WRITE_SECT_WO_ERASE ||
ar->cmd == WIN_WRITE ||
ar->cmd == WIN_WRITE_EXT ||
ar->cmd == WIN_WRITE_VERIFY ||
ar->cmd == WIN_WRITE_BUFFER ||
ar->cmd == WIN_DOWNLOAD_MICROCODE ||
ar->cmd == CFA_WRITE_MULTI_WO_ERASE ||
ar->cmd == WIN_MULTWRITE ||
ar->cmd == WIN_MULTWRITE_EXT) {
ide_startstop_t startstop;
if (ata_status_poll(drive, DATA_READY, drive->bad_wstat,
WAIT_DRQ, rq, &startstop)) {
ret = ata_status_poll(drive, DATA_READY, drive->bad_wstat,
WAIT_DRQ, rq);
if (ret != ATA_OP_READY) {
printk(KERN_ERR "%s: no DRQ after issuing %s\n",
drive->name, drive->mult_count ? "MULTWRITE" : "WRITE");
return startstop;
return ret;
}
/* FIXME: This doesn't make the slightest sense.
* (ks/hs): Fixed Multi Write
*/
if (!(ar->cmd == CFA_WRITE_MULTI_WO_ERASE ||
ar->cmd == WIN_MULTWRITE ||
ar->cmd == WIN_MULTWRITE_EXT)) {
if (!(cmd == CFA_WRITE_MULTI_WO_ERASE ||
cmd == WIN_MULTWRITE ||
cmd == WIN_MULTWRITE_EXT)) {
unsigned long flags;
char *buf = ide_map_rq(rq, &flags);
ata_set_handler(drive, ar->XXX_handler, WAIT_CMD, NULL);
/* For Write_sectors we need to stuff the first sector */
/* FIXME: what if !rq->current_nr_sectors --bzolnier */
ata_write(drive, buf, SECTOR_WORDS);
rq->current_nr_sectors--;
ide_unmap_rq(rq, buf, &flags);
return ide_started;
return ATA_OP_CONTINUES;
} else {
int i;
int ret;
/* Polling wait until the drive is ready.
*
......@@ -438,14 +603,16 @@ static ide_startstop_t __do_request(struct ata_device *drive,
break;
}
if (!drive_is_ready(drive)) {
printk(KERN_ERR "DISASTER WAITING TO HAPPEN!\n");
				/* We are completely missing an error
* return path here.
* FIXME: We have only one? -alat
*/
printk(KERN_ERR "DISASTER WAITING TO HAPPEN! Try to Stop it!\n");
return ata_error(drive, rq, __FUNCTION__);
}
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
ret = ar->XXX_handler(drive, rq);
spin_lock_irq(ch->lock);
return ret;
/* will set handler for us */
return ar->XXX_handler(drive, rq);
}
}
} else {
......@@ -456,221 +623,34 @@ static ide_startstop_t __do_request(struct ata_device *drive,
		 * FIXME: Handle the alternatives by a command type.
*/
/* FIXME: ATA_OP_CONTINUES? --bzolnier */
		/* No request started - BUG() or ATA_OP_FINISHED to avoid lockup? -alat */
if (!drive->using_dma)
return ide_started;
return ATA_OP_CONTINUES;
/* for dma commands we don't set the handler */
if (ar->cmd == WIN_WRITEDMA ||
ar->cmd == WIN_WRITEDMA_EXT ||
ar->cmd == WIN_READDMA ||
ar->cmd == WIN_READDMA_EXT)
if (cmd == WIN_WRITEDMA ||
cmd == WIN_WRITEDMA_EXT ||
cmd == WIN_READDMA ||
cmd == WIN_READDMA_EXT)
return udma_init(drive, rq);
#ifdef CONFIG_BLK_DEV_IDE_TCQ
else if (ar->cmd == WIN_WRITEDMA_QUEUED ||
ar->cmd == WIN_WRITEDMA_QUEUED_EXT ||
ar->cmd == WIN_READDMA_QUEUED ||
ar->cmd == WIN_READDMA_QUEUED_EXT)
else if (cmd == WIN_WRITEDMA_QUEUED ||
cmd == WIN_WRITEDMA_QUEUED_EXT ||
cmd == WIN_READDMA_QUEUED ||
cmd == WIN_READDMA_QUEUED_EXT)
return udma_tcq_init(drive, rq);
#endif
else {
printk(KERN_ERR "%s: unknown command %x\n", __FUNCTION__, ar->cmd);
return ide_stopped;
}
}
return ide_started;
}
/*
* Issue a READ or WRITE command to a disk, using LBA if supported, or CHS
* otherwise, to address sectors. It also takes care of issuing special
* DRIVE_CMDs.
*
* Channel lock should be held.
*/
static ide_startstop_t idedisk_do_request(struct ata_device *drive, struct request *rq, sector_t block)
{
struct ata_taskfile args;
unsigned int sectors;
/* This issues a special drive command.
*/
if (rq->flags & REQ_SPECIAL)
return __do_request(drive, rq->special, rq);
/* FIXME: this check doesn't make sense */
if (!(rq->flags & REQ_CMD)) {
blk_dump_rq_flags(rq, "idedisk_do_request - bad command");
__ata_end_request(drive, rq, 0, 0);
return ide_stopped;
}
if (IS_PDC4030_DRIVE) {
extern ide_startstop_t promise_do_request(struct ata_device *, struct request *, sector_t);
return promise_do_request(drive, rq, block);
}
/*
* start a tagged operation
*/
if (drive->using_tcq) {
int st = blk_queue_start_tag(&drive->queue, rq);
if (ata_pending_commands(drive) > drive->max_depth)
drive->max_depth = ata_pending_commands(drive);
if (ata_pending_commands(drive) > drive->max_last_depth)
drive->max_last_depth = ata_pending_commands(drive);
if (st) {
BUG_ON(!ata_pending_commands(drive));
return ide_started;
}
}
memset(&args, 0, sizeof(args));
sectors = rq->nr_sectors;
/* Dispatch depending up on the drive access method. */
if ((drive->id->cfs_enable_2 & 0x0400) && (drive->addressing)) {
/* LBA 48 bit */
/*
* 268435455 == 137439 MB or 28bit limit
* 320173056 == 163929 MB or 48bit addressing
* 1073741822 == 549756 MB or 48bit addressing fake drive
*/
if (sectors == 65536)
sectors = 0;
if (blk_rq_tagged(rq)) {
args.taskfile.feature = sectors;
args.hobfile.feature = sectors >> 8;
args.taskfile.sector_count = rq->tag << 3;
} else {
args.taskfile.sector_count = sectors;
args.hobfile.sector_count = sectors >> 8;
}
args.taskfile.sector_number = block; /* low lba */
args.taskfile.low_cylinder = (block >>= 8); /* mid lba */
args.taskfile.high_cylinder = (block >>= 8); /* hi lba */
args.taskfile.device_head = drive->select.all;
args.hobfile.sector_number = (block >>= 8); /* low lba */
args.hobfile.low_cylinder = (block >>= 8); /* mid lba */
args.hobfile.high_cylinder = (block >>= 8); /* hi lba */
} else if (drive->select.b.lba) {
/* LBA 28 bit */
if (sectors == 256)
sectors = 0;
if (blk_rq_tagged(rq)) {
args.taskfile.feature = sectors;
args.taskfile.sector_count = rq->tag << 3;
} else
args.taskfile.sector_count = sectors;
args.taskfile.sector_number = block;
args.taskfile.low_cylinder = (block >>= 8);
args.taskfile.high_cylinder = (block >>= 8);
args.taskfile.device_head = ((block >> 8) & 0x0f);
} else {
/* CHS */
unsigned int track = (block / drive->sect);
unsigned int sect = (block % drive->sect) + 1;
unsigned int head = (track % drive->head);
unsigned int cyl = (track / drive->head);
if (sectors == 256)
sectors = 0;
if (blk_rq_tagged(rq)) {
args.taskfile.feature = sectors;
args.taskfile.sector_count = rq->tag << 3;
} else
args.taskfile.sector_count = sectors;
args.taskfile.sector_number = sect;
args.taskfile.low_cylinder = cyl;
args.taskfile.high_cylinder = (cyl>>8);
args.taskfile.device_head = head;
}
args.taskfile.device_head |= drive->select.all;
/*
* Decode with physical ATA command to use and setup associated data.
*/
printk(KERN_ERR "%s: unknown command %x\n",
__FUNCTION__, cmd);
if (rq_data_dir(rq) == READ) {
args.command_type = IDE_DRIVE_TASK_IN;
if (drive->addressing) {
if (drive->using_tcq) {
args.cmd = WIN_READDMA_QUEUED_EXT;
} else if (drive->using_dma) {
args.cmd = WIN_READDMA_EXT;
} else if (drive->mult_count) {
args.XXX_handler = task_mulin_intr;
args.cmd = WIN_MULTREAD_EXT;
} else {
args.XXX_handler = task_in_intr;
args.cmd = WIN_READ_EXT;
}
} else {
if (drive->using_tcq) {
args.cmd = WIN_READDMA_QUEUED;
} else if (drive->using_dma) {
args.cmd = WIN_READDMA;
} else if (drive->mult_count) {
/* FIXME : Shouldn't this be task_mulin_intr?! */
args.XXX_handler = task_in_intr;
args.cmd = WIN_MULTREAD;
} else {
args.XXX_handler = task_in_intr;
args.cmd = WIN_READ;
}
}
} else {
args.command_type = IDE_DRIVE_TASK_RAW_WRITE;
if (drive->addressing) {
if (drive->using_tcq) {
args.cmd = WIN_WRITEDMA_QUEUED_EXT;
} else if (drive->using_dma) {
args.cmd = WIN_WRITEDMA_EXT;
} else if (drive->mult_count) {
args.XXX_handler = task_mulout_intr;
args.cmd = WIN_MULTWRITE_EXT;
} else {
args.XXX_handler = task_out_intr;
args.cmd = WIN_WRITE_EXT;
}
} else {
if (drive->using_tcq) {
args.cmd = WIN_WRITEDMA_QUEUED;
} else if (drive->using_dma) {
args.cmd = WIN_WRITEDMA;
} else if (drive->mult_count) {
args.XXX_handler = task_mulout_intr;
args.cmd = WIN_MULTWRITE;
} else {
args.XXX_handler = task_out_intr;
args.cmd = WIN_WRITE;
}
return ATA_OP_FINISHED;
}
}
#ifdef DEBUG
printk("%s: %sing: ", drive->name,
(rq_data_dir(rq)==READ) ? "read" : "writ");
if (lba) printk("LBAsect=%lld, ", block);
else printk("CHS=%d/%d/%d, ", cyl, head, sect);
printk("sectors=%ld, ", rq->nr_sectors);
printk("buffer=%p\n", rq->buffer);
#endif
rq->special = &args;
return __do_request(drive, &args, rq);
/* not reached */
return ATA_OP_CONTINUES;
}
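For reference, the opcode choice made in both versions of the function above reduces to a small decision tree over four drive/request properties (direction, 48-bit addressing, TCQ, DMA, multi-sector PIO). A hedged standalone sketch of that tree; the strings merely stand in for the WIN_* constants.

#include <stdio.h>

/* Pick a command name from the drive/request properties, mirroring the
 * nested if/else chains above.  Strings stand in for the WIN_* opcodes. */
static const char *pick_cmd(int writing, int lba48, int tcq, int dma, int mult)
{
	if (tcq)
		return writing ? (lba48 ? "WRITEDMA_QUEUED_EXT" : "WRITEDMA_QUEUED")
			       : (lba48 ? "READDMA_QUEUED_EXT"  : "READDMA_QUEUED");
	if (dma)
		return writing ? (lba48 ? "WRITEDMA_EXT" : "WRITEDMA")
			       : (lba48 ? "READDMA_EXT"  : "READDMA");
	if (mult)
		return writing ? (lba48 ? "MULTWRITE_EXT" : "MULTWRITE")
			       : (lba48 ? "MULTREAD_EXT"  : "MULTREAD");
	return writing ? (lba48 ? "WRITE_EXT" : "WRITE")
		       : (lba48 ? "READ_EXT"  : "READ");
}

int main(void)
{
	printf("%s\n", pick_cmd(1 /* write */, 0, 0, 1 /* dma */, 0));
	printf("%s\n", pick_cmd(0 /* read */, 1, 0, 0, 1 /* mult */));
	return 0;
}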
static int idedisk_open(struct inode *inode, struct file *__fp, struct ata_device *drive)
......@@ -689,7 +669,7 @@ static int idedisk_open(struct inode *inode, struct file *__fp, struct ata_devic
memset(&args, 0, sizeof(args));
args.cmd = WIN_DOORLOCK;
if (ide_raw_taskfile(drive, &args))
if (ide_raw_taskfile(drive, &args, NULL))
drive->doorlocking = 0;
}
}
......@@ -708,7 +688,7 @@ static int flush_cache(struct ata_device *drive)
else
args.cmd = WIN_FLUSH_CACHE;
return ide_raw_taskfile(drive, &args);
return ide_raw_taskfile(drive, &args, NULL);
}
static void idedisk_release(struct inode *inode, struct file *filp, struct ata_device *drive)
......@@ -722,7 +702,7 @@ static void idedisk_release(struct inode *inode, struct file *filp, struct ata_d
memset(&args, 0, sizeof(args));
args.cmd = WIN_DOORUNLOCK;
if (ide_raw_taskfile(drive, &args))
if (ide_raw_taskfile(drive, &args, NULL))
drive->doorlocking = 0;
}
}
......@@ -769,7 +749,7 @@ static int set_multcount(struct ata_device *drive, int arg)
memset(&args, 0, sizeof(args));
args.taskfile.sector_count = arg;
args.cmd = WIN_SETMULT;
if (!ide_raw_taskfile(drive, &args)) {
if (!ide_raw_taskfile(drive, &args, NULL)) {
/* all went well track this setting as valid */
drive->mult_count = arg;
......@@ -798,7 +778,7 @@ static int write_cache(struct ata_device *drive, int arg)
memset(&args, 0, sizeof(args));
args.taskfile.feature = (arg) ? SETFEATURES_EN_WCACHE : SETFEATURES_DIS_WCACHE;
args.cmd = WIN_SETFEATURES;
ide_raw_taskfile(drive, &args);
ide_raw_taskfile(drive, &args, NULL);
drive->wcache = arg;
......@@ -811,7 +791,7 @@ static int idedisk_standby(struct ata_device *drive)
memset(&args, 0, sizeof(args));
args.cmd = WIN_STANDBYNOW1;
return ide_raw_taskfile(drive, &args);
return ide_raw_taskfile(drive, &args, NULL);
}
static int set_acoustic(struct ata_device *drive, int arg)
......@@ -822,7 +802,7 @@ static int set_acoustic(struct ata_device *drive, int arg)
args.taskfile.feature = (arg)?SETFEATURES_EN_AAM:SETFEATURES_DIS_AAM;
args.taskfile.sector_count = arg;
args.cmd = WIN_SETFEATURES;
ide_raw_taskfile(drive, &args);
ide_raw_taskfile(drive, &args, NULL);
drive->acoustic = arg;
......@@ -942,7 +922,7 @@ static unsigned long native_max_address(struct ata_device *drive)
memset(&args, 0, sizeof(args));
args.taskfile.device_head = 0x40;
args.cmd = WIN_READ_NATIVE_MAX;
ide_raw_taskfile(drive, &args);
ide_raw_taskfile(drive, &args, NULL);
/* if OK, compute maximum address value */
if (!(drive->status & ERR_STAT)) {
......@@ -964,10 +944,9 @@ static u64 native_max_address_ext(struct ata_device *drive)
/* Create IDE/ATA command request structure */
memset(&args, 0, sizeof(args));
args.taskfile.device_head = 0x40;
args.cmd = WIN_READ_NATIVE_MAX_EXT;
ide_raw_taskfile(drive, &args);
ide_raw_taskfile(drive, &args, NULL);
/* if OK, compute maximum address value */
if (!(drive->status & ERR_STAT)) {
......@@ -1005,7 +984,7 @@ static sector_t set_max_address(struct ata_device *drive, sector_t addr_req)
args.taskfile.device_head = ((addr_req >> 24) & 0x0f) | 0x40;
args.cmd = WIN_SET_MAX;
ide_raw_taskfile(drive, &args);
ide_raw_taskfile(drive, &args, NULL);
/* if OK, read new maximum address value */
if (!(drive->status & ERR_STAT)) {
......@@ -1038,7 +1017,7 @@ static u64 set_max_address_ext(struct ata_device *drive, u64 addr_req)
args.hobfile.high_cylinder = (addr_req >>= 8);
args.hobfile.device_head = 0x40;
ide_raw_taskfile(drive, &args);
ide_raw_taskfile(drive, &args, NULL);
/* if OK, compute maximum address value */
if (!(drive->status & ERR_STAT)) {
......@@ -1422,7 +1401,12 @@ static int idedisk_ioctl(struct ata_device *drive, struct inode *inode, struct f
#ifdef CONFIG_BLK_DEV_IDE_TCQ
case HDIO_GET_QDMA: {
u8 val = drive->using_tcq;
			/* Fool hdparm: 0 means off, 1 means on. -alat */
			/* FIXME: hdparm has only -Q; do we need something like:
			 * hdparm -q 1/0 - TCQ on/off
			 * hdparm -Q 1-MAX - TCQ queue depth ?
			 */
u8 val = ( drive->using_tcq ? drive->queue_depth : 0 );
if (put_user(val, (u8 *) arg))
return -EFAULT;
......
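With the change above, HDIO_GET_QDMA hands hdparm the configured queue depth while TCQ is active and 0 otherwise, so existing tools still get a sensible on/off answer. A tiny sketch of that mapping (the ioctl and copy-to-user plumbing is omitted):

#include <stdio.h>

/* Value handed back for HDIO_GET_QDMA: the depth if TCQ is active, else 0. */
static unsigned char qdma_report(int using_tcq, unsigned char queue_depth)
{
	return using_tcq ? queue_depth : 0;
}

int main(void)
{
	printf("%u\n", qdma_report(1, 32));	/* TCQ on, depth 32 */
	printf("%u\n", qdma_report(0, 32));	/* TCQ off -> 0     */
	return 0;
}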
......@@ -314,270 +314,6 @@ typedef struct {
*/
#define IDEFLOPPY_ERROR_GENERAL 101
/*
* The ATAPI Status Register.
*/
typedef union {
unsigned all :8;
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned check :1; /* Error occurred */
unsigned idx :1; /* Reserved */
unsigned corr :1; /* Correctable error occurred */
unsigned drq :1; /* Data is request by the device */
unsigned dsc :1; /* Media access command finished */
unsigned reserved5 :1; /* Reserved */
unsigned drdy :1; /* Ignored for ATAPI commands (ready to accept ATA command) */
unsigned bsy :1; /* The device has access to the command block */
#elif defined(__BIG_ENDIAN_BITFIELD)
unsigned bsy :1; /* The device has access to the command block */
unsigned drdy :1; /* Ignored for ATAPI commands (ready to accept ATA command) */
unsigned reserved5 :1; /* Reserved */
unsigned dsc :1; /* Media access command finished */
unsigned drq :1; /* Data is request by the device */
unsigned corr :1; /* Correctable error occurred */
unsigned idx :1; /* Reserved */
unsigned check :1; /* Error occurred */
#else
#error "Bitfield endianness not defined! Check your byteorder.h"
#endif
} b;
} idefloppy_status_reg_t;
/*
* The ATAPI error register.
*/
typedef union {
unsigned all :8;
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned ili :1; /* Illegal Length Indication */
unsigned eom :1; /* End Of Media Detected */
unsigned abrt :1; /* Aborted command - As defined by ATA */
unsigned mcr :1; /* Media Change Requested - As defined by ATA */
unsigned sense_key :4; /* Sense key of the last failed packet command */
#elif defined(__BIG_ENDIAN_BITFIELD)
unsigned sense_key :4; /* Sense key of the last failed packet command */
unsigned mcr :1; /* Media Change Requested - As defined by ATA */
unsigned abrt :1; /* Aborted command - As defined by ATA */
unsigned eom :1; /* End Of Media Detected */
unsigned ili :1; /* Illegal Length Indication */
#else
#error "Bitfield endianness not defined! Check your byteorder.h"
#endif
} b;
} idefloppy_error_reg_t;
/*
* ATAPI Feature Register
*/
typedef union {
unsigned all :8;
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned dma :1; /* Using DMA or PIO */
unsigned reserved321 :3; /* Reserved */
unsigned reserved654 :3; /* Reserved (Tag Type) */
unsigned reserved7 :1; /* Reserved */
#elif defined(__BIG_ENDIAN_BITFIELD)
unsigned reserved7 :1; /* Reserved */
unsigned reserved654 :3; /* Reserved (Tag Type) */
unsigned reserved321 :3; /* Reserved */
unsigned dma :1; /* Using DMA or PIO */
#else
#error "Bitfield endianness not defined! Check your byteorder.h"
#endif
} b;
} idefloppy_feature_reg_t;
/*
* ATAPI Byte Count Register.
*/
typedef union {
unsigned all :16;
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned low :8; /* LSB */
unsigned high :8; /* MSB */
#elif defined(__BIG_ENDIAN_BITFIELD)
unsigned high :8; /* MSB */
unsigned low :8; /* LSB */
#else
#error "Bitfield endianness not defined! Check your byteorder.h"
#endif
} b;
} idefloppy_bcount_reg_t;
/*
* ATAPI Interrupt Reason Register.
*/
typedef union {
unsigned all :8;
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned cod :1; /* Information transferred is command (1) or data (0) */
unsigned io :1; /* The device requests us to read (1) or write (0) */
unsigned reserved :6; /* Reserved */
#elif defined(__BIG_ENDIAN_BITFIELD)
unsigned reserved :6; /* Reserved */
unsigned io :1; /* The device requests us to read (1) or write (0) */
unsigned cod :1; /* Information transferred is command (1) or data (0) */
#else
#error "Bitfield endianness not defined! Check your byteorder.h"
#endif
} b;
} idefloppy_ireason_reg_t;
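The bcount and ireason unions above (removed here in favour of the shared atapi_* types) only give named access to a 16-bit byte count split across two registers and to the CoD/IO bits of the interrupt reason. A portable sketch of the same decoding without bitfields, assuming the usual ATAPI bit positions (CoD in bit 0, IO in bit 1):

#include <stdio.h>
#include <stdint.h>

#define IREASON_COD 0x01	/* information is command (1) or data (0)       */
#define IREASON_IO  0x02	/* device wants us to read (1) or write (0)     */

/* The byte count for this interrupt arrives split across two 8-bit registers. */
static unsigned int bcount_from_regs(uint8_t low, uint8_t high)
{
	return ((unsigned int)high << 8) | low;
}

int main(void)
{
	uint8_t ireason = IREASON_IO;	/* data phase, device -> host */

	printf("bcount = %u\n", bcount_from_regs(0x00, 0x02));	/* 512 */
	printf("cod=%d io=%d\n",
	       !!(ireason & IREASON_COD), !!(ireason & IREASON_IO));
	return 0;
}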
/*
* ATAPI floppy Drive Select Register
*/
typedef union {
unsigned all :8;
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned sam_lun :3; /* Logical unit number */
unsigned reserved3 :1; /* Reserved */
unsigned drv :1; /* The responding drive will be drive 0 (0) or drive 1 (1) */
unsigned one5 :1; /* Should be set to 1 */
unsigned reserved6 :1; /* Reserved */
unsigned one7 :1; /* Should be set to 1 */
#elif defined(__BIG_ENDIAN_BITFIELD)
unsigned one7 :1; /* Should be set to 1 */
unsigned reserved6 :1; /* Reserved */
unsigned one5 :1; /* Should be set to 1 */
unsigned drv :1; /* The responding drive will be drive 0 (0) or drive 1 (1) */
unsigned reserved3 :1; /* Reserved */
unsigned sam_lun :3; /* Logical unit number */
#else
#error "Bitfield endianness not defined! Check your byteorder.h"
#endif
} b;
} idefloppy_drivesel_reg_t;
/*
* ATAPI Device Control Register
*/
typedef union {
unsigned all :8;
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned zero0 :1; /* Should be set to zero */
unsigned nien :1; /* Device interrupt is disabled (1) or enabled (0) */
unsigned srst :1; /* ATA software reset. ATAPI devices should use the new ATAPI srst. */
unsigned one3 :1; /* Should be set to 1 */
unsigned reserved4567 :4; /* Reserved */
#elif defined(__BIG_ENDIAN_BITFIELD)
unsigned reserved4567 :4; /* Reserved */
unsigned one3 :1; /* Should be set to 1 */
unsigned srst :1; /* ATA software reset. ATAPI devices should use the new ATAPI srst. */
unsigned nien :1; /* Device interrupt is disabled (1) or enabled (0) */
unsigned zero0 :1; /* Should be set to zero */
#else
#error "Bitfield endianness not defined! Check your byteorder.h"
#endif
} b;
} idefloppy_control_reg_t;
/*
* The following is used to format the general configuration word of
* the ATAPI IDENTIFY DEVICE command.
*/
struct idefloppy_id_gcw {
#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned packet_size :2; /* Packet Size */
unsigned reserved234 :3; /* Reserved */
unsigned drq_type :2; /* Command packet DRQ type */
unsigned removable :1; /* Removable media */
unsigned device_type :5; /* Device type */
unsigned reserved13 :1; /* Reserved */
unsigned protocol :2; /* Protocol type */
#elif defined(__BIG_ENDIAN_BITFIELD)
unsigned protocol :2; /* Protocol type */
unsigned reserved13 :1; /* Reserved */
unsigned device_type :5; /* Device type */
unsigned removable :1; /* Removable media */
unsigned drq_type :2; /* Command packet DRQ type */
unsigned reserved234 :3; /* Reserved */
unsigned packet_size :2; /* Packet Size */
#else
#error "Bitfield endianness not defined! Check your byteorder.h"
#endif
};
/*
* INQUIRY packet command - Data Format
*/
typedef struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned device_type :5; /* Peripheral Device Type */
unsigned reserved0_765 :3; /* Peripheral Qualifier - Reserved */
unsigned reserved1_6t0 :7; /* Reserved */
unsigned rmb :1; /* Removable Medium Bit */
unsigned ansi_version :3; /* ANSI Version */
unsigned ecma_version :3; /* ECMA Version */
unsigned iso_version :2; /* ISO Version */
unsigned response_format :4; /* Response Data Format */
unsigned reserved3_45 :2; /* Reserved */
unsigned reserved3_6 :1; /* TrmIOP - Reserved */
unsigned reserved3_7 :1; /* AENC - Reserved */
#elif defined(__BIG_ENDIAN_BITFIELD)
unsigned reserved0_765 :3; /* Peripheral Qualifier - Reserved */
unsigned device_type :5; /* Peripheral Device Type */
unsigned rmb :1; /* Removable Medium Bit */
unsigned reserved1_6t0 :7; /* Reserved */
unsigned iso_version :2; /* ISO Version */
unsigned ecma_version :3; /* ECMA Version */
unsigned ansi_version :3; /* ANSI Version */
unsigned reserved3_7 :1; /* AENC - Reserved */
unsigned reserved3_6 :1; /* TrmIOP - Reserved */
unsigned reserved3_45 :2; /* Reserved */
unsigned response_format :4; /* Response Data Format */
#else
#error "Bitfield endianness not defined! Check your byteorder.h"
#endif
u8 additional_length; /* Additional Length (total_length-4) */
u8 rsv5, rsv6, rsv7; /* Reserved */
u8 vendor_id[8]; /* Vendor Identification */
u8 product_id[16]; /* Product Identification */
u8 revision_level[4]; /* Revision Level */
u8 vendor_specific[20]; /* Vendor Specific - Optional */
u8 reserved56t95[40]; /* Reserved - Optional */
/* Additional information may be returned */
} idefloppy_inquiry_result_t;
/*
* REQUEST SENSE packet command result - Data Format.
*/
typedef struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned error_code :7; /* Current error (0x70) */
unsigned valid :1; /* The information field conforms to SFF-8070i */
u8 reserved1 :8; /* Reserved */
unsigned sense_key :4; /* Sense Key */
unsigned reserved2_4 :1; /* Reserved */
unsigned ili :1; /* Incorrect Length Indicator */
unsigned reserved2_67 :2;
#elif defined(__BIG_ENDIAN_BITFIELD)
unsigned valid :1; /* The information field conforms to SFF-8070i */
unsigned error_code :7; /* Current error (0x70) */
u8 reserved1 :8; /* Reserved */
unsigned reserved2_67 :2;
unsigned ili :1; /* Incorrect Length Indicator */
unsigned reserved2_4 :1; /* Reserved */
unsigned sense_key :4; /* Sense Key */
#else
#error "Bitfield endianness not defined! Check your byteorder.h"
#endif
u32 information __attribute__ ((packed));
u8 asl; /* Additional sense length (n-7) */
u32 command_specific; /* Additional command specific information */
u8 asc; /* Additional Sense Code */
u8 ascq; /* Additional Sense Code Qualifier */
u8 replaceable_unit_code; /* Field Replaceable Unit Code */
u8 sksv[3];
u8 pad[2]; /* Padding to 20 bytes */
} idefloppy_request_sense_result_t;
/*
* Pages of the SELECT SENSE / MODE SENSE packet commands.
*/
......@@ -602,9 +338,6 @@ typedef struct {
u8 reserved[4];
} idefloppy_mode_parameter_header_t;
#define IDEFLOPPY_MIN(a,b) ((a)<(b) ? (a):(b))
#define IDEFLOPPY_MAX(a,b) ((a)>(b) ? (a):(b))
/*
* idefloppy_end_request is used to finish servicing a request.
*
......@@ -613,6 +346,8 @@ typedef struct {
*/
static int idefloppy_end_request(struct ata_device *drive, struct request *rq, int uptodate)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
idefloppy_floppy_t *floppy = drive->driver_data;
int error;
......@@ -632,15 +367,19 @@ static int idefloppy_end_request(struct ata_device *drive, struct request *rq, i
return 0;
if (!(rq->flags & REQ_SPECIAL)) {
ata_end_request(drive, rq, uptodate);
__ata_end_request(drive, rq, uptodate, 0);
return 0;
}
spin_lock_irqsave(ch->lock, flags);
rq->errors = error;
blkdev_dequeue_request(rq);
drive->rq = NULL;
end_that_request_last(rq);
spin_unlock_irqrestore(ch->lock, flags);
return 0;
}
......@@ -663,7 +402,7 @@ static void idefloppy_input_buffers(struct ata_device *drive, struct request *rq
atapi_discard_data(drive, bcount);
return;
}
count = IDEFLOPPY_MIN(bio->bi_size - pc->b_count, bcount);
count = min_t(unsigned int, bio->bi_size - pc->b_count, bcount);
atapi_read(drive, bio_data(bio) + pc->b_count, count);
bcount -= count; pc->b_count += count;
}
......@@ -690,7 +429,7 @@ static void idefloppy_output_buffers(struct ata_device *drive, struct request *r
atapi_write_zeros (drive, bcount);
return;
}
count = IDEFLOPPY_MIN(pc->b_count, bcount);
count = min_t(unsigned int, pc->b_count, bcount);
atapi_write(drive, pc->b_data, count);
bcount -= count; pc->b_data += count; pc->b_count -= count;
}
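min_t, used above in place of the driver-local IDEFLOPPY_MIN, evaluates each argument once and forces both to a single named type before comparing. A rough userspace approximation of that idea (the kernel's real min_t adds extra type checking not reproduced here):

#include <stdio.h>

/* Rough stand-in for the kernel's min_t(type, a, b); uses a GCC statement
 * expression so each argument is evaluated exactly once. */
#define MIN_T(type, a, b) \
	({ type _a = (a); type _b = (b); _a < _b ? _a : _b; })

int main(void)
{
	unsigned int bcount = 700;
	unsigned int chunk = 512;

	/* Forcing one common type avoids the surprises a plain
	 * ((a)<(b)?(a):(b)) macro can produce with mixed signedness. */
	printf("%u\n", MIN_T(unsigned int, bcount, chunk));
	printf("%d\n", MIN_T(int, -1, 4));
	return 0;
}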
......@@ -743,13 +482,13 @@ static struct request *idefloppy_next_rq_storage(struct ata_device *drive)
* idefloppy_analyze_error is called on each failed packet command retry
* to analyze the request sense.
*/
static void idefloppy_analyze_error(struct ata_device *drive, idefloppy_request_sense_result_t *result)
static void idefloppy_analyze_error(struct ata_device *drive, atapi_request_sense_result_t *result)
{
idefloppy_floppy_t *floppy = drive->driver_data;
floppy->sense_key = result->sense_key; floppy->asc = result->asc; floppy->ascq = result->ascq;
floppy->progress_indication= result->sksv[0] & 0x80 ?
(unsigned short)get_unaligned((u16 *)(result->sksv+1)):0x10000;
floppy->progress_indication= result->sksv ?
(unsigned short)get_unaligned((u16 *)(result->sk_specific)):0x10000;
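A hedged userspace equivalent of the progress_indication expression above: the sense-key-specific bytes are only trusted when SKSV is set, the 16-bit value is read unaligned and unswapped, and 0x10000 acts as the "unknown" sentinel. memcpy stands in for get_unaligned() and the field layout is simplified for the sketch.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PROGRESS_UNKNOWN 0x10000UL	/* sentinel used when SKSV is clear */

/* Userspace counterpart of get_unaligned(): a raw, native-endian u16 read
 * from a possibly unaligned position in the sense buffer. */
static uint16_t read_u16_unaligned(const uint8_t *p)
{
	uint16_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

/* Mirror of the expression above: only trust the sense-key-specific bytes
 * when the SKSV bit was set in the REQUEST SENSE data. */
static unsigned long progress_indication(int sksv, const uint8_t sk_specific[2])
{
	return sksv ? read_u16_unaligned(sk_specific) : PROGRESS_UNKNOWN;
}

int main(void)
{
	const uint8_t sks[2] = { 0x34, 0x12 };

	printf("0x%lx\n", progress_indication(1, sks));
	printf("0x%lx\n", progress_indication(0, sks));
	return 0;
}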
#if IDEFLOPPY_DEBUG_LOG
if (floppy->failed_pc)
printk (KERN_INFO "ide-floppy: pc = %x, sense key = %x, asc = %x, ascq = %x\n",floppy->failed_pc->c[0],result->sense_key,result->asc,result->ascq);
......@@ -766,7 +505,7 @@ static void idefloppy_request_sense_callback(struct ata_device *drive, struct re
printk (KERN_INFO "ide-floppy: Reached idefloppy_request_sense_callback\n");
#endif
if (!floppy->pc->error) {
idefloppy_analyze_error(drive,(idefloppy_request_sense_result_t *) floppy->pc->buffer);
idefloppy_analyze_error(drive,(atapi_request_sense_result_t *) floppy->pc->buffer);
idefloppy_end_request(drive, rq, 1);
} else {
printk (KERN_ERR "Error in REQUEST SENSE itself - Aborting request!\n");
......@@ -806,7 +545,7 @@ static void idefloppy_retry_pc(struct ata_device *drive)
{
struct atapi_packet_command *pc;
struct request *rq;
idefloppy_error_reg_t error;
atapi_error_reg_t error;
error.all = IN_BYTE(IDE_ERROR_REG);
pc = idefloppy_next_pc_storage(drive);
......@@ -821,12 +560,10 @@ static void idefloppy_retry_pc(struct ata_device *drive)
*/
static ide_startstop_t idefloppy_pc_intr(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
idefloppy_floppy_t *floppy = drive->driver_data;
idefloppy_status_reg_t status;
idefloppy_bcount_reg_t bcount;
idefloppy_ireason_reg_t ireason;
atapi_status_reg_t status;
atapi_bcount_reg_t bcount;
atapi_ireason_reg_t ireason;
struct atapi_packet_command *pc = floppy->pc;
unsigned int temp;
......@@ -866,46 +603,40 @@ static ide_startstop_t idefloppy_pc_intr(struct ata_device *drive, struct reques
rq->errors++;
if (pc->c[0] == IDEFLOPPY_REQUEST_SENSE_CMD) {
printk (KERN_ERR "ide-floppy: I/O error in request sense command\n");
return ide_stopped;
return ATA_OP_FINISHED;
}
idefloppy_retry_pc (drive); /* Retry operation */
return ide_stopped; /* queued, but not started */
return ATA_OP_FINISHED; /* queued, but not started */
}
pc->error = 0;
if (floppy->failed_pc == pc)
floppy->failed_pc=NULL;
pc->callback(drive, rq); /* Command finished - Call the callback function */
return ide_stopped;
return ATA_OP_FINISHED;
}
#ifdef CONFIG_BLK_DEV_IDEDMA
if (test_and_clear_bit(PC_DMA_IN_PROGRESS, &pc->flags)) {
printk (KERN_ERR "ide-floppy: The floppy wants to issue more interrupts in DMA mode\n");
udma_enable(drive, 0, 1);
return ide_stopped;
return ATA_OP_FINISHED;
}
#endif
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
bcount.b.high=IN_BYTE (IDE_BCOUNTH_REG); /* Get the number of bytes to transfer */
bcount.b.low=IN_BYTE (IDE_BCOUNTL_REG); /* on this interrupt */
ireason.all=IN_BYTE (IDE_IREASON_REG);
if (ireason.b.cod) {
spin_unlock_irqrestore(ch->lock, flags);
printk (KERN_ERR "ide-floppy: CoD != 0 in idefloppy_pc_intr\n");
return ide_stopped;
return ATA_OP_FINISHED;
}
if (ireason.b.io == test_bit(PC_WRITING, &pc->flags)) { /* Hopefully, we will never get here */
spin_unlock_irqrestore(ch->lock, flags);
printk (KERN_ERR "ide-floppy: We wanted to %s, ", ireason.b.io ? "Write":"Read");
printk (KERN_ERR "but the floppy wants us to %s !\n",ireason.b.io ? "Read":"Write");
return ide_stopped;
return ATA_OP_FINISHED;
}
if (!test_bit(PC_WRITING, &pc->flags)) { /* Reading - Check that we have enough space */
temp = pc->actually_transferred + bcount.all;
......@@ -915,9 +646,8 @@ static ide_startstop_t idefloppy_pc_intr(struct ata_device *drive, struct reques
atapi_discard_data (drive,bcount.all);
ata_set_handler(drive, idefloppy_pc_intr,IDEFLOPPY_WAIT_CMD, NULL);
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ATA_OP_CONTINUES;
}
#if IDEFLOPPY_DEBUG_LOG
printk (KERN_NOTICE "ide-floppy: The floppy wants to send us more data than expected - allowing transfer\n");
......@@ -939,9 +669,8 @@ static ide_startstop_t idefloppy_pc_intr(struct ata_device *drive, struct reques
pc->current_position+=bcount.all;
ata_set_handler(drive, idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL); /* And set the interrupt handler again */
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ATA_OP_CONTINUES;
}
/*
......@@ -951,34 +680,27 @@ static ide_startstop_t idefloppy_pc_intr(struct ata_device *drive, struct reques
*/
static ide_startstop_t idefloppy_transfer_pc(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
ide_startstop_t startstop;
idefloppy_floppy_t *floppy = drive->driver_data;
idefloppy_ireason_reg_t ireason;
atapi_ireason_reg_t ireason;
int ret;
/* FIXME: Move this lock upwards.
*/
spin_lock_irqsave(ch->lock, flags);
if (ata_status_poll(drive, DRQ_STAT, BUSY_STAT,
WAIT_READY, rq, &startstop)) {
ret = ata_status_poll(drive, DRQ_STAT, BUSY_STAT, WAIT_READY, rq);
if (ret != ATA_OP_READY) {
printk (KERN_ERR "ide-floppy: Strange, packet command initiated yet DRQ isn't asserted\n");
ret = startstop;
} else {
ireason.all=IN_BYTE (IDE_IREASON_REG);
return ret;
}
if (!ireason.b.cod || ireason.b.io) {
printk (KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while issuing a packet command\n");
ret = ide_stopped;
} else {
ata_set_handler (drive, idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL); /* Set the interrupt routine */
atapi_write(drive, floppy->pc->c, 12); /* Send the actual packet */
ret = ide_started;
}
ireason.all = IN_BYTE(IDE_IREASON_REG);
if (!ireason.b.cod || ireason.b.io) {
printk (KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while issuing a packet command\n");
ret = ATA_OP_FINISHED;
} else {
ata_set_handler(drive, idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL); /* Set the interrupt routine */
atapi_write(drive, floppy->pc->c, 12); /* Send the actual packet */
ret = ATA_OP_CONTINUES;
}
spin_unlock_irqrestore(ch->lock, flags);
return ret;
}
......@@ -996,39 +718,35 @@ static ide_startstop_t idefloppy_transfer_pc(struct ata_device *drive, struct re
* packet, we schedule the packet transfer to occur about 2-3 ticks
* later in transfer_pc2.
*/
static int idefloppy_transfer_pc2(struct ata_device *drive, struct request *__rq)
static ide_startstop_t idefloppy_transfer_pc2(struct ata_device *drive, struct request *__rq, unsigned long *wait)
{
idefloppy_floppy_t *floppy = drive->driver_data;
atapi_write(drive, floppy->pc->c, 12); /* Send the actual packet */
return IDEFLOPPY_WAIT_CMD; /* Timeout for the packet command */
*wait = IDEFLOPPY_WAIT_CMD; /* Timeout for the packet command */
return ATA_OP_CONTINUES;
}
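idefloppy_transfer_pc2 above is an expiry hook rather than a normal completion handler: when the short timer armed by transfer_pc1 runs out, it sends the packet and reports a fresh timeout back through the wait pointer. A minimal sketch of that calling convention; the types and names below are illustrative, not the driver's ata_handler_t/expiry signatures.

#include <stdio.h>

enum op_status { OP_FINISHED, OP_CONTINUES };

/* Expiry hook: do the deferred work (send the packet) and hand a fresh
 * timeout back through *wait instead of failing the request. */
static enum op_status deferred_send_packet(unsigned long *wait)
{
	printf("sending 12-byte packet now\n");
	*wait = 100;	/* illustrative number of ticks to wait for completion */
	return OP_CONTINUES;
}

int main(void)
{
	unsigned long timeout = 3;	/* short delay armed when the command was issued */
	enum op_status st;

	printf("timer armed for %lu ticks\n", timeout);
	/* ...a timer framework would invoke the hook once 'timeout' lapses... */
	st = deferred_send_packet(&timeout);
	printf("status %d, timer re-armed for %lu ticks\n", st, timeout);
	return st == OP_CONTINUES ? 0 : 1;
}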
static ide_startstop_t idefloppy_transfer_pc1(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
idefloppy_floppy_t *floppy = drive->driver_data;
ide_startstop_t startstop;
idefloppy_ireason_reg_t ireason;
atapi_ireason_reg_t ireason;
int ret;
if (ata_status_poll(drive, DRQ_STAT, BUSY_STAT,
WAIT_READY, rq, &startstop)) {
ret = ata_status_poll(drive, DRQ_STAT, BUSY_STAT, WAIT_READY, rq);
if (ret != ATA_OP_READY) {
printk (KERN_ERR "ide-floppy: Strange, packet command initiated yet DRQ isn't asserted\n");
return startstop;
return ret;
}
/* FIXME: this locking should encompass the above register
* file access too.
*/
ireason.all = IN_BYTE(IDE_IREASON_REG);
spin_lock_irqsave(ch->lock, flags);
ireason.all=IN_BYTE(IDE_IREASON_REG);
if (!ireason.b.cod || ireason.b.io) {
printk (KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while issuing a packet command\n");
ret = ide_stopped;
ret = ATA_OP_FINISHED;
} else {
/*
......@@ -1043,9 +761,8 @@ static ide_startstop_t idefloppy_transfer_pc1(struct ata_device *drive, struct r
idefloppy_pc_intr, /* service routine for packet command */
floppy->ticks, /* wait this long before "failing" */
idefloppy_transfer_pc2); /* fail == transfer_pc2 */
ret = ide_started;
ret = ATA_OP_CONTINUES;
}
spin_unlock_irqrestore(ch->lock, flags);
return ret;
}
......@@ -1057,7 +774,7 @@ static ide_startstop_t idefloppy_issue_pc(struct ata_device *drive, struct reque
struct atapi_packet_command *pc)
{
idefloppy_floppy_t *floppy = drive->driver_data;
idefloppy_bcount_reg_t bcount;
atapi_bcount_reg_t bcount;
int dma_ok = 0;
ata_handler_t *pkt_xfer_routine;
......@@ -1086,7 +803,7 @@ static ide_startstop_t idefloppy_issue_pc(struct ata_device *drive, struct reque
}
floppy->failed_pc = NULL;
pc->callback(drive, rq);
return ide_stopped;
return ATA_OP_FINISHED;
}
#if IDEFLOPPY_DEBUG_LOG
printk (KERN_INFO "Retry number - %d\n",pc->retries);
......@@ -1126,19 +843,10 @@ static ide_startstop_t idefloppy_issue_pc(struct ata_device *drive, struct reque
}
if (test_bit(IDEFLOPPY_DRQ_INTERRUPT, &floppy->flags)) {
unsigned long flags;
struct ata_channel *ch = drive->channel;
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
ata_set_handler(drive, pkt_xfer_routine, IDEFLOPPY_WAIT_CMD, NULL);
OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG); /* Issue the packet command */
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ATA_OP_CONTINUES;
} else {
OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG);
return pkt_xfer_routine(drive, rq);
......@@ -1277,10 +985,8 @@ static void idefloppy_create_rw_cmd(idefloppy_floppy_t *floppy,
*/
static ide_startstop_t idefloppy_do_request(struct ata_device *drive, struct request *rq, sector_t block)
{
struct ata_channel *ch = drive->channel;
idefloppy_floppy_t *floppy = drive->driver_data;
struct atapi_packet_command *pc;
int ret;
#if IDEFLOPPY_DEBUG_LOG
printk (KERN_INFO "rq_status: %d, rq_dev: %u, flags: %lx, errors: %d\n",rq->rq_status,(unsigned int) rq->rq_dev,rq->flags,rq->errors);
......@@ -1294,22 +1000,17 @@ static ide_startstop_t idefloppy_do_request(struct ata_device *drive, struct req
else
printk (KERN_ERR "ide-floppy: %s: I/O error\n", drive->name);
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
idefloppy_end_request(drive, rq, 0);
spin_lock_irq(ch->lock);
return ide_stopped;
return ATA_OP_FINISHED;
}
if (rq->flags & REQ_CMD) {
if (rq->sector % floppy->bs_factor || rq->nr_sectors % floppy->bs_factor) {
printk ("%s: unsupported r/w request size\n", drive->name);
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
idefloppy_end_request(drive, rq, 0);
spin_lock_irq(ch->lock);
return ide_stopped;
return ATA_OP_FINISHED;
}
pc = idefloppy_next_pc_storage(drive);
idefloppy_create_rw_cmd (floppy, pc, rq, block);
......@@ -1318,20 +1019,13 @@ static ide_startstop_t idefloppy_do_request(struct ata_device *drive, struct req
pc = (struct atapi_packet_command *) rq->buffer;
} else {
blk_dump_rq_flags(rq, "ide-floppy: unsupported command in queue");
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
idefloppy_end_request(drive, rq, 0);
spin_lock_irq(ch->lock);
return ide_stopped;
return ATA_OP_FINISHED;
}
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
ret = idefloppy_issue_pc(drive, rq, pc);
spin_lock_irq(ch->lock);
return ret;
return idefloppy_issue_pc(drive, rq, pc);
}
/*
......@@ -1655,7 +1349,7 @@ static int idefloppy_get_format_progress(struct ata_device *drive,
}
else
{
idefloppy_status_reg_t status;
atapi_status_reg_t status;
unsigned long flags;
__save_flags(flags);
......@@ -1861,7 +1555,7 @@ static unsigned long idefloppy_capacity(struct ata_device *drive)
*/
static int idefloppy_identify_device(struct ata_device *drive,struct hd_driveid *id)
{
struct idefloppy_id_gcw gcw;
struct atapi_id_gcw gcw;
#if IDEFLOPPY_DEBUG_INFO
unsigned short mask,i;
char buffer[80];
......@@ -1976,7 +1670,7 @@ static int idefloppy_identify_device(struct ata_device *drive,struct hd_driveid
*/
static void idefloppy_setup(struct ata_device *drive, idefloppy_floppy_t *floppy)
{
struct idefloppy_id_gcw gcw;
struct atapi_id_gcw gcw;
int i;
*((unsigned short *) &gcw) = drive->id->config;
......
......@@ -257,24 +257,13 @@ static int __init setup_host_channel(struct pci_dev *dev,
if (d->flags & ATA_F_NODMA)
goto no_dma;
	/* Check whether this interface is UDMA4 mode capable. */
if (ch->udma_four) {
if (ch->udma_four)
printk("%s: warning: ATA-66/100 forced bit set!\n", dev->name);
} else {
if (d->ata66_check)
ch->udma_four = d->ata66_check(ch);
}
#ifdef CONFIG_BLK_DEV_IDEDMA
/*
* Setup DMA transfers on the channel.
*/
if (d->flags & ATA_F_NOADMA)
autodma = 0;
if (autodma)
ch->autodma = 1;
if (!((d->flags & ATA_F_DMA) || ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 0x80))))
goto no_dma;
/*
......@@ -324,6 +313,10 @@ static int __init setup_host_channel(struct pci_dev *dev,
* already enabled by the primary channel run.
*/
pci_set_master(dev);
if (autodma)
ch->autodma = 1;
if (d->init_dma)
d->init_dma(ch, dma_base);
else
......@@ -335,6 +328,11 @@ static int __init setup_host_channel(struct pci_dev *dev,
if (d->init_channel)
d->init_channel(ch);
#ifdef CONFIG_BLK_DEV_IDEDMA
if ((d->flags & ATA_F_NOADMA) || noautodma)
ch->autodma = 0;
#endif
return 0;
}
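After the reordering above, autodma is granted per channel and then vetoed last by ATA_F_NOADMA or the global noautodma switch, with the chipset's init_dma() hook still able to disable it earlier. A trivial sketch of that precedence, under the assumption that all inputs are plain booleans:

#include <stdio.h>

/* Final say on automatic DMA: global default first, explicit vetoes last. */
static int channel_autodma(int autodma_default, int flag_noadma, int noautodma)
{
	int autodma = autodma_default;	/* e.g. the CONFIG_IDEDMA_AUTO default */

	if (flag_noadma || noautodma)
		autodma = 0;		/* chipset flag or user said no */
	return autodma;
}

int main(void)
{
	printf("%d\n", channel_autodma(1, 0, 0));	/* enabled      */
	printf("%d\n", channel_autodma(1, 1, 0));	/* chipset veto */
	return 0;
}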
......
......@@ -256,11 +256,11 @@ struct {
static void pmac_ide_setup_dma(struct device_node *np, int ix);
static void pmac_udma_enable(struct ata_device *drive, int on, int verbose);
static int pmac_udma_start(struct ata_device *drive, struct request *rq);
static void pmac_udma_start(struct ata_device *drive, struct request *rq);
static int pmac_udma_stop(struct ata_device *drive);
static int pmac_udma_init(struct ata_device *drive, struct request *rq);
static int pmac_udma_irq_status(struct ata_device *drive);
static int pmac_udma_setup(struct ata_device *drive);
static int pmac_udma_setup(struct ata_device *drive, int map);
static int pmac_ide_build_dmatable(struct ata_device *drive, struct request *rq, int ix, int wr);
static int pmac_ide_tune_chipset(struct ata_device *drive, byte speed);
static void pmac_ide_tuneproc(struct ata_device *drive, byte pio);
......@@ -1340,7 +1340,7 @@ static void pmac_udma_enable(struct ata_device *drive, int on, int verbose)
ide_toggle_bounce(drive, 0);
}
static int pmac_udma_start(struct ata_device *drive, struct request *rq)
static void pmac_udma_start(struct ata_device *drive, struct request *rq)
{
int ix, ata4;
volatile struct dbdma_regs *dma;
......@@ -1350,7 +1350,7 @@ static int pmac_udma_start(struct ata_device *drive, struct request *rq)
*/
ix = pmac_ide_find(drive);
if (ix < 0)
return ide_stopped;
return;
dma = pmac_ide[ix].dma_regs;
ata4 = (pmac_ide[ix].kind == controller_kl_ata4 ||
......@@ -1360,7 +1360,7 @@ static int pmac_udma_start(struct ata_device *drive, struct request *rq)
/* Make sure it gets to the controller right now */
(void)in_le32(&dma->control);
return ide_started;
return;
}
static int pmac_udma_stop(struct ata_device *drive)
......@@ -1397,7 +1397,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq)
*/
ix = pmac_ide_find(drive);
if (ix < 0)
return ide_stopped;
return ATA_OP_FINISHED;
if (rq_data_dir(rq) == READ)
reading = 1;
......@@ -1409,7 +1409,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq)
pmac_ide[ix].kind == controller_kl_ata4_80);
if (!pmac_ide_build_dmatable(drive, rq, ix, !reading))
return ide_stopped;
return ATA_OP_FINISHED;
/* Apple adds 60ns to wrDataSetup on reads */
if (ata4 && (pmac_ide[ix].timings[unit] & TR_66_UDMA_EN)) {
out_le32((unsigned *)(IDE_DATA_REG + IDE_TIMING_CONFIG + _IO_BASE),
......@@ -1419,7 +1419,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq)
}
if (drive->type != ATA_DISK)
return ide_started;
return ATA_OP_CONTINUES;
ata_set_handler(drive, ide_dma_intr, WAIT_CMD, NULL);
if ((rq->flags & REQ_SPECIAL) &&
......@@ -1435,7 +1435,7 @@ static int pmac_udma_init(struct ata_device *drive, struct request *rq)
udma_start(drive, rq);
return ide_started;
return ATA_OP_CONTINUES;
}
/*
......@@ -1491,14 +1491,14 @@ static int pmac_udma_irq_status(struct ata_device *drive)
set_bit(IDE_DMA, drive->channel->active);
// if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
// printk(KERN_WARNING "ide%d, timeout waiting \
for dbdma command stop\n", ix);
return 1;
}
// for dbdma command stop\n", ix);
// return 1;
// }
udelay(1);
return 0;
}
static int pmac_udma_setup(struct ata_device *drive)
static int pmac_udma_setup(struct ata_device *drive, int map)
{
/* Change this to better match ide-dma.c */
pmac_ide_check_dma(drive);
......
......@@ -9,6 +9,10 @@
* (See linux/COPYING).
*/
/*
* BIG FAT FIXME: clean tape->spinlock locking --bzolnier
*/
/*
* IDE ATAPI streaming tape driver.
*
......@@ -770,32 +774,6 @@ typedef struct idetape_stage_s {
os_aux_t *aux; /* OnStream aux ptr */
} idetape_stage_t;
/*
* REQUEST SENSE packet command result - Data Format.
*/
typedef struct {
unsigned error_code :7; /* Current of deferred errors */
unsigned valid :1; /* The information field conforms to QIC-157C */
__u8 reserved1 :8; /* Segment Number - Reserved */
unsigned sense_key :4; /* Sense Key */
unsigned reserved2_4 :1; /* Reserved */
unsigned ili :1; /* Incorrect Length Indicator */
unsigned eom :1; /* End Of Medium */
unsigned filemark :1; /* Filemark */
__u32 information __attribute__ ((packed));
__u8 asl; /* Additional sense length (n-7) */
__u32 command_specific; /* Additional command specific information */
__u8 asc; /* Additional Sense Code */
__u8 ascq; /* Additional Sense Code Qualifier */
__u8 replaceable_unit_code; /* Field Replaceable Unit Code */
unsigned sk_specific1 :7; /* Sense Key Specific */
unsigned sksv :1; /* Sense Key Specific information is valid */
__u8 sk_specific2; /* Sense Key Specific */
__u8 sk_specific3; /* Sense Key Specific */
__u8 pad[2]; /* Padding to 20 bytes */
} idetape_request_sense_result_t;
/*
* Most of our global data which we need to save even as we leave the
* driver due to an interrupt or a timer event is stored in a variable
......@@ -920,7 +898,7 @@ typedef struct {
int avg_size;
int avg_speed;
idetape_request_sense_result_t sense; /* last sense information */
atapi_request_sense_result_t sense; /* last sense information */
char vendor_id[10];
char product_id[18];
......@@ -1123,101 +1101,6 @@ typedef struct {
#define IDETAPE_ERROR_FILEMARK 102
#define IDETAPE_ERROR_EOD 103
/*
* The ATAPI Status Register.
*/
typedef union {
unsigned all :8;
struct {
unsigned check :1; /* Error occurred */
unsigned idx :1; /* Reserved */
unsigned corr :1; /* Correctable error occurred */
unsigned drq :1; /* Data is request by the device */
unsigned dsc :1; /* Buffer availability / Media access command finished */
unsigned reserved5 :1; /* Reserved */
unsigned drdy :1; /* Ignored for ATAPI commands (ready to accept ATA command) */
unsigned bsy :1; /* The device has access to the command block */
} b;
} idetape_status_reg_t;
/*
* The ATAPI error register.
*/
typedef union {
unsigned all :8;
struct {
unsigned ili :1; /* Illegal Length Indication */
unsigned eom :1; /* End Of Media Detected */
unsigned abrt :1; /* Aborted command - As defined by ATA */
unsigned mcr :1; /* Media Change Requested - As defined by ATA */
unsigned sense_key :4; /* Sense key of the last failed packet command */
} b;
} idetape_error_reg_t;
/*
* ATAPI Feature Register
*/
typedef union {
unsigned all :8;
struct {
unsigned dma :1; /* Using DMA or PIO */
unsigned reserved321 :3; /* Reserved */
unsigned reserved654 :3; /* Reserved (Tag Type) */
unsigned reserved7 :1; /* Reserved */
} b;
} idetape_feature_reg_t;
/*
* ATAPI Byte Count Register.
*/
typedef union {
unsigned all :16;
struct {
unsigned low :8; /* LSB */
unsigned high :8; /* MSB */
} b;
} idetape_bcount_reg_t;
/*
* ATAPI Interrupt Reason Register.
*/
typedef union {
unsigned all :8;
struct {
unsigned cod :1; /* Information transferred is command (1) or data (0) */
unsigned io :1; /* The device requests us to read (1) or write (0) */
unsigned reserved :6; /* Reserved */
} b;
} idetape_ireason_reg_t;
/*
* ATAPI Drive Select Register
*/
typedef union {
unsigned all :8;
struct {
unsigned sam_lun :4; /* Should be zero with ATAPI (not used) */
unsigned drv :1; /* The responding drive will be drive 0 (0) or drive 1 (1) */
unsigned one5 :1; /* Should be set to 1 */
unsigned reserved6 :1; /* Reserved */
unsigned one7 :1; /* Should be set to 1 */
} b;
} idetape_drivesel_reg_t;
/*
* ATAPI Device Control Register
*/
typedef union {
unsigned all :8;
struct {
unsigned zero0 :1; /* Should be set to zero */
unsigned nien :1; /* Device interrupt is disabled (1) or enabled (0) */
unsigned srst :1; /* ATA software reset. ATAPI devices should use the new ATAPI srst. */
unsigned one3 :1; /* Should be set to 1 */
unsigned reserved4567 :4; /* Reserved */
} b;
} idetape_control_reg_t;
/*
 * idetape_chrdev_t provides the link between our character device
* interface and our block device interface and the corresponding
......@@ -1227,45 +1110,6 @@ typedef struct {
struct ata_device *drive;
} idetape_chrdev_t;
/*
* The following is used to format the general configuration word of
* the ATAPI IDENTIFY DEVICE command.
*/
struct idetape_id_gcw {
unsigned packet_size :2; /* Packet Size */
unsigned reserved234 :3; /* Reserved */
unsigned drq_type :2; /* Command packet DRQ type */
unsigned removable :1; /* Removable media */
unsigned device_type :5; /* Device type */
unsigned reserved13 :1; /* Reserved */
unsigned protocol :2; /* Protocol type */
};
/*
* INQUIRY packet command - Data Format (From Table 6-8 of QIC-157C)
*/
typedef struct {
unsigned device_type :5; /* Peripheral Device Type */
unsigned reserved0_765 :3; /* Peripheral Qualifier - Reserved */
unsigned reserved1_6t0 :7; /* Reserved */
unsigned rmb :1; /* Removable Medium Bit */
unsigned ansi_version :3; /* ANSI Version */
unsigned ecma_version :3; /* ECMA Version */
unsigned iso_version :2; /* ISO Version */
unsigned response_format :4; /* Response Data Format */
unsigned reserved3_45 :2; /* Reserved */
unsigned reserved3_6 :1; /* TrmIOP - Reserved */
unsigned reserved3_7 :1; /* AENC - Reserved */
__u8 additional_length; /* Additional Length (total_length-4) */
__u8 rsv5, rsv6, rsv7; /* Reserved */
__u8 vendor_id[8]; /* Vendor Identification */
__u8 product_id[16]; /* Product Identification */
__u8 revision_level[4]; /* Revision Level */
__u8 vendor_specific[20]; /* Vendor Specific - Optional */
__u8 reserved56t95[40]; /* Reserved - Optional */
/* Additional information may be returned */
} idetape_inquiry_result_t;
/*
* READ POSITION packet command - Data Format (From Table 6-57)
*/
......@@ -1574,7 +1418,7 @@ static struct request *idetape_next_rq_storage(struct ata_device *drive)
* to analyze the request sense. We currently do not utilize this
* information.
*/
static void idetape_analyze_error(struct ata_device *drive, idetape_request_sense_result_t *result)
static void idetape_analyze_error(struct ata_device *drive, atapi_request_sense_result_t *result)
{
idetape_tape_t *tape = drive->driver_data;
struct atapi_packet_command *pc = tape->failed_pc;
......@@ -1778,7 +1622,7 @@ static void idetape_remove_stage_head(struct ata_device *drive)
static int idetape_end_request(struct ata_device *drive, struct request *rq, int uptodate)
{
idetape_tape_t *tape = drive->driver_data;
unsigned long flags;
unsigned long flags, flags2;
int error;
int remove_stage = 0;
#if ONSTREAM_DEBUG
......@@ -1865,10 +1709,15 @@ static int idetape_end_request(struct ata_device *drive, struct request *rq, int
}
}
/* FIXME: replace tape->spinlock with channel->spinlock --bzolnier */
spin_lock_irqsave(drive->channel->lock, flags2);
blkdev_dequeue_request(rq);
drive->rq = NULL;
end_that_request_last(rq);
spin_unlock_irqrestore(drive->channel->lock, flags2);
if (remove_stage)
idetape_remove_stage_head(drive);
if (tape->active_data_request == NULL)
......@@ -1887,7 +1736,7 @@ static void idetape_request_sense_callback(struct ata_device *drive, struct requ
printk (KERN_INFO "ide-tape: Reached idetape_request_sense_callback\n");
#endif
if (!tape->pc->error) {
idetape_analyze_error (drive, (idetape_request_sense_result_t *) tape->pc->buffer);
idetape_analyze_error (drive, (atapi_request_sense_result_t *) tape->pc->buffer);
idetape_end_request(drive, rq, 1);
} else {
printk (KERN_ERR "ide-tape: Error in REQUEST SENSE itself - Aborting request!\n");
......@@ -1941,7 +1790,7 @@ static void idetape_retry_pc(struct ata_device *drive)
idetape_tape_t *tape = drive->driver_data;
struct atapi_packet_command *pc;
struct request *rq;
idetape_error_reg_t error;
atapi_error_reg_t error;
error.all = IN_BYTE (IDE_ERROR_REG);
pc = idetape_next_pc_storage (drive);
......@@ -1978,12 +1827,10 @@ static void idetape_postpone_request(struct ata_device *drive, struct request *r
*/
static ide_startstop_t idetape_pc_intr(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
idetape_tape_t *tape = drive->driver_data;
idetape_status_reg_t status;
idetape_bcount_reg_t bcount;
idetape_ireason_reg_t ireason;
atapi_status_reg_t status;
atapi_bcount_reg_t bcount;
atapi_ireason_reg_t ireason;
struct atapi_packet_command *pc = tape->pc;
unsigned int temp;
......@@ -2052,14 +1899,14 @@ static ide_startstop_t idetape_pc_intr(struct ata_device *drive, struct request
#endif
if (pc->c[0] == IDETAPE_REQUEST_SENSE_CMD) {
printk (KERN_ERR "ide-tape: I/O error in request sense command\n");
return ide_stopped;
return ATA_OP_FINISHED;
}
#if IDETAPE_DEBUG_LOG
if (tape->debug_level >= 1)
printk(KERN_INFO "ide-tape: [cmd %x]: check condition\n", pc->c[0]);
#endif
idetape_retry_pc(drive); /* Retry operation */
return ide_stopped;
return ATA_OP_FINISHED;
}
pc->error = 0;
if (!tape->onstream && test_bit (PC_WAIT_FOR_DSC, &pc->flags) && !status.b.dsc) { /* Media access command */
......@@ -2068,26 +1915,20 @@ static ide_startstop_t idetape_pc_intr(struct ata_device *drive, struct request
tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
idetape_postpone_request(drive, rq); /* Allow ide.c to handle other requests */
return ide_stopped;
return ATA_OP_FINISHED;
}
if (tape->failed_pc == pc)
tape->failed_pc = NULL;
pc->callback(drive, rq); /* Command finished - Call the callback function */
return ide_stopped;
return ATA_OP_FINISHED;
}
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
#ifdef CONFIG_BLK_DEV_IDEDMA
if (test_and_clear_bit (PC_DMA_IN_PROGRESS, &pc->flags)) {
printk (KERN_ERR "ide-tape: The tape wants to issue more interrupts in DMA mode\n");
printk (KERN_ERR "ide-tape: DMA disabled, reverting to PIO\n");
udma_enable(drive, 0, 1);
spin_unlock_irqrestore(ch->lock, flags);
return ide_stopped;
return ATA_OP_FINISHED;
}
#endif
......@@ -2096,17 +1937,15 @@ static ide_startstop_t idetape_pc_intr(struct ata_device *drive, struct request
ireason.all = IN_BYTE (IDE_IREASON_REG);
if (ireason.b.cod) {
spin_unlock_irqrestore(ch->lock, flags);
printk (KERN_ERR "ide-tape: CoD != 0 in idetape_pc_intr\n");
return ide_stopped;
return ATA_OP_FINISHED;
}
if (ireason.b.io == test_bit (PC_WRITING, &pc->flags)) { /* Hopefully, we will never get here */
spin_unlock_irqrestore(ch->lock, flags);
printk (KERN_ERR "ide-tape: We wanted to %s, ", ireason.b.io ? "Write":"Read");
printk (KERN_ERR "ide-tape: but the tape wants us to %s !\n",ireason.b.io ? "Read":"Write");
return ide_stopped;
return ATA_OP_FINISHED;
}
if (!test_bit (PC_WRITING, &pc->flags)) { /* Reading - Check that we have enough space */
temp = pc->actually_transferred + bcount.all;
......@@ -2115,9 +1954,8 @@ static ide_startstop_t idetape_pc_intr(struct ata_device *drive, struct request
printk (KERN_ERR "ide-tape: The tape wants to send us more data than expected - discarding data\n");
atapi_discard_data (drive, bcount.all);
ata_set_handler(drive, idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ATA_OP_CONTINUES;
}
#if IDETAPE_DEBUG_LOG
if (tape->debug_level >= 2)
......@@ -2143,9 +1981,8 @@ static ide_startstop_t idetape_pc_intr(struct ata_device *drive, struct request
printk(KERN_INFO "ide-tape: [cmd %x] transferred %d bytes on that interrupt\n", pc->c[0], bcount.all);
#endif
ata_set_handler(drive, idetape_pc_intr, IDETAPE_WAIT_CMD, NULL); /* And set the interrupt handler again */
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ATA_OP_CONTINUES;
}
/*
......@@ -2192,46 +2029,41 @@ static ide_startstop_t idetape_pc_intr(struct ata_device *drive, struct request
*/
static ide_startstop_t idetape_transfer_pc(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
idetape_tape_t *tape = drive->driver_data;
struct atapi_packet_command *pc = tape->pc;
idetape_ireason_reg_t ireason;
atapi_ireason_reg_t ireason;
int retries = 100;
ide_startstop_t startstop;
int ret;
/* FIXME: Move this lock upwards.
*/
spin_lock_irqsave(ch->lock, flags);
if (ata_status_poll(drive, DRQ_STAT, BUSY_STAT,
WAIT_READY, rq, &startstop)) {
ret = ata_status_poll(drive, DRQ_STAT, BUSY_STAT, WAIT_READY, rq);
if (ret != ATA_OP_READY) {
printk (KERN_ERR "ide-tape: Strange, packet command initiated yet DRQ isn't asserted\n");
ret = startstop;
} else {
ireason.all = IN_BYTE (IDE_IREASON_REG);
while (retries-- && (!ireason.b.cod || ireason.b.io)) {
printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing a packet command, retrying\n");
udelay(100);
ireason.all = IN_BYTE(IDE_IREASON_REG);
if (retries == 0) {
printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing a packet command, ignoring\n");
ireason.b.cod = 1;
ireason.b.io = 0;
}
}
if (!ireason.b.cod || ireason.b.io) {
printk (KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing a packet command\n");
ret = ide_stopped;
} else {
tape->cmd_start_time = jiffies;
ata_set_handler(drive, idetape_pc_intr, IDETAPE_WAIT_CMD, NULL); /* Set the interrupt routine */
atapi_write(drive,pc->c,12); /* Send the actual packet */
ret = ide_started;
return ret;
}
ireason.all = IN_BYTE (IDE_IREASON_REG);
while (retries-- && (!ireason.b.cod || ireason.b.io)) {
printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing a packet command, retrying\n");
udelay(100);
ireason.all = IN_BYTE(IDE_IREASON_REG);
if (retries == 0) {
printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing a packet command, ignoring\n");
ireason.b.cod = 1;
ireason.b.io = 0;
}
}
spin_unlock_irqrestore(ch->lock, flags);
if (!ireason.b.cod || ireason.b.io) {
printk (KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing a packet command\n");
ret = ATA_OP_FINISHED;
} else {
tape->cmd_start_time = jiffies;
ata_set_handler(drive, idetape_pc_intr, IDETAPE_WAIT_CMD, NULL); /* Set the interrupt routine */
atapi_write(drive,pc->c,12); /* Send the actual packet */
ret = ATA_OP_CONTINUES;
}
return ret;
}
......@@ -2240,7 +2072,7 @@ static ide_startstop_t idetape_issue_packet_command(struct ata_device *drive,
struct request *rq, struct atapi_packet_command *pc)
{
idetape_tape_t *tape = drive->driver_data;
idetape_bcount_reg_t bcount;
atapi_bcount_reg_t bcount;
int dma_ok = 0;
#if IDETAPE_DEBUG_BUGS
......@@ -2272,7 +2104,7 @@ static ide_startstop_t idetape_issue_packet_command(struct ata_device *drive,
}
tape->failed_pc = NULL;
pc->callback(drive, rq);
return ide_stopped;
return ATA_OP_FINISHED;
}
#if IDETAPE_DEBUG_LOG
if (tape->debug_level >= 2)
......@@ -2305,19 +2137,10 @@ static ide_startstop_t idetape_issue_packet_command(struct ata_device *drive,
}
#endif
if (test_bit(IDETAPE_DRQ_INTERRUPT, &tape->flags)) {
unsigned long flags;
struct ata_channel *ch = drive->channel;
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
ata_set_handler(drive, idetape_transfer_pc, IDETAPE_WAIT_CMD, NULL);
OUT_BYTE(WIN_PACKETCMD, IDE_COMMAND_REG);
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ATA_OP_CONTINUES;
} else {
OUT_BYTE(WIN_PACKETCMD, IDE_COMMAND_REG);
return idetape_transfer_pc(drive, rq);
......@@ -2451,7 +2274,7 @@ static void idetape_media_access_finished(struct ata_device *drive, struct reque
{
idetape_tape_t *tape = drive->driver_data;
struct atapi_packet_command *pc = tape->pc;
idetape_status_reg_t status;
atapi_status_reg_t status;
if (tape->onstream)
printk(KERN_INFO "ide-tape: bug: onstream, media_access_finished\n");
......@@ -2605,12 +2428,10 @@ static void idetape_create_write_cmd(idetape_tape_t *tape,
*/
static ide_startstop_t idetape_do_request(struct ata_device *drive, struct request *rq, sector_t block)
{
struct ata_channel *ch = drive->channel;
idetape_tape_t *tape = drive->driver_data;
struct atapi_packet_command *pc;
struct request *postponed_rq = tape->postponed_rq;
idetape_status_reg_t status;
int ret;
atapi_status_reg_t status;
#if IDETAPE_DEBUG_LOG
/* if (tape->debug_level >= 5)
......@@ -2625,32 +2446,24 @@ static ide_startstop_t idetape_do_request(struct ata_device *drive, struct reque
*/
printk (KERN_NOTICE "ide-tape: %s: Unsupported command in request queue (%ld)\n", drive->name, rq->flags);
__ata_end_request(drive, rq, 0, 0); /* Let the common code handle it */
return ide_stopped;
return ATA_OP_FINISHED;
}
/*
* Retry a failed packet command
*/
if (tape->failed_pc != NULL && tape->pc->c[0] == IDETAPE_REQUEST_SENSE_CMD) {
int ret;
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
ret = idetape_issue_packet_command(drive, rq, tape->failed_pc);
spin_lock_irq(ch->lock);
return ret;
return idetape_issue_packet_command(drive, rq, tape->failed_pc);
}
#if IDETAPE_DEBUG_BUGS
if (postponed_rq != NULL)
if (rq != postponed_rq) {
printk (KERN_ERR "ide-tape: ide-tape.c bug - Two DSC requests were queued\n");
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
idetape_end_request(drive, rq, 0);
spin_lock_irq(ch->lock);
return ide_stopped;
return ATA_OP_FINISHED;
}
#endif
......@@ -2690,7 +2503,7 @@ static ide_startstop_t idetape_do_request(struct ata_device *drive, struct reque
idetape_queue_onstream_buffer_fill(drive);
if (jiffies > tape->insert_time)
tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time);
return ide_stopped;
return ATA_OP_FINISHED;
}
if (jiffies > tape->insert_time)
tape->insert_speed = tape->insert_size / 1024 * HZ / (jiffies - tape->insert_time);
......@@ -2726,20 +2539,16 @@ static ide_startstop_t idetape_do_request(struct ata_device *drive, struct reque
tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
} else if ((signed long) (jiffies - tape->dsc_timeout) > 0) {
printk (KERN_ERR "ide-tape: %s: DSC timeout\n", tape->name);
if (rq->flags == IDETAPE_PC_RQ2) {
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
if (rq->flags == IDETAPE_PC_RQ2)
idetape_media_access_finished(drive, rq);
spin_lock_irq(ch->lock);
return ide_stopped;
} else {
return ide_stopped;
}
return ATA_OP_FINISHED;
} else if (jiffies - tape->dsc_polling_start > IDETAPE_DSC_MA_THRESHOLD)
tape->dsc_polling_frequency = IDETAPE_DSC_MA_SLOW;
idetape_postpone_request(drive, rq);
return ide_stopped;
return ATA_OP_FINISHED;
}
switch (rq->flags) {
case IDETAPE_READ_RQ:
......@@ -2783,7 +2592,7 @@ static ide_startstop_t idetape_do_request(struct ata_device *drive, struct reque
case IDETAPE_ABORTED_WRITE_RQ:
rq->flags = IDETAPE_WRITE_RQ;
idetape_end_request(drive, rq, IDETAPE_ERROR_EOD);
return ide_stopped;
return ATA_OP_FINISHED;
case IDETAPE_ABORTED_READ_RQ:
#if IDETAPE_DEBUG_LOG
if (tape->debug_level >= 2)
......@@ -2791,32 +2600,25 @@ static ide_startstop_t idetape_do_request(struct ata_device *drive, struct reque
#endif
rq->flags = IDETAPE_READ_RQ;
idetape_end_request(drive, rq, IDETAPE_ERROR_EOD);
return ide_stopped;
return ATA_OP_FINISHED;
case IDETAPE_PC_RQ1:
/* FIXME: --mdcki */
pc = (struct atapi_packet_command *) rq->buffer;
rq->flags = IDETAPE_PC_RQ2;
break;
case IDETAPE_PC_RQ2:
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
idetape_media_access_finished(drive, rq);
spin_lock_irq(ch->lock);
return ide_stopped;
return ATA_OP_FINISHED;
default:
printk (KERN_ERR "ide-tape: bug in IDETAPE_RQ_CMD macro\n");
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
idetape_end_request(drive, rq, 0);
spin_lock_irq(ch->lock);
return ide_stopped;
return ATA_OP_FINISHED;
}
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
ret = idetape_issue_packet_command(drive, rq, pc);
spin_lock_irq(ch->lock);
return ret;
return idetape_issue_packet_command(drive, rq, pc);
}
/*
......@@ -5572,7 +5374,7 @@ static int idetape_chrdev_release (struct inode *inode, struct file *filp)
*/
static int idetape_identify_device(struct ata_device *drive,struct hd_driveid *id)
{
struct idetape_id_gcw gcw;
struct atapi_id_gcw gcw;
#if IDETAPE_DEBUG_INFO
unsigned short mask,i;
#endif /* IDETAPE_DEBUG_INFO */
......@@ -5791,14 +5593,14 @@ static void idetape_get_inquiry_results(struct ata_device *drive)
char *r;
idetape_tape_t *tape = drive->driver_data;
struct atapi_packet_command pc;
idetape_inquiry_result_t *inquiry;
atapi_inquiry_result_t *inquiry;
idetape_create_inquiry_cmd(&pc);
if (idetape_queue_pc_tail (drive, &pc)) {
printk (KERN_ERR "ide-tape: %s: can't get INQUIRY results\n", tape->name);
return;
}
inquiry = (idetape_inquiry_result_t *) pc.buffer;
inquiry = (atapi_inquiry_result_t *) pc.buffer;
memcpy(tape->vendor_id, inquiry->vendor_id, 8);
memcpy(tape->product_id, inquiry->product_id, 16);
memcpy(tape->firmware_revision, inquiry->revision_level, 4);
......@@ -5985,7 +5787,7 @@ static void idetape_setup(struct ata_device *drive, idetape_tape_t *tape, int mi
unsigned long t1, tmid, tn;
unsigned long t;
int speed;
struct idetape_id_gcw gcw;
struct atapi_id_gcw gcw;
int stage_size;
struct sysinfo si;
......
......@@ -197,13 +197,14 @@ int drive_is_ready(struct ata_device *drive)
int ide_do_drive_cmd(struct ata_device *drive, struct request *rq, ide_action_t action)
{
unsigned long flags;
unsigned int major = drive->channel->major;
struct ata_channel *ch = drive->channel;
unsigned int major = ch->major;
request_queue_t *q = &drive->queue;
struct list_head *queue_head = &q->queue_head;
DECLARE_COMPLETION(wait);
#ifdef CONFIG_BLK_DEV_PDC4030
if (drive->channel->chipset == ide_pdc4030 && rq->buffer != NULL)
if (ch->chipset == ide_pdc4030 && rq->buffer)
return -ENOSYS; /* special drive cmds not supported */
#endif
rq->errors = 0;
......@@ -212,22 +213,18 @@ int ide_do_drive_cmd(struct ata_device *drive, struct request *rq, ide_action_t
if (action == ide_wait)
rq->waiting = &wait;
spin_lock_irqsave(drive->channel->lock, flags);
spin_lock_irqsave(ch->lock, flags);
if (blk_queue_empty(&drive->queue) || action == ide_preempt) {
if (action == ide_preempt)
drive->rq = NULL;
} else {
if (action == ide_wait)
queue_head = queue_head->prev;
else
queue_head = queue_head->next;
}
q->elevator.elevator_add_req_fn(q, rq, queue_head);
if (action == ide_preempt)
drive->rq = NULL;
else if (!blk_queue_empty(&drive->queue))
queue_head = queue_head->prev; /* ide_end and ide_wait */
__elv_add_request(q, rq, queue_head);
do_ide_request(q);
spin_unlock_irqrestore(drive->channel->lock, flags);
spin_unlock_irqrestore(ch->lock, flags);
if (action == ide_wait) {
wait_for_completion(&wait); /* wait for it to be serviced */
......@@ -235,23 +232,20 @@ int ide_do_drive_cmd(struct ata_device *drive, struct request *rq, ide_action_t
}
return 0;
}
/*
* Invoked on completion of a special REQ_SPECIAL command.
*/
ide_startstop_t ata_special_intr(struct ata_device *drive, struct
static ide_startstop_t special_intr(struct ata_device *drive, struct
request *rq) {
struct ata_taskfile *ar = rq->special;
ide_startstop_t ret = ide_stopped;
unsigned long flags;
struct ata_channel *ch =drive->channel;
struct ata_taskfile *ar = rq->special;
ide_startstop_t ret = ATA_OP_FINISHED;
ide__sti(); /* local CPU only */
spin_lock_irqsave(drive->channel->lock, flags);
ide__sti();
if (rq->buffer && ar->taskfile.sector_number) {
if (!ata_status(drive, 0, DRQ_STAT) && ar->taskfile.sector_number) {
......@@ -283,24 +277,27 @@ ide_startstop_t ata_special_intr(struct ata_device *drive, struct
ata_in_regfile(drive, &ar->hobfile);
}
spin_lock_irqsave(ch->lock, flags);
blkdev_dequeue_request(rq);
drive->rq = NULL;
end_that_request_last(rq);
spin_unlock_irqrestore(drive->channel->lock, flags);
spin_unlock_irqrestore(ch->lock, flags);
return ret;
}
int ide_raw_taskfile(struct ata_device *drive, struct ata_taskfile *ar)
int ide_raw_taskfile(struct ata_device *drive, struct ata_taskfile *ar, char *buf)
{
struct request req;
ar->command_type = IDE_DRIVE_TASK_NO_DATA;
ar->XXX_handler = ata_special_intr;
ar->XXX_handler = special_intr;
memset(&req, 0, sizeof(req));
req.flags = REQ_SPECIAL;
req.buffer = buf;
req.special = ar;
return ide_do_drive_cmd(drive, &req, ide_wait);
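For context: ide_raw_taskfile() now takes the data buffer as its third argument, so callers no longer build a REQ_SPECIAL request by hand. A minimal usage sketch, mirroring the do_recalibrate() and HDIO_DRIVE_CMD call sites further down (command and values are copied from that code, not new):

	struct ata_taskfile args;

	memset(&args, 0, sizeof(args));
	args.taskfile.sector_count = drive->sect;
	args.cmd = WIN_RESTORE;
	/* NULL: this command moves no data; pass a byte buffer
	 * (e.g. argbuf + 4 in the ioctl path) when it does. */
	ide_raw_taskfile(drive, &args, NULL);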
......@@ -310,5 +307,4 @@ EXPORT_SYMBOL(drive_is_ready);
EXPORT_SYMBOL(ide_do_drive_cmd);
EXPORT_SYMBOL(ata_read);
EXPORT_SYMBOL(ata_write);
EXPORT_SYMBOL(ata_special_intr);
EXPORT_SYMBOL(ide_raw_taskfile);
......@@ -106,15 +106,14 @@ int drive_is_flashcard(struct ata_device *drive)
return 0;
}
/*
 * Non-locking variant of the end_request method.
*
* Channel lock should be held.
*/
int __ata_end_request(struct ata_device *drive, struct request *rq, int uptodate, unsigned int nr_secs)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
int ret = 1;
spin_lock_irqsave(ch->lock, flags);
BUG_ON(!(rq->flags & REQ_STARTED));
/* FIXME: Make this "small" hack to eliminate locking from
......@@ -143,22 +142,7 @@ int __ata_end_request(struct ata_device *drive, struct request *rq, int uptodate
ret = 0;
}
return ret;
}
/*
* This is the default end request function as well
*/
int ata_end_request(struct ata_device *drive, struct request *rq, int uptodate)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
int ret;
spin_lock_irqsave(ch->lock, flags);
ret = __ata_end_request(drive, rq, uptodate, 0);
spin_unlock_irqrestore(drive->channel->lock, flags);
spin_unlock_irqrestore(ch->lock, flags);
return ret;
}
......@@ -169,14 +153,16 @@ int ata_end_request(struct ata_device *drive, struct request *rq, int uptodate)
* at the appropriate code to handle the next interrupt, and a
* timer is started to prevent us from waiting forever in case
* something goes wrong (see the ide_timer_expiry() handler later on).
*
* Channel lock should be held.
*/
void ata_set_handler(struct ata_device *drive, ata_handler_t handler,
unsigned long timeout, ata_expiry_t expiry)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
spin_lock_irqsave(ch->lock, flags);
/* FIXME: change it later to BUG_ON(ch->handler) --bzolnier */
if (ch->handler)
printk("%s: %s: handler not null; old=%p, new=%p, from %p\n",
drive->name, __FUNCTION__, ch->handler, handler, __builtin_return_address(0));
......@@ -188,48 +174,35 @@ void ata_set_handler(struct ata_device *drive, ata_handler_t handler,
add_timer(&ch->timer);
spin_unlock_irqrestore(ch->lock, flags);
}
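As a reading aid, a minimal sketch of how a handler arms the next interrupt under the new return codes; the pattern is the one idetape_pc_intr and the reset poll functions below follow (my_intr_handler is a placeholder name, not part of the patch):

	static ide_startstop_t my_intr_handler(struct ata_device *drive, struct request *rq)
	{
		if (!ata_status(drive, 0, BUSY_STAT)) {
			/* not done yet: rearm ourselves, the channel stays busy */
			ata_set_handler(drive, my_intr_handler, WAIT_CMD, NULL);
			return ATA_OP_CONTINUES;
		}
		/* done: the channel may start the next request */
		return ATA_OP_FINISHED;
	}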
static void check_crc_errors(struct ata_device *drive)
{
if (!drive->using_dma)
return;
return;
/* check the DMA crc count */
if (drive->crc_count) {
udma_enable(drive, 0, 0);
if (drive->channel->speedproc) {
u8 pio = XFER_PIO_4;
u8 mode = drive->current_speed;
drive->crc_count = 0;
switch (drive->current_speed) {
case XFER_UDMA_7: pio = XFER_UDMA_6;
break;
case XFER_UDMA_6: pio = XFER_UDMA_5;
break;
case XFER_UDMA_5: pio = XFER_UDMA_4;
break;
case XFER_UDMA_4: pio = XFER_UDMA_3;
break;
case XFER_UDMA_3: pio = XFER_UDMA_2;
break;
case XFER_UDMA_2: pio = XFER_UDMA_1;
break;
case XFER_UDMA_1: pio = XFER_UDMA_0;
break;
if (mode > XFER_UDMA_0)
mode--;
else
/*
 * OOPS: we do not go to non-Ultra DMA modes;
 * without iCRC available we force
 * the system to PIO and make the user
 * invoke the ATA-1/ATA-2 DMA modes.
*/
case XFER_UDMA_0:
default:
pio = XFER_PIO_4;
}
drive->channel->speedproc(drive, pio);
mode = XFER_PIO_4;
drive->channel->speedproc(drive, mode);
}
if (drive->current_speed >= XFER_SW_DMA_0)
if (drive->current_speed >= XFER_UDMA_0)
udma_enable(drive, 1, 1);
} else
udma_enable(drive, 0, 1);
......@@ -262,19 +235,17 @@ static ide_startstop_t do_reset1(struct ata_device *, int); /* needed below */
* Poll the interface for completion every 50ms during an ATAPI drive reset
* operation. If the drive has not yet responded, and we have not yet hit our
* maximum waiting time, then the timer is restarted for another 50ms.
*
* Channel lock should be held.
*/
static ide_startstop_t atapi_reset_pollfunc(struct ata_device *drive, struct request *__rq)
{
struct ata_channel *ch = drive->channel;
int ret = ide_stopped;
int ret = ATA_OP_FINISHED;
ata_select(drive, 10);
if (!ata_status(drive, 0, BUSY_STAT)) {
if (time_before(jiffies, ch->poll_timeout)) {
ata_set_handler(drive, atapi_reset_pollfunc, HZ/20, NULL);
ret = ide_started; /* continue polling */
ret = ATA_OP_CONTINUES; /* continue polling */
} else {
ch->poll_timeout = 0; /* end of polling */
printk("%s: ATAPI reset timed out, status=0x%02x\n", drive->name, drive->status);
......@@ -285,7 +256,7 @@ static ide_startstop_t atapi_reset_pollfunc(struct ata_device *drive, struct req
printk("%s: ATAPI reset complete\n", drive->name);
ch->poll_timeout = 0; /* done polling */
ret = ide_stopped;
ret = ATA_OP_FINISHED;
}
return ret;
......@@ -295,8 +266,6 @@ static ide_startstop_t atapi_reset_pollfunc(struct ata_device *drive, struct req
* Poll the interface for completion every 50ms during an ata reset operation.
* If the drives have not yet responded, and we have not yet hit our maximum
* waiting time, then the timer is restarted for another 50ms.
*
* Channel lock should be held.
*/
static ide_startstop_t reset_pollfunc(struct ata_device *drive, struct request *__rq)
{
......@@ -306,15 +275,17 @@ static ide_startstop_t reset_pollfunc(struct ata_device *drive, struct request *
if (!ata_status(drive, 0, BUSY_STAT)) {
if (time_before(jiffies, ch->poll_timeout)) {
ata_set_handler(drive, reset_pollfunc, HZ/20, NULL);
ret = ide_started; /* continue polling */
ret = ATA_OP_CONTINUES; /* continue polling */
} else {
ch->poll_timeout = 0; /* done polling */
printk("%s: reset timed out, status=0x%02x\n", ch->name, drive->status);
++drive->failures;
ret = ide_stopped;
ret = ATA_OP_FINISHED;
}
} else {
u8 stat;
ch->poll_timeout = 0; /* done polling */
printk("%s: reset: ", ch->name);
if ((stat = GET_ERR()) == 1) {
printk("success\n");
......@@ -343,11 +314,10 @@ static ide_startstop_t reset_pollfunc(struct ata_device *drive, struct request *
++drive->failures;
}
ret = ide_stopped;
ret = ATA_OP_FINISHED;
}
ch->poll_timeout = 0; /* done polling */
return ide_stopped;
return ret;
}
/*
......@@ -364,16 +334,14 @@ static ide_startstop_t reset_pollfunc(struct ata_device *drive, struct request *
 * Equally poor, though, is the fact that this may take a very long time to
 * complete (up to 30 seconds worst case). So, instead of busy-waiting here
* for it, we set a timer to poll at 50ms intervals.
*
* Channel lock should be held.
*/
static ide_startstop_t do_reset1(struct ata_device *drive, int try_atapi)
{
unsigned int unit;
unsigned long flags;
struct ata_channel *ch = drive->channel;
/* FIXME: --bzolnier */
__save_flags(flags); /* local CPU only */
__cli(); /* local CPU only */
......@@ -387,7 +355,7 @@ static ide_startstop_t do_reset1(struct ata_device *drive, int try_atapi)
ata_set_handler(drive, atapi_reset_pollfunc, HZ/20, NULL);
__restore_flags(flags); /* local CPU only */
return ide_started;
return ATA_OP_CONTINUES;
}
}
......@@ -400,7 +368,7 @@ static ide_startstop_t do_reset1(struct ata_device *drive, int try_atapi)
__restore_flags(flags); /* local CPU only */
return ide_started;
return ATA_OP_CONTINUES;
}
static inline u32 read_24(struct ata_device *drive)
......@@ -467,6 +435,7 @@ u8 ata_dump(struct ata_device *drive, struct request * rq, const char *msg)
unsigned long flags;
u8 err = 0;
/* FIXME: --bzolnier */
__save_flags (flags); /* local CPU only */
ide__sti(); /* local CPU only */
......@@ -554,7 +523,7 @@ static int do_recalibrate(struct ata_device *drive)
{
if (drive->type != ATA_DISK)
return ide_stopped;
return ATA_OP_FINISHED;
if (!IS_PDC4030_DRIVE) {
struct ata_taskfile args;
......@@ -563,17 +532,15 @@ static int do_recalibrate(struct ata_device *drive)
memset(&args, 0, sizeof(args));
args.taskfile.sector_count = drive->sect;
args.cmd = WIN_RESTORE;
ide_raw_taskfile(drive, &args);
ide_raw_taskfile(drive, &args, NULL);
printk(KERN_INFO "%s: done!\n", drive->name);
}
return IS_PDC4030_DRIVE ? ide_stopped : ide_started;
return IS_PDC4030_DRIVE ? ATA_OP_FINISHED : ATA_OP_CONTINUES;
}
/*
* Take action based on the error returned by the drive.
*
* FIXME: Channel lock should be held.
*/
ide_startstop_t ata_error(struct ata_device *drive, struct request *rq, const char *msg)
{
......@@ -581,14 +548,16 @@ ide_startstop_t ata_error(struct ata_device *drive, struct request *rq, const ch
u8 stat = drive->status;
err = ata_dump(drive, rq, msg);
/* FIXME: at least !drive check is bogus --bzolnier */
if (!drive || !rq)
return ide_stopped;
return ATA_OP_FINISHED;
/* retry only "normal" I/O: */
if (!(rq->flags & REQ_CMD)) {
rq->errors = 1;
return ide_stopped;
return ATA_OP_FINISHED;
}
/* other bits are useless when BUSY */
......@@ -599,7 +568,7 @@ ide_startstop_t ata_error(struct ata_device *drive, struct request *rq, const ch
/* err has different meaning on cdrom and tape */
if (err == ABRT_ERR) {
if (drive->select.b.lba && IN_BYTE(IDE_COMMAND_REG) == WIN_SPECIFY)
return ide_stopped; /* some newer drives don't support WIN_SPECIFY */
return ATA_OP_FINISHED; /* some newer drives don't support WIN_SPECIFY */
} else if ((err & (ABRT_ERR | ICRC_ERR)) == (ABRT_ERR | ICRC_ERR))
drive->crc_count++; /* UDMA crc error -- just retry the operation */
else if (err & (BBD_ERR | ECC_ERR)) /* retries won't help these */
......@@ -617,7 +586,6 @@ ide_startstop_t ata_error(struct ata_device *drive, struct request *rq, const ch
if (rq->errors >= ERROR_MAX) {
printk(KERN_ERR "%s: max number of retries exceeded!\n", drive->name);
/* FIXME: make sure all end_request implementations are lock free */
if (ata_ops(drive) && ata_ops(drive)->end_request)
ata_ops(drive)->end_request(drive, rq, 0);
else
......@@ -627,10 +595,11 @@ ide_startstop_t ata_error(struct ata_device *drive, struct request *rq, const ch
if ((rq->errors & ERROR_RESET) == ERROR_RESET)
return do_reset1(drive, 1);
if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
/* FIXME: tries to acquire the channel lock -Zwane */
return do_recalibrate(drive);
}
return ide_stopped;
return ATA_OP_FINISHED;
}
/*
......@@ -674,8 +643,9 @@ static ide_startstop_t start_request(struct ata_device *drive, struct request *r
block = 1; /* redirect MBR access to EZ-Drive partn table */
ata_select(drive, 0);
if (ata_status_poll(drive, drive->ready_stat, BUSY_STAT | DRQ_STAT,
WAIT_READY, rq, &ret)) {
ret = ata_status_poll(drive, drive->ready_stat, BUSY_STAT | DRQ_STAT,
WAIT_READY, rq);
if (ret != ATA_OP_READY) {
printk(KERN_WARNING "%s: drive not ready for command\n", drive->name);
goto kill_rq;
......@@ -695,38 +665,17 @@ static ide_startstop_t start_request(struct ata_device *drive, struct request *r
ret = ata_ops(drive)->do_request(drive, rq, block);
} else {
__ata_end_request(drive, rq, 0, 0);
ret = ide_stopped;
ret = ATA_OP_FINISHED;
}
return ret;
kill_rq:
if (ata_ops(drive)) {
if (ata_ops(drive)->end_request) {
spin_unlock_irq(ch->lock);
ata_ops(drive)->end_request(drive, rq, 0);
spin_lock_irq(ch->lock);
} else
__ata_end_request(drive, rq, 0, 0);
} else
if (ata_ops(drive) && ata_ops(drive)->end_request)
ata_ops(drive)->end_request(drive, rq, 0);
else
__ata_end_request(drive, rq, 0, 0);
return ide_stopped;
}
ide_startstop_t restart_request(struct ata_device *drive)
{
struct ata_channel *ch = drive->channel;
unsigned long flags;
int ret;
spin_lock_irqsave(ch->lock, flags);
ch->handler = NULL;
del_timer(&ch->timer);
ret = start_request(drive, drive->rq);
spin_unlock_irqrestore(ch->lock, flags);
return ret;
return ATA_OP_FINISHED;
}
/*
......@@ -758,7 +707,7 @@ static unsigned long longest_sleep(struct ata_channel *channel)
/* This device is sleeping and waiting to be serviced
* later than any other device we checked thus far.
*/
if (drive->sleep && (!sleep || time_after(sleep, drive->sleep)))
if (drive->sleep && (!sleep || time_after(drive->sleep, sleep)))
sleep = drive->sleep;
}
......@@ -784,7 +733,7 @@ static struct ata_device *choose_urgent_device(struct ata_channel *channel)
/* There are no request pending for this device.
*/
if (list_empty(&drive->queue.queue_head))
if (blk_queue_empty(&drive->queue))
continue;
/* This device still wants to remain idle.
......@@ -834,124 +783,119 @@ static struct ata_device *choose_urgent_device(struct ata_channel *channel)
return NULL;
}
/*
* Feed commands to a drive until it barfs. Called with queue lock held and
* busy channel.
* Issue a new request.
* Caller must have already done spin_lock_irqsave(channel->lock, ...)
*/
static void queue_commands(struct ata_device *drive)
static void do_request(struct ata_channel *channel)
{
struct ata_channel *ch = drive->channel;
ide_startstop_t startstop = -1;
ide_get_lock(&ide_irq_lock, ata_irq_request, channel);/* for atari only: POSSIBLY BROKEN HERE(?) */
__cli(); /* necessary paranoia: ensure IRQs are masked on local CPU */
for (;;) {
while (!test_and_set_bit(IDE_BUSY, channel->active)) {
struct ata_channel *ch;
struct ata_device *drive;
struct request *rq = NULL;
ide_startstop_t startstop;
int i;
if (!test_bit(IDE_BUSY, ch->active))
printk(KERN_ERR "%s: error: not busy while queueing!\n", drive->name);
/* Abort early if we can't queue another command. for non
* tcq, ata_can_queue is always 1 since we never get here
* unless the drive is idle.
*/
if (!ata_can_queue(drive)) {
if (!ata_pending_commands(drive))
clear_bit(IDE_BUSY, ch->active);
break;
}
drive->sleep = 0;
/* this will clear IDE_BUSY, if appropriate */
drive = choose_urgent_device(channel);
if (test_bit(IDE_DMA, ch->active)) {
printk(KERN_ERR "%s: error: DMA in progress...\n", drive->name);
if (!drive)
break;
}
/* There's a small window between where the queue could be
* replugged while we are in here when using tcq (in which
* case the queue is probably empty anyways...), so check
* and leave if appropriate. When not using tcq, this is
* still a severe BUG!
/* Remember the last drive we were acting on.
*/
if (blk_queue_plugged(&drive->queue)) {
BUG_ON(!drive->using_tcq);
break;
}
if (!(rq = elv_next_request(&drive->queue))) {
if (!ata_pending_commands(drive))
clear_bit(IDE_BUSY, ch->active);
drive->rq = NULL;
break;
}
ch = drive->channel;
ch->drive = drive;
/* If there are queued commands, we can't start a non-fs
* request (really, a non-queuable command) until the
* queue is empty.
/* Make sure that all drives on channels sharing the IRQ line
* with us won't generate IRQ's during our activity.
*/
if (!(rq->flags & REQ_CMD) && ata_pending_commands(drive))
break;
for (i = 0; i < MAX_HWIFS; ++i) {
struct ata_channel *tmp = &ide_hwifs[i];
int j;
drive->rq = rq;
if (!tmp->present)
continue;
ide__sti(); /* allow other IRQs while we start this request */
startstop = start_request(drive, rq);
if (ch->lock != tmp->lock)
continue;
/* command started, we are busy */
if (startstop == ide_started)
break;
/* Only care if there is any drive on the channel in
* question.
*/
for (j = 0; j < MAX_DRIVES; ++j) {
struct ata_device * other = &tmp->drives[j];
/* start_request() can return either ide_stopped (no command
* was started), ide_started (command started, don't queue
* more), or ide_released (command started, try and queue
* more).
if (other->present)
ata_irq_enable(other, 0);
}
}
/*
* Feed commands to a drive until it barfs.
*/
#if 0
if (startstop == ide_stopped)
set_bit(IDE_BUSY, &hwgroup->flags);
#endif
do {
if (!test_bit(IDE_BUSY, ch->active))
printk(KERN_ERR "%s: error: not busy while queueing!\n", drive->name);
}
}
/* Abort early if we can't queue another command. for
* non tcq, ata_can_queue is always 1 since we never
* get here unless the drive is idle.
*/
if (!ata_can_queue(drive)) {
if (!ata_pending_commands(drive))
clear_bit(IDE_BUSY, ch->active);
break;
}
/*
* Issue a new request.
* Caller must have already done spin_lock_irqsave(channel->lock, ...)
*/
static void do_request(struct ata_channel *channel)
{
ide_get_lock(&ide_irq_lock, ata_irq_request, channel);/* for atari only: POSSIBLY BROKEN HERE(?) */
// __cli(); /* necessary paranoia: ensure IRQs are masked on local CPU */
drive->sleep = 0;
while (!test_and_set_bit(IDE_BUSY, channel->active)) {
struct ata_channel *ch;
struct ata_device *drive;
if (test_bit(IDE_DMA, ch->active)) {
printk(KERN_ERR "%s: error: DMA in progress...\n", drive->name);
break;
}
/* this will clear IDE_BUSY, if appropriate */
drive = choose_urgent_device(channel);
/* There's a small window between where the queue could
* be replugged while we are in here when using tcq (in
* which case the queue is probably empty anyways...),
* so check and leave if appropriate. When not using
* tcq, this is still a severe BUG!
*/
if (blk_queue_plugged(&drive->queue)) {
BUG_ON(!drive->using_tcq);
break;
}
if (!drive)
break;
if (!(rq = elv_next_request(&drive->queue))) {
if (!ata_pending_commands(drive))
clear_bit(IDE_BUSY, ch->active);
drive->rq = NULL;
break;
}
ch = drive->channel;
/* If there are queued commands, we can't start a
* non-fs request (really, a non-queuable command)
* until the queue is empty.
*/
if (!(rq->flags & REQ_CMD) && ata_pending_commands(drive))
break;
/* Disable interrupts from the drive on the previous channel.
*
* FIXME: This should be only done if we are indeed sharing the same
* interrupt line with it.
*
* FIXME: check this! It appears to act on the current channel!
*/
if (ch != channel && channel->sharing_irq && ch->irq == channel->irq)
ata_irq_enable(drive, 0);
drive->rq = rq;
/* Remember the last drive we were acting on.
*/
ch->drive = drive;
spin_unlock(ch->lock);
ide__sti(); /* allow other IRQs while we start this request */
startstop = start_request(drive, rq);
spin_lock_irq(ch->lock);
queue_commands(drive);
/* command started, we are busy */
} while (startstop != ATA_OP_CONTINUES);
/* make sure the BUSY bit is set */
/* FIXME: perhaps there is some place where we fail to set it? */
// set_bit(IDE_BUSY, ch->active);
}
}
void do_ide_request(request_queue_t *q)
......@@ -975,7 +919,7 @@ void ide_timer_expiry(unsigned long data)
del_timer(&ch->timer);
if (!ch->drive) {
printk(KERN_ERR "%s: IRQ handler was NULL\n", __FUNCTION__);
printk(KERN_ERR "%s: channel->drive was NULL\n", __FUNCTION__);
ch->handler = NULL;
} else if (!ch->handler) {
......@@ -995,17 +939,20 @@ void ide_timer_expiry(unsigned long data)
/* paranoia */
if (!test_and_set_bit(IDE_BUSY, ch->active))
printk(KERN_ERR "%s: %s: IRQ handler was not busy?!\n",
printk(KERN_ERR "%s: %s: channel was not busy?!\n",
drive->name, __FUNCTION__);
if (ch->expiry) {
unsigned long wait;
/* continue */
if ((wait = ch->expiry(drive, drive->rq)) != 0) {
ret = ch->expiry(drive, drive->rq, &wait);
if (ret == ATA_OP_CONTINUES) {
/* reengage timer */
ch->timer.expires = jiffies + wait;
add_timer(&ch->timer);
if (wait) {
ch->timer.expires = jiffies + wait;
add_timer(&ch->timer);
}
spin_unlock_irqrestore(ch->lock, flags);
......@@ -1021,14 +968,14 @@ void ide_timer_expiry(unsigned long data)
handler = ch->handler;
ch->handler = NULL;
spin_unlock(ch->lock);
ch = drive->channel;
spin_unlock(ch->lock);
#if DISABLE_IRQ_NOSYNC
disable_irq_nosync(ch->irq);
#else
disable_irq(ch->irq); /* disable_irq_nosync ?? */
#endif
/* FIXME: IRQs are already disabled by spin_lock_irqsave() --bzolnier */
__cli(); /* local CPU only, as if we were handling an interrupt */
if (ch->poll_timeout) {
ret = handler(drive, drive->rq);
......@@ -1072,18 +1019,16 @@ void ide_timer_expiry(unsigned long data)
rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
rq->buffer = NULL;
}
ret = ide_stopped;
ret = ATA_OP_FINISHED;
} else
ret = ata_error(drive, drive->rq, "irq timeout");
enable_irq(ch->irq);
spin_lock_irq(ch->lock);
if (ret == ide_stopped)
if (ret == ATA_OP_FINISHED)
clear_bit(IDE_BUSY, ch->active);
/* Reenter the request handling engine */
do_request(ch);
}
......@@ -1153,7 +1098,7 @@ void ata_irq_request(int irq, void *data, struct pt_regs *regs)
struct ata_channel *ch = data;
unsigned long flags;
struct ata_device *drive;
ata_handler_t *handler = ch->handler;
ata_handler_t *handler;
ide_startstop_t startstop;
spin_lock_irqsave(ch->lock, flags);
......@@ -1161,6 +1106,7 @@ void ata_irq_request(int irq, void *data, struct pt_regs *regs)
if (!ide_ack_intr(ch))
goto out_lock;
handler = ch->handler;
if (handler == NULL || ch->poll_timeout != 0) {
#if 0
printk(KERN_INFO "ide: unexpected interrupt %d %d\n", ch->unit, irq);
......@@ -1214,10 +1160,11 @@ void ata_irq_request(int irq, void *data, struct pt_regs *regs)
spin_unlock(ch->lock);
if (ch->unmask)
ide__sti(); /* local CPU only */
ide__sti();
/* service this interrupt, may set handler for next interrupt */
startstop = handler(drive, drive->rq);
spin_lock_irq(ch->lock);
/*
......@@ -1227,15 +1174,14 @@ void ata_irq_request(int irq, void *data, struct pt_regs *regs)
* same irq as is currently being serviced here, and Linux
* won't allow another of the same (on any CPU) until we return.
*/
if (startstop == ide_stopped) {
if (startstop == ATA_OP_FINISHED) {
if (!ch->handler) { /* paranoia */
clear_bit(IDE_BUSY, ch->active);
do_request(ch);
} else {
printk("%s: %s: huh? expected NULL handler on exit\n", drive->name, __FUNCTION__);
}
} else if (startstop == ide_released)
queue_commands(drive);
}
out_lock:
spin_unlock_irqrestore(ch->lock, flags);
......@@ -1386,9 +1332,6 @@ EXPORT_SYMBOL(ata_set_handler);
EXPORT_SYMBOL(ata_dump);
EXPORT_SYMBOL(ata_error);
/* FIXME: this is a truly bad name */
EXPORT_SYMBOL(restart_request);
EXPORT_SYMBOL(ata_end_request);
EXPORT_SYMBOL(__ata_end_request);
EXPORT_SYMBOL(ide_stall_queue);
......
......@@ -47,7 +47,6 @@ static int do_cmd_ioctl(struct ata_device *drive, unsigned long arg)
u8 *argbuf = vals;
int argsize = 4;
struct ata_taskfile args;
struct request req;
/* Second phase.
*/
......@@ -80,15 +79,7 @@ static int do_cmd_ioctl(struct ata_device *drive, unsigned long arg)
/* Issue ATA command and wait for completion.
*/
args.command_type = IDE_DRIVE_TASK_NO_DATA;
args.XXX_handler = ata_special_intr;
memset(&req, 0, sizeof(req));
req.flags = REQ_SPECIAL;
req.special = &args;
req.buffer = argbuf + 4;
err = ide_do_drive_cmd(drive, &req, ide_wait);
err = ide_raw_taskfile(drive, &args, argbuf + 4);
argbuf[0] = drive->status;
argbuf[1] = args.taskfile.feature;
......@@ -131,9 +122,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
case HDIO_GET_32BIT: {
unsigned long val = drive->channel->io_32bit;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (put_user(val, (unsigned long *) arg))
return -EFAULT;
return 0;
......@@ -181,9 +169,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
case HDIO_GET_UNMASKINTR: {
unsigned long val = drive->channel->unmask;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (put_user(val, (unsigned long *) arg))
return -EFAULT;
......@@ -211,9 +196,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
case HDIO_GET_DMA: {
unsigned long val = drive->using_dma;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (put_user(val, (unsigned long *) arg))
return -EFAULT;
......@@ -245,9 +227,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
struct hd_geometry *loc = (struct hd_geometry *) arg;
unsigned short bios_cyl = drive->bios_cyl; /* truncate */
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!loc || (drive->type != ATA_DISK && drive->type != ATA_FLOPPY))
return -EINVAL;
......@@ -270,9 +249,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
case HDIO_GETGEO_BIG_RAW: {
struct hd_big_geometry *loc = (struct hd_big_geometry *) arg;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (!loc || (drive->type != ATA_DISK && drive->type != ATA_FLOPPY))
return -EINVAL;
......@@ -293,8 +269,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
}
case HDIO_GET_IDENTITY:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (minor(inode->i_rdev) & PARTN_MASK)
return -EINVAL;
......@@ -308,8 +282,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
return 0;
case HDIO_GET_NICE:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
return put_user(drive->dsc_overlap << IDE_NICE_DSC_OVERLAP |
drive->atapi_overlap << IDE_NICE_ATAPI_OVERLAP,
......@@ -332,8 +304,6 @@ int ata_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned
return 0;
case HDIO_GET_BUSSTATE:
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if (put_user(drive->channel->bus_state, (long *)arg))
return -EFAULT;
......
......@@ -179,14 +179,6 @@ static int it8172_tune_chipset(struct ata_device *drive, u8 speed)
return ide_config_drive_speed(drive, speed);
}
static int it8172_udma_setup(struct ata_device *drive)
{
u8 speed = ata_timing_mode(drive, XFER_PIO | XFER_EPIO |
XFER_SWDMA | XFER_MWDMA | XFER_UDMA);
return !it8172_tune_chipset(drive, speed);
}
#endif /* defined(CONFIG_BLK_DEV_IDEDMA) && (CONFIG_IT8172_TUNING) */
......@@ -216,15 +208,11 @@ static void __init ide_init_it8172(struct ata_channel *hwif)
if (!hwif->dma_base)
return;
#ifndef CONFIG_BLK_DEV_IDEDMA
hwif->autodma = 0;
#else /* CONFIG_BLK_DEV_IDEDMA */
# ifdef CONFIG_IT8172_TUNING
hwif->autodma = 1;
hwif->dmaproc = &it8172_dmaproc;
hwif->modes_map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA | XFER_UDMA;
hwif->udma_setup = udma_generic_setup;
hwif->speedproc = &it8172_tune_chipset;
# endif
#endif
cmdBase = dev->resource[0].start;
ctrlBase = dev->resource[1].start;
......
......@@ -1074,7 +1074,8 @@ int ide_register_subdriver(struct ata_device *drive, struct ata_operations *driv
spin_unlock_irqrestore(&ide_lock, flags);
/* Default autotune or requested autotune */
if (drive->autotune != 2) {
if (drive->channel->udma_setup) {
struct ata_channel *ch = drive->channel;
if (ch->udma_setup) {
/*
* Force DMAing for the beginning of the check. Some
......@@ -1085,7 +1086,7 @@ int ide_register_subdriver(struct ata_device *drive, struct ata_operations *driv
*/
udma_enable(drive, 0, 0);
drive->channel->udma_setup(drive);
ch->udma_setup(drive, ch->modes_map);
#ifdef CONFIG_BLK_DEV_IDE_TCQ_DEFAULT
udma_tcq_enable(drive, 1);
#endif
......
......@@ -105,21 +105,21 @@ static int ns87415_udma_init(struct ata_device *drive, struct request *rq)
ns87415_prepare_drive(drive, 1); /* select DMA xfer */
if (udma_pci_init(drive, rq)) /* use standard DMA stuff */
return ide_started;
return ATA_OP_CONTINUES;
ns87415_prepare_drive(drive, 0); /* DMA failed: select PIO xfer */
return ide_stopped;
return ATA_OP_FINISHED;
}
static int ns87415_udma_setup(struct ata_device *drive)
static int ns87415_udma_setup(struct ata_device *drive, int map)
{
if (drive->type != ATA_DISK) {
udma_enable(drive, 0, 0);
return 0;
}
return udma_pci_setup(drive);
return udma_pci_setup(drive, map);
}
#endif
......
......@@ -27,6 +27,8 @@
#include <linux/ide.h>
#include <linux/delay.h>
#include "ata-timing.h"
#include <asm/io.h>
#include <asm/irq.h>
......@@ -44,18 +46,9 @@ ide_startstop_t ide_dma_intr(struct ata_device *drive, struct request *rq)
if (ata_status(drive, DRIVE_READY, drive->bad_wstat | DRQ_STAT)) {
if (!dma_stat) {
unsigned long flags;
struct ata_channel *ch = drive->channel;
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
__ata_end_request(drive, rq, 1, rq->nr_sectors);
spin_unlock_irqrestore(ch->lock, flags);
return ide_stopped;
return ATA_OP_FINISHED;
}
printk(KERN_ERR "%s: dma_intr: bad DMA status (dma_stat=%x)\n",
drive->name, dma_stat);
......@@ -128,7 +121,7 @@ static int build_sglist(struct ata_device *drive, struct request *rq)
/*
* 1 dma-ing, 2 error, 4 intr
*/
static int dma_timer_expiry(struct ata_device *drive, struct request *rq)
static ide_startstop_t dma_timer_expiry(struct ata_device *drive, struct request *rq, unsigned long *wait)
{
/* FIXME: What's that? */
u8 dma_stat = inb(drive->channel->dma_base + 2);
......@@ -140,15 +133,17 @@ static int dma_timer_expiry(struct ata_device *drive, struct request *rq)
#if 0
drive->expiry = NULL; /* one free ride for now */
#endif
*wait = 0;
if (dma_stat & 2) { /* ERROR */
ata_status(drive, 0, 0);
return ata_error(drive, rq, __FUNCTION__);
}
if (dma_stat & 1) /* DMAing */
return WAIT_CMD;
if (dma_stat & 1) { /* DMAing */
*wait = WAIT_CMD;
return ATA_OP_CONTINUES;
}
return 0;
return ATA_OP_FINISHED;
}
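To summarize the new expiry contract demonstrated above: an expiry function now returns an ide_startstop_t and reports any additional wait time through the pointer argument, which ide_timer_expiry() uses to re-arm the channel timer. A minimal sketch (still_busy() is a placeholder predicate):

	static ide_startstop_t my_expiry(struct ata_device *drive, struct request *rq,
			unsigned long *wait)
	{
		*wait = 0;
		if (still_busy(drive)) {
			*wait = WAIT_CMD;	/* ide_timer_expiry() re-arms the timer */
			return ATA_OP_CONTINUES;
		}
		return ATA_OP_FINISHED;		/* give up, let error handling run */
	}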
int ata_start_dma(struct ata_device *drive, struct request *rq)
......@@ -171,10 +166,73 @@ int ata_start_dma(struct ata_device *drive, struct request *rq)
return 0;
}
/* generic udma_setup() function for drivers having ->speedproc/tuneproc */
int udma_generic_setup(struct ata_device *drive, int map)
{
struct hd_driveid *id = drive->id;
struct ata_channel *ch = drive->channel;
int on = 0;
u8 mode;
if (!id || (drive->type != ATA_DISK && ch->no_atapi_autodma))
return 0;
if ((map & XFER_UDMA_80W) && !eighty_ninty_three(drive))
map &= ~XFER_UDMA_80W;
if ((id->capability & 1) && ch->autodma && ch->speedproc) {
/* Consult the list of known "bad" devices. */
if (udma_black_list(drive))
goto set_dma;
mode = ata_timing_mode(drive, map);
/* Device is UltraDMA capable. */
if (mode & XFER_UDMA) {
if((on = !ch->speedproc(drive, mode)))
goto set_dma;
printk(KERN_WARNING "%s: UDMA auto-tune failed.\n", drive->name);
map &= ~XFER_UDMA_ALL;
mode = ata_timing_mode(drive, map);
}
/* Device is regular DMA capable. */
if (mode & (XFER_SWDMA | XFER_MWDMA)) {
if((on = !ch->speedproc(drive, mode)))
goto set_dma;
printk(KERN_WARNING "%s: DMA auto-tune failed.\n", drive->name);
}
/* FIXME: this seems non-functional --bkz */
/* Consult the list of known "good" devices. */
if (udma_white_list(drive)) {
if (id->eide_dma_time > 150)
goto set_dma;
printk(KERN_INFO "%s: device is on DMA whitelist.\n", drive->name);
// on = 1;
}
/* Revert to PIO. */
if (!on && ch->tuneproc)
ch->tuneproc(drive, 255);
}
set_dma:
udma_enable(drive, on, !on);
return 0;
}
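Host drivers with a ->speedproc/->tuneproc can now point ->udma_setup at this generic helper and describe their capabilities through ->modes_map instead of carrying private probe code. A minimal hookup sketch, following the it8172 and pdc202xx init changes later in this patch:

	hwif->modes_map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA | XFER_UDMA;
	hwif->udma_setup = udma_generic_setup;
	hwif->speedproc = &it8172_tune_chipset;	/* chipset-specific speed programming */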
/*
* Configure a device for DMA operation.
*/
int udma_pci_setup(struct ata_device *drive)
int udma_pci_setup(struct ata_device *drive, int map)
{
int config_allows_dma = 1;
struct hd_driveid *id = drive->id;
......@@ -399,8 +457,6 @@ int udma_new_table(struct ata_device *drive, struct request *rq)
/*
* Teardown mappings after DMA has completed.
*
* Channel lock should be held.
*/
void udma_destroy_table(struct ata_channel *ch)
{
......@@ -411,8 +467,6 @@ void udma_destroy_table(struct ata_channel *ch)
 * Prepare the channel for a DMA transfer. Please note that only the broken
 * Pacific Digital host chip needs the request to be passed there to decide
* about addressing modes.
*
* Channel lock should be held.
*/
void udma_pci_start(struct ata_device *drive, struct request *rq)
{
......@@ -426,9 +480,6 @@ void udma_pci_start(struct ata_device *drive, struct request *rq)
outb(inb(dma_base) | 1, dma_base); /* start DMA */
}
/*
* Channel lock should be held.
*/
int udma_pci_stop(struct ata_device *drive)
{
struct ata_channel *ch = drive->channel;
......@@ -445,8 +496,6 @@ int udma_pci_stop(struct ata_device *drive)
/*
* FIXME: This should be attached to a channel as we can see now!
*
* Channel lock should be held.
*/
int udma_pci_irq_status(struct ata_device *drive)
{
......@@ -533,19 +582,17 @@ void ata_init_dma(struct ata_channel *ch, unsigned long dma_base)
*
* It's exported only for host chips which use it for fallback or (too) late
* capability checking.
*
* Channel lock should be held.
*/
int udma_pci_init(struct ata_device *drive, struct request *rq)
{
u8 cmd;
if (ata_start_dma(drive, rq))
return ide_stopped;
return ATA_OP_FINISHED;
/* No DMA transfers on ATAPI devices. */
if (drive->type != ATA_DISK)
return ide_started;
return ATA_OP_CONTINUES;
if (rq_data_dir(rq) == READ)
cmd = 0x08;
......@@ -560,7 +607,7 @@ int udma_pci_init(struct ata_device *drive, struct request *rq)
udma_start(drive, rq);
return ide_started;
return ATA_OP_CONTINUES;
}
EXPORT_SYMBOL(ide_dma_intr);
......
......@@ -117,7 +117,6 @@ struct ata_pci_device {
unsigned short vendor;
unsigned short device;
unsigned int (*init_chipset)(struct pci_dev *);
unsigned int (*ata66_check)(struct ata_channel *);
void (*init_channel)(struct ata_channel *);
void (*init_dma)(struct ata_channel *, unsigned long);
ide_pci_enablebit_t enablebits[2];
......
......@@ -129,29 +129,30 @@ int check_in_drive_lists(struct ata_device *drive)
return 0;
}
static int pdc202xx_ratemask(struct ata_device *drive)
static int __init pdc202xx_modes_map(struct ata_channel *ch)
{
struct pci_dev *dev = drive->channel->pci_dev;
int map = 0;
if (!eighty_ninty_three(drive))
return XFER_UDMA;
int map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA | XFER_UDMA;
switch(dev->device) {
switch(ch->pci_dev->device) {
case PCI_DEVICE_ID_PROMISE_20276:
case PCI_DEVICE_ID_PROMISE_20275:
case PCI_DEVICE_ID_PROMISE_20269:
map |= XFER_UDMA_133;
case PCI_DEVICE_ID_PROMISE_20268R:
case PCI_DEVICE_ID_PROMISE_20268:
map &= ~XFER_SWDMA;
case PCI_DEVICE_ID_PROMISE_20267:
case PCI_DEVICE_ID_PROMISE_20265:
map |= XFER_UDMA_100;
case PCI_DEVICE_ID_PROMISE_20262:
map |= XFER_UDMA_66;
case PCI_DEVICE_ID_PROMISE_20246:
map |= XFER_UDMA;
if (!ch->udma_four) {
printk(KERN_WARNING "%s: 40-pin cable, speed reduced to UDMA(33) mode.\n", ch->name);
map &= ~XFER_UDMA_80W;
}
}
return map;
}
......@@ -164,9 +165,6 @@ static int pdc202xx_tune_chipset(struct ata_device *drive, byte speed)
u8 DP;
#endif
if (drive->dn > 3) /* FIXME: remove this --bkz */
return -1;
drive_pci = 0x60 + (drive->dn << 2);
if ((drive->type != ATA_DISK) && (speed < XFER_SW_DMA_0))
......@@ -372,48 +370,34 @@ static void pdc202xx_tune_drive(struct ata_device *drive, u8 pio)
}
#ifdef CONFIG_BLK_DEV_IDEDMA
/* FIXME: split this for old & new chipsets (jumpbit) --bkz */
static int config_chipset_for_dma(struct ata_device *drive, byte udma)
static int pdc202xx_tx_udma_setup(struct ata_device *drive, int map)
{
struct hd_driveid *id = drive->id;
struct ata_channel *ch = drive->channel;
u32 indexreg = ch->dma_base + 1;
u32 datareg = indexreg + 2;
u8 adj = (drive->dn % 2) ? 0x08 : 0x00;
if (drive->type != ATA_DISK)
return 0;
/* IORDY_EN & PREFETCH_EN */
if (id->capability & 4)
set_2regs(0x13, (IN_BYTE(datareg)|0x03));
return udma_generic_setup(drive, map);
}
static int pdc202xx_udma_setup(struct ata_device *drive, int map)
{
struct hd_driveid *id = drive->id;
struct hd_driveid *id = drive->id;
struct ata_channel *hwif = drive->channel;
struct hd_driveid *mate_id = hwif->drives[!(drive->dn%2)].id;
struct pci_dev *dev = hwif->pci_dev;
struct pci_dev *dev = hwif->pci_dev;
u32 high_16 = pci_resource_start(dev, 4);
u32 dma_base = hwif->dma_base;
u32 indexreg = dma_base + 1;
u32 datareg = dma_base + 3;
byte adj = (drive->dn%2) ? 0x08 : 0x00;
u8 jumpbit;
u32 drive_conf;
u8 drive_pci = 0, AP, tmp, mode = -1;
u8 CLKSPD, mask = hwif->unit ? 0x08 : 0x02;
int map;
/* UDMA 3, 4, 5 and 6 */
u8 needs_80w = (id->dma_ultra & 0x0078);
switch(dev->device) {
case PCI_DEVICE_ID_PROMISE_20267:
case PCI_DEVICE_ID_PROMISE_20265:
case PCI_DEVICE_ID_PROMISE_20262:
case PCI_DEVICE_ID_PROMISE_20246:
jumpbit = 0;
break;
default: /* chipsets newer then 20267 */
jumpbit = 1;
break;
}
/* FIXME: this check is wrong for 20246 --bkz */
/* */
if (needs_80w && !hwif->udma_four) {
printk(KERN_WARNING "%s: channel requires an 80-pin cable.\n", hwif->name);
printk(KERN_INFO "%s: reduced to UDMA(33) mode.\n", drive->name);
}
if (jumpbit)
goto chipset_is_set;
/*
* Set the control register to use the 66Mhz system
......@@ -427,7 +411,7 @@ static int config_chipset_for_dma(struct ata_device *drive, byte udma)
* FIXME: move this to pdc202xx_tuneproc()
* right now you can't downgrade from U66 to U33 --bkz
*/
if (needs_80w) {
if (id->dma_ultra & 0x0078) { /* UDMA 3, 4, 5 and 6 */
CLKSPD = IN_BYTE(high_16 + PDC_CLK);
/* check cable and mate (must be at least udma3 capable) */
if (!hwif->udma_four ||
......@@ -449,12 +433,11 @@ static int config_chipset_for_dma(struct ata_device *drive, byte udma)
/* FIXME: what if SYNC_ERRDY is enabled for slave
and disabled for master? --bkz */
pci_read_config_byte(dev, drive_pci, &AP);
/* enable SYNC_ERRDY for master and slave (if enabled for master) */
if (!(AP & SYNC_ERRDY_EN)) {
if (drive->dn == 0 || drive->dn == 2) {
/* enable SYNC_ERRDY for master */
if (!(drive->dn % 2)) {
pci_write_config_byte(dev, drive_pci, AP|SYNC_ERRDY_EN);
} else {
/* enable SYNC_ERRDY for slave if enabled for master */
pci_read_config_byte(dev, drive_pci - 4, &tmp);
if (tmp & SYNC_ERRDY_EN)
pci_write_config_byte(dev, drive_pci, AP|SYNC_ERRDY_EN);
......@@ -466,85 +449,26 @@ static int config_chipset_for_dma(struct ata_device *drive, byte udma)
if (drive->type != ATA_DISK)
return 0;
if (jumpbit) {
if (id->capability & 4) { /* IORDY_EN & PREFETCH_EN */
set_2regs(0x13, (IN_BYTE(datareg)|0x03));
}
} else {
pci_read_config_byte(dev, drive_pci, &AP);
if (id->capability & 4) /* IORDY_EN */
pci_write_config_byte(dev, drive_pci, AP|IORDY_EN);
pci_read_config_byte(dev, drive_pci, &AP);
if (drive->type == ATA_DISK) /* PREFETCH_EN */
pci_write_config_byte(dev, drive_pci, AP|PREFETCH_EN);
}
pci_read_config_byte(dev, drive_pci, &AP);
if (id->capability & 4) /* IORDY_EN */
pci_write_config_byte(dev, drive_pci, AP|IORDY_EN);
pci_read_config_byte(dev, drive_pci, &AP);
if (drive->type == ATA_DISK) /* PREFETCH_EN */
pci_write_config_byte(dev, drive_pci, AP|PREFETCH_EN);
if (udma) {
map = pdc202xx_ratemask(drive);
} else {
if (!jumpbit)
map = XFER_SWDMA | XFER_MWDMA;
else
map = XFER_MWDMA;
}
map = hwif->modes_map;
if (!eighty_ninty_three(drive))
map &= ~XFER_UDMA_80W;
mode = ata_timing_mode(drive, map);
if (mode < XFER_SW_DMA_0) {
/* restore original pci-config space */
if (!jumpbit)
pci_write_config_dword(dev, drive_pci, drive_conf);
pci_write_config_dword(dev, drive_pci, drive_conf);
return 0;
}
return !hwif->speedproc(drive, mode);
}
static int pdc202xx_udma_setup(struct ata_device *drive)
{
struct hd_driveid *id = drive->id;
struct ata_channel *hwif = drive->channel;
int on = 0;
int verbose = 1;
if (id && (id->capability & 1) && hwif->autodma) {
/* Consult the list of known "bad" drives */
verbose = 0;
if (udma_black_list(drive))
goto no_dma_set;
if (id->field_valid & 4) {
if (id->dma_ultra & 0x007F) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive, 1);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if ((id->dma_mword & 0x0007) ||
(id->dma_1word & 0x0007)) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
}
} else if (udma_white_list(drive)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
} else goto no_dma_set;
} else if ((id->capability & 8) || (id->field_valid & 2)) {
no_dma_set:
pdc202xx_tune_drive(drive, 255);
}
udma_enable(drive, on, verbose);
return 0;
return udma_generic_setup(drive, map);
}
static void pdc202xx_udma_start(struct ata_device *drive, struct request *rq)
......@@ -725,12 +649,20 @@ static void __init ide_init_pdc202xx(struct ata_channel *hwif)
case PCI_DEVICE_ID_PROMISE_20269:
case PCI_DEVICE_ID_PROMISE_20268:
case PCI_DEVICE_ID_PROMISE_20268R:
hwif->udma_four = pdc202xx_tx_ata66_check(hwif);
hwif->speedproc = &pdc202xx_new_tune_chipset;
hwif->resetproc = &pdc202xx_new_reset;
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base)
hwif->udma_setup = pdc202xx_tx_udma_setup;
#endif
break;
case PCI_DEVICE_ID_PROMISE_20267:
case PCI_DEVICE_ID_PROMISE_20265:
case PCI_DEVICE_ID_PROMISE_20262:
hwif->udma_four = pdc202xx_ata66_check(hwif);
hwif->resetproc = &pdc202xx_reset;
#ifdef CONFIG_BLK_DEV_IDEDMA
/* we need special functions for lba48 */
......@@ -741,6 +673,10 @@ static void __init ide_init_pdc202xx(struct ata_channel *hwif)
#endif
/* FIXME: check whether 20246 works with lba48 --bkz */
case PCI_DEVICE_ID_PROMISE_20246:
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base)
hwif->udma_setup = pdc202xx_udma_setup;
#endif
hwif->speedproc = &pdc202xx_tune_chipset;
default:
break;
......@@ -748,18 +684,15 @@ static void __init ide_init_pdc202xx(struct ata_channel *hwif)
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) {
hwif->modes_map = pdc202xx_modes_map(hwif);
hwif->udma_irq_lost = pdc202xx_bug;
hwif->udma_timeout = pdc202xx_bug;
hwif->udma_setup = pdc202xx_udma_setup;
hwif->highmem = 1;
if (!noautodma)
hwif->autodma = 1;
} else
#endif
{
hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1;
hwif->autodma = 0;
}
}
......@@ -770,7 +703,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_PROMISE,
device: PCI_DEVICE_ID_PROMISE_20246,
init_chipset: pdc202xx_init_chipset,
ata66_check: NULL,
init_channel: ide_init_pdc202xx,
#ifndef CONFIG_PDC202XX_FORCE
enablebits: {{0x50,0x02,0x02}, {0x50,0x04,0x04}},
......@@ -783,7 +715,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_PROMISE,
device: PCI_DEVICE_ID_PROMISE_20262,
init_chipset: pdc202xx_init_chipset,
ata66_check: pdc202xx_ata66_check,
init_channel: ide_init_pdc202xx,
#ifndef CONFIG_PDC202XX_FORCE
enablebits: {{0x50,0x02,0x02}, {0x50,0x04,0x04}},
......@@ -796,7 +727,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_PROMISE,
device: PCI_DEVICE_ID_PROMISE_20265,
init_chipset: pdc202xx_init_chipset,
ata66_check: pdc202xx_ata66_check,
init_channel: ide_init_pdc202xx,
#ifndef CONFIG_PDC202XX_FORCE
enablebits: {{0x50,0x02,0x02}, {0x50,0x04,0x04}},
......@@ -811,7 +741,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_PROMISE,
device: PCI_DEVICE_ID_PROMISE_20267,
init_chipset: pdc202xx_init_chipset,
ata66_check: pdc202xx_ata66_check,
init_channel: ide_init_pdc202xx,
#ifndef CONFIG_PDC202XX_FORCE
enablebits: {{0x50,0x02,0x02}, {0x50,0x04,0x04}},
......@@ -824,7 +753,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_PROMISE,
device: PCI_DEVICE_ID_PROMISE_20268,
init_chipset: pdc202xx_tx_init_chipset,
ata66_check: pdc202xx_tx_ata66_check,
init_channel: ide_init_pdc202xx,
bootable: OFF_BOARD,
flags: ATA_F_IRQ | ATA_F_DMA
......@@ -837,7 +765,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_PROMISE,
device: PCI_DEVICE_ID_PROMISE_20268R,
init_chipset: pdc202xx_tx_init_chipset,
ata66_check: pdc202xx_tx_ata66_check,
init_channel: ide_init_pdc202xx,
bootable: OFF_BOARD,
flags: ATA_F_IRQ | ATA_F_DMA
......@@ -846,7 +773,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_PROMISE,
device: PCI_DEVICE_ID_PROMISE_20269,
init_chipset: pdc202xx_tx_init_chipset,
ata66_check: pdc202xx_tx_ata66_check,
init_channel: ide_init_pdc202xx,
bootable: OFF_BOARD,
flags: ATA_F_IRQ | ATA_F_DMA
......@@ -855,7 +781,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_PROMISE,
device: PCI_DEVICE_ID_PROMISE_20275,
init_chipset: pdc202xx_tx_init_chipset,
ata66_check: pdc202xx_tx_ata66_check,
init_channel: ide_init_pdc202xx,
bootable: OFF_BOARD,
flags: ATA_F_IRQ | ATA_F_DMA
......@@ -864,7 +789,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_PROMISE,
device: PCI_DEVICE_ID_PROMISE_20276,
init_chipset: pdc202xx_tx_init_chipset,
ata66_check: pdc202xx_tx_ata66_check,
init_channel: ide_init_pdc202xx,
bootable: OFF_BOARD,
flags: ATA_F_IRQ | ATA_F_DMA
......
......@@ -230,7 +230,7 @@ int __init setup_pdc4030(struct ata_channel *hwif)
struct ata_channel *hwif2;
struct dc_ident ident;
int i;
ide_startstop_t startstop;
ide_startstop_t ret;
if (!hwif)
return 0;
......@@ -250,8 +250,8 @@ int __init setup_pdc4030(struct ata_channel *hwif)
/* FIXME: Make this go away. */
spin_lock_irq(hwif->lock);
if (ata_status_poll(drive, DATA_READY, BAD_W_STAT,
WAIT_DRQ, NULL, &startstop)) {
ret = ata_status_poll(drive, DATA_READY, BAD_W_STAT, WAIT_DRQ, NULL);
if (ret != ATA_OP_READY) {
printk(KERN_INFO
"%s: Failed Promise read config!\n",hwif->name);
spin_unlock_irq(hwif->lock);
......@@ -414,10 +414,8 @@ static ide_startstop_t promise_read_intr(struct ata_device *drive, struct reques
rq->errors = 0;
rq->nr_sectors -= nsect;
total_remaining = rq->nr_sectors;
if ((rq->current_nr_sectors -= nsect) <= 0) {
/* FIXME: no queue locking above! */
ata_end_request(drive, rq, 1);
}
if ((rq->current_nr_sectors -= nsect) <= 0)
__ata_end_request(drive, rq, 1, 0);
/*
* Now the data has been read in, do the following:
......@@ -437,27 +435,18 @@ static ide_startstop_t promise_read_intr(struct ata_device *drive, struct reques
if (drive->status & DRQ_STAT)
goto read_again;
if (drive->status & BUSY_STAT) {
unsigned long flags;
struct ata_channel *ch = drive->channel;
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
ata_set_handler(drive, promise_read_intr, WAIT_CMD, NULL);
spin_unlock_irqrestore(ch->lock, flags);
#ifdef DEBUG_READ
printk(KERN_DEBUG "%s: promise_read: waiting for"
"interrupt\n", drive->name);
#endif
return ide_started;
return ATA_OP_CONTINUES;
}
printk(KERN_ERR "%s: Eeek! promise_read_intr: sectors left "
"!DRQ !BUSY\n", drive->name);
return ata_error(drive, rq, "promise read intr");
}
return ide_stopped;
return ATA_OP_FINISHED;
}
/*
......@@ -470,20 +459,13 @@ static ide_startstop_t promise_read_intr(struct ata_device *drive, struct reques
*/
static ide_startstop_t promise_complete_pollfunc(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
if (!ata_status(drive, 0, BUSY_STAT)) {
if (time_before(jiffies, ch->poll_timeout)) {
/* FIXME: this locking should encompass the above
* register file access too.
*/
spin_lock_irqsave(ch->lock, flags);
ata_set_handler(drive, promise_complete_pollfunc, HZ/100, NULL);
spin_unlock_irqrestore(ch->lock, flags);
return ide_started; /* continue polling... */
return ATA_OP_CONTINUES; /* continue polling... */
}
ch->poll_timeout = 0;
printk(KERN_ERR "%s: completion timeout - still busy!\n",
......@@ -495,15 +477,9 @@ static ide_startstop_t promise_complete_pollfunc(struct ata_device *drive, struc
#ifdef DEBUG_WRITE
printk(KERN_DEBUG "%s: Write complete - end_request\n", drive->name);
#endif
/* FIXME: this locking should encompass the above
* register file access too.
*/
spin_lock_irqsave(ch->lock, flags);
__ata_end_request(drive, rq, 1, rq->nr_sectors);
spin_unlock_irqrestore(ch->lock, flags);
return ide_stopped;
return ATA_OP_FINISHED;
}
/*
......@@ -563,21 +539,17 @@ int promise_multwrite(struct ata_device *drive, struct request *rq, unsigned int
*/
static ide_startstop_t promise_write_pollfunc(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
spin_lock_irqsave(ch->lock, flags);
if (inb(IDE_NSECTOR_REG) != 0) {
if (time_before(jiffies, ch->poll_timeout)) {
ata_set_handler(drive, promise_write_pollfunc, HZ/100, NULL);
spin_unlock_irqrestore(ch->lock, flags);
return ide_started; /* continue polling... */
return ATA_OP_CONTINUES; /* continue polling... */
}
ch->poll_timeout = 0;
printk(KERN_ERR "%s: write timed out!\n", drive->name);
ata_status(drive, 0, 0);
spin_unlock_irqrestore(ch->lock, flags);
return ata_error(drive, rq, "write timeout");
}
......@@ -592,8 +564,8 @@ static ide_startstop_t promise_write_pollfunc(struct ata_device *drive, struct r
printk(KERN_DEBUG "%s: Done last 4 sectors - status = %02x\n",
drive->name, drive->status);
#endif
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ATA_OP_CONTINUES;
}
/*
......@@ -605,7 +577,6 @@ static ide_startstop_t promise_write_pollfunc(struct ata_device *drive, struct r
*/
static ide_startstop_t promise_do_write(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
#ifdef DEBUG_WRITE
......@@ -613,43 +584,35 @@ static ide_startstop_t promise_do_write(struct ata_device *drive, struct request
"buffer=%p\n", drive->name, rq->sector,
rq->sector + rq->nr_sectors - 1, rq->buffer);
#endif
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
/*
* If there are more than 4 sectors to transfer, do n-4 then go into
* the polling strategy as defined above.
*/
if (rq->nr_sectors > 4) {
if (promise_multwrite(drive, rq, rq->nr_sectors - 4)) {
spin_unlock_irqrestore(ch->lock, flags);
return ide_stopped;
return ATA_OP_FINISHED;
}
ch->poll_timeout = jiffies + WAIT_WORSTCASE;
ata_set_handler(drive, promise_write_pollfunc, HZ/100, NULL);
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ATA_OP_CONTINUES;
} else {
/*
* There are 4 or fewer sectors to transfer, do them all in one go
* and wait for NOT BUSY.
*/
if (promise_multwrite(drive, rq, rq->nr_sectors)) {
spin_unlock_irqrestore(ch->lock, flags);
return ide_stopped;
}
if (promise_multwrite(drive, rq, rq->nr_sectors))
return ATA_OP_FINISHED;
ch->poll_timeout = jiffies + WAIT_WORSTCASE;
ata_set_handler(drive, promise_complete_pollfunc, HZ/100, NULL);
spin_unlock_irqrestore(ch->lock, flags);
#ifdef DEBUG_WRITE
printk(KERN_DEBUG "%s: promise_write: <= 4 sectors, "
"status = %02x\n", drive->name, drive->status);
#endif
return ide_started;
return ATA_OP_CONTINUES;
}
}
......@@ -666,9 +629,9 @@ ide_startstop_t do_pdc4030_io(struct ata_device *drive, struct ata_taskfile *arg
/* Check that it's a regular command. If not, bomb out early. */
if (!(rq->flags & REQ_CMD)) {
blk_dump_rq_flags(rq, "pdc4030 bad flags");
ata_end_request(drive, rq, 0);
__ata_end_request(drive, rq, 0, 0);
return ide_stopped;
return ATA_OP_FINISHED;
}
ata_irq_enable(drive, 1);
......@@ -701,34 +664,23 @@ ide_startstop_t do_pdc4030_io(struct ata_device *drive, struct ata_taskfile *arg
return promise_read_intr(drive, rq);
}
if (inb(IDE_SELECT_REG) & 0x01) {
unsigned long flags;
struct ata_channel *ch = drive->channel;
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
#ifdef DEBUG_READ
printk(KERN_DEBUG "%s: read: waiting for "
"interrupt\n", drive->name);
#endif
ata_set_handler(drive, promise_read_intr, WAIT_CMD, NULL);
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ATA_OP_CONTINUES;
}
udelay(1);
} while (time_before(jiffies, timeout));
printk(KERN_ERR "%s: reading: No DRQ and not waiting - Odd!\n",
drive->name);
return ide_stopped;
return ATA_OP_FINISHED;
case WRITE: {
ide_startstop_t startstop;
unsigned long flags;
struct ata_channel *ch = drive->channel;
ide_startstop_t ret;
/*
* Strategy on write is: look for the DRQ that should have been
......@@ -740,29 +692,25 @@ ide_startstop_t do_pdc4030_io(struct ata_device *drive, struct ata_taskfile *arg
* completion must be polled
*/
/* FIXME: Move this lock upwards.
*/
spin_lock_irqsave(ch->lock, flags);
if (ata_status_poll(drive, DATA_READY, drive->bad_wstat,
WAIT_DRQ, rq, &startstop )) {
ret = ata_status_poll(drive, DATA_READY, drive->bad_wstat,
WAIT_DRQ, rq);
if (ret != ATA_OP_READY) {
printk(KERN_ERR "%s: no DRQ after issuing "
"PROMISE_WRITE\n", drive->name);
spin_unlock_irqrestore(ch->lock, flags);
return startstop;
return ret;
}
if (!drive->channel->unmask)
__cli(); /* local CPU only */
spin_unlock_irqrestore(ch->lock, flags);
return promise_do_write(drive, rq);
}
default:
printk(KERN_ERR "pdc4030: command not READ or WRITE! Huh?\n");
/* FIXME: This should already run under the lock. */
ata_end_request(drive, rq, 0);
return ide_stopped;
__ata_end_request(drive, rq, 0, 0);
return ATA_OP_FINISHED;
}
}
......
......@@ -244,26 +244,18 @@ static void piix_tune_drive(struct ata_device *drive, unsigned char pio)
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int piix_udma_setup(struct ata_device *drive)
static int __init piix_modes_map(struct ata_channel *ch)
{
short w80 = drive->channel->udma_four;
short speed = ata_timing_mode(drive,
XFER_PIO | XFER_EPIO |
(piix_config->flags & PIIX_NODMA ? 0 : (XFER_SWDMA | XFER_MWDMA |
(piix_config->flags & PIIX_UDMA ? XFER_UDMA : 0) |
(w80 && (piix_config->flags & PIIX_UDMA) >= PIIX_UDMA_66 ? XFER_UDMA_66 : 0) |
(w80 && (piix_config->flags & PIIX_UDMA) >= PIIX_UDMA_100 ? XFER_UDMA_100 : 0) |
(w80 && (piix_config->flags & PIIX_UDMA) >= PIIX_UDMA_133 ? XFER_UDMA_133 : 0))));
piix_set_drive(drive, speed);
udma_enable(drive, drive->channel->autodma && (speed & XFER_MODE) != XFER_PIO, 0);
return 0;
short w80 = ch->udma_four;
int map = XFER_EPIO |
(piix_config->flags & PIIX_NODMA ? 0 : (XFER_SWDMA | XFER_MWDMA |
(piix_config->flags & PIIX_UDMA ? XFER_UDMA : 0) |
(w80 && (piix_config->flags & PIIX_UDMA) >= PIIX_UDMA_66 ? XFER_UDMA_66 : 0) |
(w80 && (piix_config->flags & PIIX_UDMA) >= PIIX_UDMA_100 ? XFER_UDMA_100 : 0) |
(w80 && (piix_config->flags & PIIX_UDMA) >= PIIX_UDMA_133 ? XFER_UDMA_133 : 0)));
return map;
}
#endif
/*
......@@ -360,9 +352,10 @@ static void __init piix_init_channel(struct ata_channel *ch)
{
int i;
ch->udma_four = piix_ata66_check(ch);
ch->tuneproc = &piix_tune_drive;
ch->speedproc = &piix_set_drive;
ch->autodma = 0;
ch->io_32bit = 1;
ch->unmask = 1;
for (i = 0; i < 2; i++) {
......@@ -373,11 +366,8 @@ static void __init piix_init_channel(struct ata_channel *ch)
#ifdef CONFIG_BLK_DEV_IDEDMA
if (ch->dma_base) {
ch->highmem = 1;
ch->udma_setup = piix_udma_setup;
# ifdef CONFIG_IDEDMA_AUTO
if (!noautodma)
ch->autodma = 1;
# endif
ch->modes_map = piix_modes_map(ch);
ch->udma_setup = udma_generic_setup;
}
#endif
}
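For reference, a minimal sketch of what a map-driven setup hook boils down to. This is a hypothetical illustration built only from calls visible in the removed piix_udma_setup above; the real udma_generic_setup introduced by this patch may differ in detail:

/* Hypothetical illustration only - not the patch's udma_generic_setup. */
static int example_map_driven_setup(struct ata_device *drive, int map)
{
	/*
	 * Pick the best transfer mode the drive supports within the
	 * channel's advertised map, program it, and decide whether DMA
	 * should be switched on - roughly what the removed
	 * piix_udma_setup() did, now parameterized by modes_map.
	 */
	short speed = ata_timing_mode(drive, map);

	piix_set_drive(drive, speed);
	udma_enable(drive, (speed & XFER_MODE) != XFER_PIO, 0);

	return 0;
}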
......@@ -401,7 +391,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_INTEL,
device: PCI_DEVICE_ID_INTEL_82371FB_1,
init_chipset: piix_init_chipset,
ata66_check: piix_ata66_check,
init_channel: piix_init_channel,
init_dma: piix_init_dma,
enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
......@@ -411,7 +400,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_INTEL,
device: PCI_DEVICE_ID_INTEL_82371SB_1,
init_chipset: piix_init_chipset,
ata66_check: piix_ata66_check,
init_channel: piix_init_channel,
init_dma: piix_init_dma,
enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
......@@ -421,7 +409,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_INTEL,
device: PCI_DEVICE_ID_INTEL_82371AB,
init_chipset: piix_init_chipset,
ata66_check: piix_ata66_check,
init_channel: piix_init_channel,
init_dma: piix_init_dma,
enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
......@@ -431,7 +418,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_INTEL,
device: PCI_DEVICE_ID_INTEL_82443MX_1,
init_chipset: piix_init_chipset,
ata66_check: piix_ata66_check,
init_channel: piix_init_channel,
init_dma: piix_init_dma,
enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
......@@ -441,7 +427,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_INTEL,
device: PCI_DEVICE_ID_INTEL_82372FB_1,
init_chipset: piix_init_chipset,
ata66_check: piix_ata66_check,
init_channel: piix_init_channel,
init_dma: piix_init_dma,
enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
......@@ -451,7 +436,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_INTEL,
device: PCI_DEVICE_ID_INTEL_82801AA_1,
init_chipset: piix_init_chipset,
ata66_check: piix_ata66_check,
init_channel: piix_init_channel,
init_dma: piix_init_dma,
enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
......@@ -461,7 +445,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_INTEL,
device: PCI_DEVICE_ID_INTEL_82801AB_1,
init_chipset: piix_init_chipset,
ata66_check: piix_ata66_check,
init_channel: piix_init_channel,
init_dma: piix_init_dma,
enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
......@@ -471,7 +454,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_INTEL,
device: PCI_DEVICE_ID_INTEL_82801BA_9,
init_chipset: piix_init_chipset,
ata66_check: piix_ata66_check,
init_channel: piix_init_channel,
init_dma: piix_init_dma,
enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
......@@ -481,7 +463,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_INTEL,
device: PCI_DEVICE_ID_INTEL_82801BA_8,
init_chipset: piix_init_chipset,
ata66_check: piix_ata66_check,
init_channel: piix_init_channel,
init_dma: piix_init_dma,
enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
......@@ -491,7 +472,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_INTEL,
device: PCI_DEVICE_ID_INTEL_82801E_9,
init_chipset: piix_init_chipset,
ata66_check: piix_ata66_check,
init_channel: piix_init_channel,
init_dma: piix_init_dma,
enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
......@@ -501,7 +481,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_INTEL,
device: PCI_DEVICE_ID_INTEL_82801CA_10,
init_chipset: piix_init_chipset,
ata66_check: piix_ata66_check,
init_channel: piix_init_channel,
init_dma: piix_init_dma,
enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
......@@ -511,7 +490,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_INTEL,
device: PCI_DEVICE_ID_INTEL_82801CA_11,
init_chipset: piix_init_chipset,
ata66_check: piix_ata66_check,
init_channel: piix_init_channel,
init_dma: piix_init_dma,
enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
......@@ -521,7 +499,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_INTEL,
device: PCI_DEVICE_ID_INTEL_82801DB_9,
init_chipset: piix_init_chipset,
ata66_check: piix_ata66_check,
init_channel: piix_init_channel,
init_dma: piix_init_dma,
enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
......@@ -531,7 +508,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_EFAR,
device: PCI_DEVICE_ID_EFAR_SLC90E66_1,
init_chipset: piix_init_chipset,
ata66_check: piix_ata66_check,
init_channel: piix_init_channel,
init_dma: piix_init_dma,
enablebits: {{0x41,0x80,0x80}, {0x43,0x80,0x80}},
......
......@@ -312,8 +312,7 @@ byte eighty_ninty_three(struct ata_device *drive)
int ide_config_drive_speed(struct ata_device *drive, byte speed)
{
struct ata_channel *ch = drive->channel;
int i;
int error = 1;
int ret;
#if defined(CONFIG_BLK_DEV_IDEDMA) && !defined(__CRIS__)
u8 unit = (drive->select.b.unit & 0x01);
......@@ -338,33 +337,14 @@ int ide_config_drive_speed(struct ata_device *drive, byte speed)
if (drive->quirk_list == 2)
ata_irq_enable(drive, 1);
udelay(1);
/* FIXME: use ata_status_poll() --bkz */
ata_busy_poll(drive, WAIT_CMD);
/*
* Allow status to settle, then read it again.
* A few rare drives vastly violate the 400ns spec here,
* so we'll wait up to 10usec for a "good" status
* rather than expensively fail things immediately.
* This fix courtesy of Matthew Faupel & Niccolo Rigacci.
*/
for (i = 0; i < 10; i++) {
udelay(1);
if (ata_status(drive, DRIVE_READY, BUSY_STAT | DRQ_STAT | ERR_STAT)) {
error = 0;
break;
}
}
ret = ata_status_poll(drive, 0, BUSY_STAT, WAIT_CMD, NULL);
ata_mask(drive);
enable_irq(ch->irq);
if (error) {
if (ret != ATA_OP_READY) {
ata_dump(drive, NULL, "set drive speed");
return error;
return 1;
}
drive->id->dma_ultra &= ~0xFF00;
......@@ -399,7 +379,7 @@ int ide_config_drive_speed(struct ata_device *drive, byte speed)
drive->current_speed = speed;
return error;
return 0;
}
static inline void do_identify(struct ata_device *drive, u8 cmd)
......
......@@ -103,15 +103,11 @@ static u8 svwks_revision;
static struct pci_dev *isa_dev;
static int svwks_ratemask(struct ata_device *drive)
static int __init svwks_modes_map(struct ata_channel *ch)
{
struct pci_dev *dev = drive->channel->pci_dev;
int map = 0;
int map = XFER_EPIO | XFER_MWDMA;
if (!eighty_ninty_three(drive))
return XFER_UDMA;
switch(dev->device) {
switch(ch->pci_dev->device) {
case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
if (svwks_revision >= SVWKS_CSB5_REVISION_NEW)
map |= XFER_UDMA_100;
......@@ -120,6 +116,7 @@ static int svwks_ratemask(struct ata_device *drive)
map |= XFER_UDMA;
break;
}
return map;
}
......@@ -176,6 +173,7 @@ static int svwks_tune_chipset(struct ata_device *drive, u8 speed)
csb5_pio |= ((speed - XFER_PIO_0) << (4*drive->dn));
break;
/* FIXME: check SWDMA modes --bkz */
#ifdef CONFIG_BLK_DEV_IDEDMA
case XFER_MW_DMA_2:
case XFER_MW_DMA_1:
......@@ -224,79 +222,13 @@ static int svwks_tune_chipset(struct ata_device *drive, u8 speed)
return ide_config_drive_speed(drive, speed);
}
/* FIXME: pio == 255 -> ata_best_pio_mode(drive) --bkz */
static void svwks_tune_drive(struct ata_device *drive, u8 pio)
{
(void) svwks_tune_chipset(drive, XFER_PIO_0 + min_t(u8, pio, 4));
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int config_chipset_for_dma(struct ata_device *drive)
{
int map;
u8 mode;
/* FIXME: check SWDMA modes --bkz */
map = XFER_MWDMA | svwks_ratemask(drive);
mode = ata_timing_mode(drive, map);
return !svwks_tune_chipset(drive, mode);
}
static int svwks_udma_setup(struct ata_device *drive)
{
struct hd_driveid *id = drive->id;
int on = 1;
int verbose = 1;
if (id && (id->capability & 1) && drive->channel->autodma) {
/* Consult the list of known "bad" drives */
if (udma_black_list(drive)) {
on = 0;
goto fast_ata_pio;
}
on = 0;
verbose = 0;
if (id->field_valid & 4) {
if (id->dma_ultra & 0x003F) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if ((id->dma_mword & 0x0007) ||
(id->dma_1word & 0x007)) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive);
if (!on)
goto no_dma_set;
}
} else if (udma_white_list(drive)) {
if (id->eide_dma_time > 150) {
goto no_dma_set;
}
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive);
if (!on)
goto no_dma_set;
} else {
goto fast_ata_pio;
}
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
on = 0;
verbose = 0;
no_dma_set:
svwks_tune_chipset(drive, ata_best_pio_mode(drive));
}
udma_enable(drive, on, verbose);
return 0;
}
static int svwks_udma_stop(struct ata_device *drive)
{
struct ata_channel *ch = drive->channel;
......@@ -437,24 +369,21 @@ static void __init ide_init_svwks(struct ata_channel *hwif)
if (!hwif->irq)
hwif->irq = hwif->unit ? 15 : 14;
hwif->udma_four = svwks_ata66_check(hwif);
hwif->tuneproc = &svwks_tune_drive;
hwif->speedproc = &svwks_tune_chipset;
#ifndef CONFIG_BLK_DEV_IDEDMA
hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1;
hwif->autodma = 0;
#else
if (hwif->dma_base) {
#ifdef CONFIG_IDEDMA_AUTO
if (!noautodma)
hwif->autodma = 1;
#endif
hwif->modes_map = svwks_modes_map(hwif);
hwif->udma_setup = udma_generic_setup;
hwif->udma_stop = svwks_udma_stop;
hwif->udma_setup = svwks_udma_setup;
hwif->highmem = 1;
} else {
hwif->autodma = 0;
hwif->drives[0].autotune = 1;
hwif->drives[1].autotune = 1;
}
......@@ -468,7 +397,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_SERVERWORKS,
device: PCI_DEVICE_ID_SERVERWORKS_OSB4IDE,
init_chipset: svwks_init_chipset,
ata66_check: svwks_ata66_check,
init_channel: ide_init_svwks,
bootable: ON_BOARD,
flags: ATA_F_DMA
......@@ -477,7 +405,6 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_SERVERWORKS,
device: PCI_DEVICE_ID_SERVERWORKS_CSB5IDE,
init_chipset: svwks_init_chipset,
ata66_check: svwks_ata66_check,
init_channel: ide_init_svwks,
bootable: ON_BOARD,
flags: ATA_F_SIMPLEX
......
......@@ -207,9 +207,9 @@ static byte cycle_time_value[][XFER_UDMA_5 - XFER_UDMA_0 + 1] = {
static struct pci_dev *host_dev = NULL;
static int sis5513_ratemask(struct ata_device *drive)
static int __init sis5513_modes_map(struct ata_channel *ch)
{
int map = 0;
int map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA;
switch(chipset_family) {
case ATA_133: /* map |= XFER_UDMA_133; */
......@@ -221,15 +221,8 @@ static int sis5513_ratemask(struct ata_device *drive)
case ATA_33:
map |= XFER_UDMA;
break;
case ATA_16:
case ATA_00:
default:
return 0;
}
if (!eighty_ninty_three(drive))
return XFER_UDMA;
return map;
}
......@@ -280,9 +273,6 @@ static int config_art_rwp_pio(struct ata_device *drive, u8 pio)
drive->dn, pio, timing);
#endif
if (drive->dn > 3) /* FIXME: remove this --bkz */
return 1;
drive_pci = 0x40 + (drive->dn << 1);
/* register layout changed with newer ATA100 chips */
......@@ -404,86 +394,6 @@ static void sis5513_tune_drive(struct ata_device *drive, u8 pio)
(void)config_art_rwp_pio(drive, min_t(u8, pio, 4));
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int config_chipset_for_dma(struct ata_device *drive, u8 udma)
{
int map;
u8 mode;
#ifdef DEBUG
printk("SIS5513: config_chipset_for_dma, drive %d, udma %d\n",
drive->dn, udma);
#endif
if (udma)
map = sis5513_ratemask(drive);
else
map = XFER_SWDMA | XFER_MWDMA;
mode = ata_timing_mode(drive, map);
if (mode < XFER_SW_DMA_0)
return 0;
return !sis5513_tune_chipset(drive, mode);
}
static int sis5513_udma_setup(struct ata_device *drive)
{
struct hd_driveid *id = drive->id;
int on = 0;
int verbose = 1;
config_drive_art_rwp(drive);
sis5513_tune_drive(drive, 255);
if (id && (id->capability & 1) && drive->channel->autodma) {
/* Consult the list of known "bad" drives */
if (udma_black_list(drive)) {
on = 0;
goto fast_ata_pio;
}
on = 0;
verbose = 0;
if (id->field_valid & 4) {
if (id->dma_ultra & 0x003F) {
/* Force if Capable UltraDMA */
on = config_chipset_for_dma(drive, 1);
if ((id->field_valid & 2) &&
(!on))
goto try_dma_modes;
}
} else if (id->field_valid & 2) {
try_dma_modes:
if ((id->dma_mword & 0x0007) ||
(id->dma_1word & 0x0007)) {
/* Force if Capable regular DMA modes */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
}
} else if ((udma_white_list(drive)) &&
(id->eide_dma_time > 150)) {
/* Consult the list of known "good" drives */
on = config_chipset_for_dma(drive, 0);
if (!on)
goto no_dma_set;
} else {
goto fast_ata_pio;
}
} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
on = 0;
verbose = 0;
no_dma_set:
sis5513_tune_drive(drive, 255);
}
udma_enable(drive, on, verbose);
return 0;
}
#endif
/* Chip detection and general config */
static unsigned int __init pci_init_sis5513(struct pci_dev *dev)
{
......@@ -576,25 +486,19 @@ static void __init ide_init_sis5513(struct ata_channel *hwif)
hwif->irq = hwif->unit ? 15 : 14;
hwif->udma_four = ata66_sis5513(hwif);
hwif->tuneproc = &sis5513_tune_drive;
hwif->speedproc = &sis5513_tune_chipset;
if (!(hwif->dma_base))
return;
if (host_dev) {
#ifdef CONFIG_BLK_DEV_IDEDMA
if (chipset_family > ATA_16) {
hwif->autodma = noautodma ? 0 : 1;
hwif->highmem = 1;
hwif->udma_setup = sis5513_udma_setup;
} else {
#endif
hwif->autodma = 0;
#ifdef CONFIG_BLK_DEV_IDEDMA
}
#endif
if (hwif->dma_base && host_dev && chipset_family > ATA_16) {
hwif->highmem = 1;
hwif->modes_map = sis5513_modes_map(hwif);
hwif->udma_setup = udma_generic_setup;
}
#endif
return;
}
......@@ -604,11 +508,9 @@ static struct ata_pci_device chipset __initdata = {
vendor: PCI_VENDOR_ID_SI,
device: PCI_DEVICE_ID_SI_5513,
init_chipset: pci_init_sis5513,
ata66_check: ata66_sis5513,
init_channel: ide_init_sis5513,
enablebits: {{0x4a,0x02,0x02}, {0x4a,0x04,0x04} },
bootable: ON_BOARD,
flags: ATA_F_NOADMA
};
int __init init_sis5513(void)
......
......@@ -130,7 +130,7 @@ static int config_for_dma(struct ata_device *drive)
* Check to see if the drive and
* chipset is capable of DMA mode
*/
static int sl82c105_dma_setup(struct ata_device *drive)
static int sl82c105_dma_setup(struct ata_device *drive, int map)
{
int on = 0;
......@@ -333,7 +333,6 @@ static void __init sl82c105_init_dma(struct ata_channel *ch, unsigned long dma_b
dma_state &= ~0x60;
} else {
dma_state |= 0x60;
ch->autodma = 1;
}
outb(dma_state, dma_base + 2);
......
......@@ -57,22 +57,23 @@ static ide_startstop_t service(struct ata_device *drive, struct request *rq);
static ide_startstop_t tcq_nop_handler(struct ata_device *drive, struct request *rq)
{
struct ata_taskfile *args = rq->special;
unsigned long flags;
struct ata_taskfile *args = rq->special;
struct ata_channel *ch = drive->channel;
ide__sti();
spin_lock_irqsave(drive->channel->lock, flags);
spin_lock_irqsave(ch->lock, flags);
blkdev_dequeue_request(rq);
drive->rq = NULL;
end_that_request_last(rq);
spin_unlock_irqrestore(drive->channel->lock, flags);
spin_unlock_irqrestore(ch->lock, flags);
kfree(args);
return ide_stopped;
return ATA_OP_FINISHED;
}
/*
......@@ -116,9 +117,9 @@ static void tcq_invalidate_queue(struct ata_device *drive)
goto out;
}
rq = blk_get_request(&drive->queue, READ, GFP_ATOMIC);
rq = __blk_get_request(&drive->queue, READ);
if (!rq)
rq = blk_get_request(&drive->queue, WRITE, GFP_ATOMIC);
rq = __blk_get_request(&drive->queue, WRITE);
/*
* blk_queue_invalidate_tags() just added back at least one command
......@@ -126,6 +127,8 @@ static void tcq_invalidate_queue(struct ata_device *drive)
*/
BUG_ON(!rq);
/* WIN_NOP is a special request, so set its flags explicitly. */
rq->flags = REQ_SPECIAL;
rq->special = ar;
ar->cmd = WIN_NOP;
ar->XXX_handler = tcq_nop_handler;
......@@ -168,7 +171,7 @@ static void ata_tcq_irq_timeout(unsigned long data)
* if pending commands, try service before giving up
*/
if (ata_pending_commands(drive) && !ata_status(drive, 0, SERVICE_STAT))
if (service(drive, drive->rq) == ide_started)
if (service(drive, drive->rq) == ATA_OP_CONTINUES)
return;
if (drive)
......@@ -228,13 +231,10 @@ static ide_startstop_t udma_tcq_start(struct ata_device *drive, struct request *
* and it must have reported a need for service (status has SERVICE_STAT set)
*
* Also, nIEN must be set so as not to need protection against ide_dmaq_intr
*
* Channel lock should be held.
*/
static ide_startstop_t service(struct ata_device *drive, struct request *rq)
{
struct ata_channel *ch = drive->channel;
ide_startstop_t ret;
unsigned long flags;
u8 feat, stat;
int tag;
......@@ -246,7 +246,7 @@ static ide_startstop_t service(struct ata_device *drive, struct request *rq)
* handler, refuse to do anything.
*/
if (test_bit(IDE_DMA, drive->channel->active))
return ide_stopped;
return ATA_OP_FINISHED;
/*
* need to select the right drive first...
......@@ -266,7 +266,7 @@ static ide_startstop_t service(struct ata_device *drive, struct request *rq)
ata_dump(drive, rq, "BUSY clear took too long");
tcq_invalidate_queue(drive);
return ide_stopped;
return ATA_OP_FINISHED;
}
#ifdef IDE_TCQ_NIEN
......@@ -280,7 +280,7 @@ static ide_startstop_t service(struct ata_device *drive, struct request *rq)
ata_dump(drive, rq, "ERR condition");
tcq_invalidate_queue(drive);
return ide_stopped;
return ATA_OP_FINISHED;
}
/*
......@@ -289,7 +289,7 @@ static ide_startstop_t service(struct ata_device *drive, struct request *rq)
if ((feat = GET_FEAT()) & NSEC_REL) {
drive->rq = NULL;
printk("%s: release in service\n", drive->name);
return ide_stopped;
return ATA_OP_FINISHED;
}
tag = feat >> 3;
......@@ -302,20 +302,19 @@ static ide_startstop_t service(struct ata_device *drive, struct request *rq)
if (!rq) {
printk(KERN_ERR"%s: missing request for tag %d\n", __FUNCTION__, tag);
spin_unlock_irqrestore(ch->lock, flags);
return ide_stopped;
return ATA_OP_FINISHED;
}
drive->rq = rq;
spin_unlock_irqrestore(ch->lock, flags);
/*
* we'll start a dma read or write, device will trigger
* interrupt to indicate end of transfer, release is not allowed
*/
TCQ_PRINTK("%s: starting command %x\n", __FUNCTION__, stat);
ret = udma_tcq_start(drive, rq);
spin_unlock_irqrestore(ch->lock, flags);
return ret;
return udma_tcq_start(drive, rq);
}
static ide_startstop_t check_service(struct ata_device *drive, struct request *rq)
......@@ -323,7 +322,7 @@ static ide_startstop_t check_service(struct ata_device *drive, struct request *r
TCQ_PRINTK("%s: %s\n", drive->name, __FUNCTION__);
if (!ata_pending_commands(drive))
return ide_stopped;
return ATA_OP_FINISHED;
if (!ata_status(drive, 0, SERVICE_STAT))
return service(drive, rq);
......@@ -333,13 +332,11 @@ static ide_startstop_t check_service(struct ata_device *drive, struct request *r
*/
set_irq(drive, ide_dmaq_intr);
return ide_started;
return ATA_OP_CONTINUES;
}
static ide_startstop_t dmaq_complete(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
u8 dma_stat;
/*
......@@ -354,7 +351,7 @@ static ide_startstop_t dmaq_complete(struct ata_device *drive, struct request *r
ata_dump(drive, rq, __FUNCTION__);
tcq_invalidate_queue(drive);
return ide_stopped;
return ATA_OP_FINISHED;
}
if (dma_stat)
......@@ -362,13 +359,7 @@ static ide_startstop_t dmaq_complete(struct ata_device *drive, struct request *r
TCQ_PRINTK("%s: ending %p, tag %d\n", __FUNCTION__, rq, rq->tag);
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
__ata_end_request(drive, rq, !dma_stat, rq->nr_sectors);
spin_unlock_irqrestore(ch->lock, flags);
/*
* we completed this command, check if we can service a new command
......@@ -441,7 +432,7 @@ static int check_autopoll(struct ata_device *drive)
memset(&args, 0, sizeof(args));
args.taskfile.feature = 0x01;
args.cmd = WIN_NOP;
ide_raw_taskfile(drive, &args);
ide_raw_taskfile(drive, &args, NULL);
if (args.taskfile.feature & ABRT_ERR)
return 1;
......@@ -469,7 +460,7 @@ static int configure_tcq(struct ata_device *drive)
memset(&args, 0, sizeof(args));
args.taskfile.feature = SETFEATURES_EN_WCACHE;
args.cmd = WIN_SETFEATURES;
if (ide_raw_taskfile(drive, &args)) {
if (ide_raw_taskfile(drive, &args, NULL)) {
printk("%s: failed to enable write cache\n", drive->name);
return 1;
}
......@@ -481,7 +472,7 @@ static int configure_tcq(struct ata_device *drive)
memset(&args, 0, sizeof(args));
args.taskfile.feature = SETFEATURES_DIS_RI;
args.cmd = WIN_SETFEATURES;
if (ide_raw_taskfile(drive, &args)) {
if (ide_raw_taskfile(drive, &args, NULL)) {
printk("%s: disabling release interrupt fail\n", drive->name);
return 1;
}
......@@ -493,7 +484,7 @@ static int configure_tcq(struct ata_device *drive)
memset(&args, 0, sizeof(args));
args.taskfile.feature = SETFEATURES_EN_SI;
args.cmd = WIN_SETFEATURES;
if (ide_raw_taskfile(drive, &args)) {
if (ide_raw_taskfile(drive, &args, NULL)) {
printk("%s: enabling service interrupt fail\n", drive->name);
return 1;
}
......@@ -532,8 +523,6 @@ static int tcq_wait_dataphase(struct ata_device *drive)
/*
* Invoked from a SERVICE interrupt, command etc already known. Just need to
* start the dma engine for this tag.
*
* Channel lock should be held.
*/
static ide_startstop_t udma_tcq_start(struct ata_device *drive, struct request *rq)
{
......@@ -544,21 +533,19 @@ static ide_startstop_t udma_tcq_start(struct ata_device *drive, struct request *
printk("queued_rw: IDE_BUSY not set\n");
if (tcq_wait_dataphase(drive))
return ide_stopped;
return ATA_OP_FINISHED;
if (ata_start_dma(drive, rq))
return ide_stopped;
return ATA_OP_FINISHED;
__set_irq(ch, ide_dmaq_intr);
udma_start(drive, rq);
return ide_started;
return ATA_OP_CONTINUES;
}
/*
* Start a queued command from scratch.
*
* Channel lock should be held.
*/
ide_startstop_t udma_tcq_init(struct ata_device *drive, struct request *rq)
{
......@@ -582,7 +569,7 @@ ide_startstop_t udma_tcq_init(struct ata_device *drive, struct request *rq)
if (wait_altstat(drive, &stat, BUSY_STAT)) {
ata_dump(drive, rq, "queued start");
tcq_invalidate_queue(drive);
return ide_stopped;
return ATA_OP_FINISHED;
}
#ifdef IDE_TCQ_NIEN
......@@ -591,7 +578,7 @@ ide_startstop_t udma_tcq_init(struct ata_device *drive, struct request *rq)
if (stat & ERR_STAT) {
ata_dump(drive, rq, "tcq_start");
return ide_stopped;
return ATA_OP_FINISHED;
}
/*
......@@ -601,14 +588,14 @@ ide_startstop_t udma_tcq_init(struct ata_device *drive, struct request *rq)
if ((feat = GET_FEAT()) & NSEC_REL) {
drive->immed_rel++;
drive->rq = NULL;
__set_irq(drive->channel, ide_dmaq_intr);
set_irq(drive, ide_dmaq_intr);
TCQ_PRINTK("REL in queued_start\n");
if (!ata_status(drive, 0, SERVICE_STAT))
return service(drive, rq);
return ide_released;
return ATA_OP_RELEASED;
}
TCQ_PRINTK("IMMED in queued_start\n");
......
......@@ -209,7 +209,7 @@ static int trm290_udma_init(struct ata_device *drive, struct request *rq)
#ifdef TRM290_NO_DMA_WRITES
trm290_prepare_drive(drive, 0); /* select PIO xfer */
return ide_stopped;
return ATA_OP_FINISHED;
#endif
} else {
reading = 2;
......@@ -218,7 +218,7 @@ static int trm290_udma_init(struct ata_device *drive, struct request *rq)
if (!(count = udma_new_table(drive, rq))) {
trm290_prepare_drive(drive, 0); /* select PIO xfer */
return ide_stopped; /* try PIO instead of DMA */
return ATA_OP_FINISHED; /* try PIO instead of DMA */
}
trm290_prepare_drive(drive, 1); /* select DMA xfer */
......@@ -231,7 +231,7 @@ static int trm290_udma_init(struct ata_device *drive, struct request *rq)
outb(reading ? WIN_READDMA : WIN_WRITEDMA, IDE_COMMAND_REG);
}
return ide_started;
return ATA_OP_CONTINUES;
}
static int trm290_udma_irq_status(struct ata_device *drive)
......@@ -239,9 +239,9 @@ static int trm290_udma_irq_status(struct ata_device *drive)
return (inw(drive->channel->dma_base + 2) == 0x00ff);
}
static int trm290_udma_setup(struct ata_device *drive)
static int trm290_udma_setup(struct ata_device *drive, int map)
{
return udma_pci_setup(drive);
return udma_pci_setup(drive, map);
}
#endif
......@@ -303,7 +303,6 @@ static void __init trm290_init_channel(struct ata_channel *hwif)
#endif
hwif->selectproc = &trm290_selectproc;
hwif->autodma = 0; /* play it safe for now */
#if 1
{
/*
......
......@@ -221,22 +221,16 @@ static void via82cxxx_tune_drive(struct ata_device *drive, unsigned char pio)
}
#ifdef CONFIG_BLK_DEV_IDEDMA
static int via82cxxx_udma_setup(struct ata_device *drive)
static int __init via_modes_map(struct ata_channel *ch)
{
short w80 = drive->channel->udma_four;
short speed = ata_timing_mode(drive,
XFER_PIO | XFER_EPIO | XFER_SWDMA | XFER_MWDMA |
(via_config->flags & VIA_UDMA ? XFER_UDMA : 0) |
(w80 && (via_config->flags & VIA_UDMA) >= VIA_UDMA_66 ? XFER_UDMA_66 : 0) |
(w80 && (via_config->flags & VIA_UDMA) >= VIA_UDMA_100 ? XFER_UDMA_100 : 0) |
(w80 && (via_config->flags & VIA_UDMA) >= VIA_UDMA_133 ? XFER_UDMA_133 : 0));
via_set_drive(drive, speed);
udma_enable(drive, drive->channel->autodma && (speed & XFER_MODE) != XFER_PIO, 0);
return 0;
short w80 = ch->udma_four;
int map = XFER_EPIO | XFER_SWDMA | XFER_MWDMA |
(via_config->flags & VIA_UDMA ? XFER_UDMA : 0) |
(w80 && (via_config->flags & VIA_UDMA) >= VIA_UDMA_66 ? XFER_UDMA_66 : 0) |
(w80 && (via_config->flags & VIA_UDMA) >= VIA_UDMA_100 ? XFER_UDMA_100 : 0) |
(w80 && (via_config->flags & VIA_UDMA) >= VIA_UDMA_133 ? XFER_UDMA_133 : 0);
return map;
}
#endif
......@@ -352,9 +346,10 @@ static void __init via82cxxx_init_channel(struct ata_channel *hwif)
{
int i;
hwif->udma_four = via82cxxx_ata66_check(hwif);
hwif->tuneproc = &via82cxxx_tune_drive;
hwif->speedproc = &via_set_drive;
hwif->autodma = 0;
hwif->io_32bit = 1;
hwif->unmask = (via_config->flags & VIA_NO_UNMASK) ? 0 : 1;
......@@ -366,11 +361,8 @@ static void __init via82cxxx_init_channel(struct ata_channel *hwif)
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_base) {
hwif->highmem = 1;
hwif->udma_setup = via82cxxx_udma_setup;
# ifdef CONFIG_IDEDMA_AUTO
if (!noautodma)
hwif->autodma = 1;
# endif
hwif->modes_map = via_modes_map(hwif);
hwif->udma_setup = udma_generic_setup;
}
#endif
}
......@@ -391,23 +383,19 @@ static struct ata_pci_device chipsets[] __initdata = {
vendor: PCI_VENDOR_ID_VIA,
device: PCI_DEVICE_ID_VIA_82C576_1,
init_chipset: via82cxxx_init_chipset,
ata66_check: via82cxxx_ata66_check,
init_channel: via82cxxx_init_channel,
init_dma: via82cxxx_init_dma,
enablebits: {{0x40,0x02,0x02}, {0x40,0x01,0x01}},
bootable: ON_BOARD,
flags: ATA_F_NOADMA
},
{
vendor: PCI_VENDOR_ID_VIA,
device: PCI_DEVICE_ID_VIA_82C586_1,
init_chipset: via82cxxx_init_chipset,
ata66_check: via82cxxx_ata66_check,
init_channel: via82cxxx_init_channel,
init_dma: via82cxxx_init_dma,
enablebits: {{0x40,0x02,0x02}, {0x40,0x01,0x01}},
bootable: ON_BOARD,
flags: ATA_F_NOADMA
},
};
......
......@@ -236,22 +236,26 @@ static inline idescsi_scsi_t *idescsi_private(struct Scsi_Host *host)
static int idescsi_end_request(struct ata_device *drive, struct request *rq, int uptodate)
{
unsigned long flags;
struct Scsi_Host *host = drive->driver_data;
idescsi_scsi_t *scsi = idescsi_private(host);
struct atapi_packet_command *pc = (struct atapi_packet_command *) rq->special;
int log = test_bit(IDESCSI_LOG_CMD, &scsi->log);
u8 *scsi_buf;
unsigned long flags;
if (!(rq->flags & REQ_PC)) {
ata_end_request(drive, rq, uptodate);
__ata_end_request(drive, rq, uptodate, 0);
return 0;
}
spin_lock_irqsave(drive->channel->lock, flags);
blkdev_dequeue_request(rq);
drive->rq = NULL;
end_that_request_last(rq);
spin_unlock_irqrestore(drive->channel->lock, flags);
if (rq->errors >= ERROR_MAX) {
pc->s.scsi_cmd->result = DID_ERROR << 16;
if (log)
......@@ -273,9 +277,7 @@ static int idescsi_end_request(struct ata_device *drive, struct request *rq, int
}
}
host = pc->s.scsi_cmd->host;
spin_lock_irqsave(host->host_lock, flags);
pc->s.done(pc->s.scsi_cmd);
spin_unlock_irqrestore(host->host_lock, flags);
idescsi_free_bio(rq->bio);
kfree(pc); kfree(rq);
scsi->pc = NULL;
......@@ -293,8 +295,6 @@ static inline unsigned long get_timeout(struct atapi_packet_command *pc)
*/
static ide_startstop_t idescsi_pc_intr(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
struct Scsi_Host *host = drive->driver_data;
idescsi_scsi_t *scsi = idescsi_private(host);
u8 ireason;
......@@ -322,21 +322,19 @@ static ide_startstop_t idescsi_pc_intr(struct ata_device *drive, struct request
if (drive->status & ERR_STAT)
rq->errors++;
idescsi_end_request(drive, rq, 1);
return ide_stopped;
return ATA_OP_FINISHED;
}
bcount = IN_BYTE (IDE_BCOUNTH_REG) << 8 | IN_BYTE (IDE_BCOUNTL_REG);
ireason = IN_BYTE (IDE_IREASON_REG);
if (ireason & IDESCSI_IREASON_COD) {
printk (KERN_ERR "ide-scsi: CoD != 0 in idescsi_pc_intr\n");
return ide_stopped;
return ATA_OP_FINISHED;
}
if (ireason & IDESCSI_IREASON_IO) {
temp = pc->actually_transferred + bcount;
if ( temp > pc->request_transfer) {
if (temp > pc->buffer_size) {
unsigned long flags;
struct ata_channel *ch = drive->channel;
printk (KERN_ERR "ide-scsi: The scsi wants to send us more data than expected - discarding data\n");
temp = pc->buffer_size - pc->actually_transferred;
if (temp) {
......@@ -350,15 +348,10 @@ static ide_startstop_t idescsi_pc_intr(struct ata_device *drive, struct request
pc->actually_transferred += temp;
pc->current_position += temp;
atapi_discard_data(drive,bcount - temp);
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
ata_set_handler(drive, idescsi_pc_intr, get_timeout(pc), NULL);
spin_unlock_irqrestore(ch->lock, flags);
return ide_started;
return ATA_OP_CONTINUES;
}
#ifdef DEBUG
printk (KERN_NOTICE "ide-scsi: The scsi wants to send us more data than expected - allowing transfer\n");
......@@ -381,48 +374,38 @@ static ide_startstop_t idescsi_pc_intr(struct ata_device *drive, struct request
pc->actually_transferred+=bcount; /* Update the current position */
pc->current_position+=bcount;
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
ata_set_handler(drive, idescsi_pc_intr, get_timeout(pc), NULL); /* And set the interrupt handler again */
spin_unlock_irqrestore(ch->lock, flags);
/* And set the interrupt handler again */
ata_set_handler(drive, idescsi_pc_intr, get_timeout(pc), NULL);
return ide_started;
return ATA_OP_CONTINUES;
}
static ide_startstop_t idescsi_transfer_pc(struct ata_device *drive, struct request *rq)
{
unsigned long flags;
struct ata_channel *ch = drive->channel;
struct Scsi_Host *host = drive->driver_data;
idescsi_scsi_t *scsi = idescsi_private(host);
struct atapi_packet_command *pc = scsi->pc;
u8 ireason;
ide_startstop_t startstop;
int ret;
/* FIXME: Move this lock upwards.
*/
spin_lock_irqsave(ch->lock, flags);
if (ata_status_poll(drive, DRQ_STAT, BUSY_STAT,
WAIT_READY, rq, &startstop)) {
ret = ata_status_poll(drive, DRQ_STAT, BUSY_STAT,
WAIT_READY, rq);
if (ret != ATA_OP_READY) {
printk (KERN_ERR "ide-scsi: Strange, packet command initiated yet DRQ isn't asserted\n");
ret = startstop;
return ret;
}
ireason = IN_BYTE(IDE_IREASON_REG);
if ((ireason & (IDESCSI_IREASON_IO | IDESCSI_IREASON_COD)) != IDESCSI_IREASON_COD) {
printk (KERN_ERR "ide-scsi: (IO,CoD) != (0,1) while issuing a packet command\n");
ret = ATA_OP_FINISHED;
} else {
ireason = IN_BYTE(IDE_IREASON_REG);
if ((ireason & (IDESCSI_IREASON_IO | IDESCSI_IREASON_COD)) != IDESCSI_IREASON_COD) {
printk (KERN_ERR "ide-scsi: (IO,CoD) != (0,1) while issuing a packet command\n");
ret = ide_stopped;
} else {
ata_set_handler(drive, idescsi_pc_intr, get_timeout(pc), NULL);
atapi_write(drive, scsi->pc->c, 12);
ret = ide_started;
}
ata_set_handler(drive, idescsi_pc_intr, get_timeout(pc), NULL);
atapi_write(drive, scsi->pc->c, 12);
ret = ATA_OP_CONTINUES;
}
spin_unlock_irqrestore(ch->lock, flags);
return ret;
}
......@@ -457,19 +440,10 @@ static ide_startstop_t idescsi_issue_pc(struct ata_device *drive, struct request
udma_start(drive, rq);
}
if (test_bit(IDESCSI_DRQ_INTERRUPT, &scsi->flags)) {
unsigned long flags;
struct ata_channel *ch = drive->channel;
/* FIXME: this locking should encompass the above register
* file access too.
*/
spin_lock_irqsave(ch->lock, flags);
ata_set_handler(drive, idescsi_transfer_pc, get_timeout(pc), NULL);
spin_unlock_irqrestore(ch->lock, flags);
OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG); /* Issue the packet command */
return ide_started;
OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG);
return ATA_OP_CONTINUES;
} else {
OUT_BYTE (WIN_PACKETCMD, IDE_COMMAND_REG);
return idescsi_transfer_pc(drive, rq);
......@@ -481,7 +455,6 @@ static ide_startstop_t idescsi_issue_pc(struct ata_device *drive, struct request
*/
static ide_startstop_t idescsi_do_request(struct ata_device *drive, struct request *rq, sector_t block)
{
struct ata_channel *ch = drive->channel;
int ret;
#ifdef DEBUG
......@@ -496,16 +469,13 @@ static ide_startstop_t idescsi_do_request(struct ata_device *drive, struct reque
rq->current_nr_sectors);
#endif
/* FIXME: make this unlocking go away*/
spin_unlock_irq(ch->lock);
if (rq->flags & REQ_PC) {
ret = idescsi_issue_pc(drive, rq, (struct atapi_packet_command *) rq->special);
} else {
blk_dump_rq_flags(rq, "ide-scsi: unsup command");
idescsi_end_request(drive, rq, 0);
ret = ide_stopped;
ret = ATA_OP_FINISHED;
}
spin_lock_irq(ch->lock);
return ret;
}
......@@ -720,9 +690,7 @@ static int idescsi_queue(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
rq->flags = REQ_PC;
rq->special = (char *) pc;
rq->bio = idescsi_dma_bio (drive, pc);
spin_unlock_irq(cmd->host->host_lock);
ide_do_drive_cmd (drive, rq, ide_end);
spin_lock_irq(cmd->host->host_lock);
ide_do_drive_cmd(drive, rq, ide_end);
return 0;
abort:
......
......@@ -12,6 +12,9 @@
* more details.
*/
#include <linux/types.h>
#include <asm/byteorder.h>
/*
* With each packet command, we allocate a buffer.
* This is used for several packet
......@@ -79,3 +82,281 @@ extern void atapi_write_zeros(struct ata_device *, unsigned int);
extern void atapi_read(struct ata_device *, u8 *, unsigned int);
extern void atapi_write(struct ata_device *, u8 *, unsigned int);
/*
* ATAPI Status Register.
*/
typedef union {
u8 all : 8;
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 check : 1; /* Error occurred */
u8 idx : 1; /* Reserved */
u8 corr : 1; /* Correctable error occurred */
u8 drq : 1; /* Data is request by the device */
u8 dsc : 1; /* Media access command finished / Buffer availability */
u8 reserved5 : 1; /* Reserved */
u8 drdy : 1; /* Ignored for ATAPI commands (ready to accept ATA command) */
u8 bsy : 1; /* The device has access to the command block */
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 bsy : 1;
u8 drdy : 1;
u8 reserved5 : 1;
u8 dsc : 1;
u8 drq : 1;
u8 corr : 1;
u8 idx : 1;
u8 check : 1;
#else
#error "Please fix <asm/byteorder.h>"
#endif
} b;
} atapi_status_reg_t;
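A minimal usage sketch for the union above; reading the raw byte from drive->status is only an assumption for illustration, since the right source depends on the calling context:

	atapi_status_reg_t status;

	status.all = drive->status;		/* raw ATAPI status byte */
	if (status.b.check) {
		/* CHECK CONDITION - consult the error register / sense data */
	} else if (status.b.drq) {
		/* the device is requesting a data transfer */
	}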
/*
* ATAPI error register.
*/
typedef union {
u8 all : 8;
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 ili : 1; /* Illegal Length Indication */
u8 eom : 1; /* End Of Media Detected */
u8 abrt : 1; /* Aborted command - As defined by ATA */
u8 mcr : 1; /* Media Change Requested - As defined by ATA */
u8 sense_key : 4; /* Sense key of the last failed packet command */
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 sense_key : 4;
u8 mcr : 1;
u8 abrt : 1;
u8 eom : 1;
u8 ili : 1;
#else
#error "Please fix <asm/byteorder.h>"
#endif
} b;
} atapi_error_reg_t;
/* Currently unused, but please do not remove. --bkz */
/*
* ATAPI Feature Register.
*/
typedef union {
u8 all : 8;
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 dma : 1; /* Using DMA or PIO */
u8 reserved321 : 3; /* Reserved */
u8 reserved654 : 3; /* Reserved (Tag Type) */
u8 reserved7 : 1; /* Reserved */
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 reserved7 : 1;
u8 reserved654 : 3;
u8 reserved321 : 3;
u8 dma : 1;
#else
#error "Please fix <asm/byteorder.h>"
#endif
} b;
} atapi_feature_reg_t;
/*
* ATAPI Byte Count Register.
*/
typedef union {
u16 all : 16;
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 low; /* LSB */
u8 high; /* MSB */
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 high;
u8 low;
#else
#error "Please fix <asm/byteorder.h>"
#endif
} b;
} atapi_bcount_reg_t;
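The two halves correspond to the IDE_BCOUNTL_REG/IDE_BCOUNTH_REG reads done in the ide-scsi interrupt handler above; a hedged sketch of assembling the count through the union:

	atapi_bcount_reg_t bcount;

	bcount.b.low  = IN_BYTE(IDE_BCOUNTL_REG);	/* LSB of the byte count */
	bcount.b.high = IN_BYTE(IDE_BCOUNTH_REG);	/* MSB of the byte count */
	/* bcount.all now equals (BCOUNTH << 8) | BCOUNTL on either endianness */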
/*
* ATAPI Interrupt Reason Register.
*/
typedef union {
u8 all : 8;
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 cod : 1; /* Information transferred is command (1) or data (0) */
u8 io : 1; /* The device requests us to read (1) or write (0) */
u8 reserved : 6; /* Reserved */
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 reserved : 6;
u8 io : 1;
u8 cod : 1;
#else
#error "Please fix <asm/byteorder.h>"
#endif
} b;
} atapi_ireason_reg_t;
/* Currently unused, but please do not remove. --bkz */
/*
* ATAPI Drive Select Register.
*/
typedef union {
u8 all :8;
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 sam_lun :3; /* Logical unit number */
u8 reserved3 :1; /* Reserved */
u8 drv :1; /* The responding drive will be drive 0 (0) or drive 1 (1) */
u8 one5 :1; /* Should be set to 1 */
u8 reserved6 :1; /* Reserved */
u8 one7 :1; /* Should be set to 1 */
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 one7 :1;
u8 reserved6 :1;
u8 one5 :1;
u8 drv :1;
u8 reserved3 :1;
u8 sam_lun :3;
#else
#error "Please fix <asm/byteorder.h>"
#endif
} b;
} atapi_drivesel_reg_t;
/* Currently unused, but please do not remove. --bkz */
/*
* ATAPI Device Control Register.
*/
typedef union {
u8 all : 8;
struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 zero0 : 1; /* Should be set to zero */
u8 nien : 1; /* Device interrupt is disabled (1) or enabled (0) */
u8 srst : 1; /* ATA software reset. ATAPI devices should use the new ATAPI srst. */
u8 one3 : 1; /* Should be set to 1 */
u8 reserved4567 : 4; /* Reserved */
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 reserved4567 : 4;
u8 one3 : 1;
u8 srst : 1;
u8 nien : 1;
u8 zero0 : 1;
#else
#error "Please fix <asm/byteorder.h>"
#endif
} b;
} atapi_control_reg_t;
/*
* The following is used to format the general configuration word
* of the ATAPI IDENTIFY DEVICE command.
*/
struct atapi_id_gcw {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 packet_size : 2; /* Packet Size */
u8 reserved234 : 3; /* Reserved */
u8 drq_type : 2; /* Command packet DRQ type */
u8 removable : 1; /* Removable media */
u8 device_type : 5; /* Device type */
u8 reserved13 : 1; /* Reserved */
u8 protocol : 2; /* Protocol type */
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 protocol : 2;
u8 reserved13 : 1;
u8 device_type : 5;
u8 removable : 1;
u8 drq_type : 2;
u8 reserved234 : 3;
u8 packet_size : 2;
#else
#error "Please fix <asm/byteorder.h>"
#endif
};
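A sketch of how this structure could be overlaid on word 0 of the IDENTIFY data; the drive->id->config source and the numeric values (protocol 2 = ATAPI, device type 5 = CD-ROM) are taken from ATA/ATAPI conventions rather than from this patch:

	struct atapi_id_gcw gcw;

	*((u16 *) &gcw) = drive->id->config;	/* word 0 of IDENTIFY (PACKET) DEVICE */

	if (gcw.protocol != 2) {
		/* not an ATAPI device */
	} else if (gcw.device_type == 5) {
		/* ATAPI CD-ROM */
	}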
/*
* INQUIRY packet command - Data Format.
*/
typedef struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 device_type : 5; /* Peripheral Device Type */
u8 reserved0_765 : 3; /* Peripheral Qualifier - Reserved */
u8 reserved1_6t0 : 7; /* Reserved */
u8 rmb : 1; /* Removable Medium Bit */
u8 ansi_version : 3; /* ANSI Version */
u8 ecma_version : 3; /* ECMA Version */
u8 iso_version : 2; /* ISO Version */
u8 response_format : 4; /* Response Data Format */
u8 reserved3_45 : 2; /* Reserved */
u8 reserved3_6 : 1; /* TrmIOP - Reserved */
u8 reserved3_7 : 1; /* AENC - Reserved */
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 reserved0_765 : 3;
u8 device_type : 5;
u8 rmb : 1;
u8 reserved1_6t0 : 7;
u8 iso_version : 2;
u8 ecma_version : 3;
u8 ansi_version : 3;
u8 reserved3_7 : 1;
u8 reserved3_6 : 1;
u8 reserved3_45 : 2;
u8 response_format : 4;
#else
#error "Please fix <asm/byteorder.h>"
#endif
u8 additional_length; /* Additional Length (total_length-4) */
u8 rsv5, rsv6, rsv7; /* Reserved */
u8 vendor_id[8]; /* Vendor Identification */
u8 product_id[16]; /* Product Identification */
u8 revision_level[4]; /* Revision Level */
u8 vendor_specific[20]; /* Vendor Specific - Optional */
u8 reserved56t95[40]; /* Reserved - Optional */
/* Additional information may be returned */
} atapi_inquiry_result_t;
/*
* REQUEST SENSE packet command result - Data Format.
*/
typedef struct atapi_request_sense {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 error_code : 7; /* Error Code (0x70 - current or 0x71 - deferred) */
u8 valid : 1; /* The information field conforms to standard */
u8 reserved1 : 8; /* Reserved (Segment Number) */
u8 sense_key : 4; /* Sense Key */
u8 reserved2_4 : 1; /* Reserved */
u8 ili : 1; /* Incorrect Length Indicator */
u8 eom : 1; /* End Of Medium */
u8 filemark : 1; /* Filemark */
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 valid : 1;
u8 error_code : 7;
u8 reserved1 : 8;
u8 filemark : 1;
u8 eom : 1;
u8 ili : 1;
u8 reserved2_4 : 1;
u8 sense_key : 4;
#else
#error "Please fix <asm/byteorder.h>"
#endif
u32 information __attribute__ ((packed));
u8 asl; /* Additional sense length (n-7) */
u32 command_specific; /* Additional command specific information */
u8 asc; /* Additional Sense Code */
u8 ascq; /* Additional Sense Code Qualifier */
u8 replaceable_unit_code; /* Field Replaceable Unit Code */
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 sk_specific1 : 7; /* Sense Key Specific */
u8 sksv : 1; /* Sense Key Specific information is valid */
#elif defined(__BIG_ENDIAN_BITFIELD)
u8 sksv : 1; /* Sense Key Specific information is valid */
u8 sk_specific1 : 7; /* Sense Key Specific */
#else
#error "Please fix <asm/byteorder.h>"
#endif
u8 sk_specific[2]; /* Sense Key Specific */
u8 pad[2]; /* Padding to 20 bytes */
} atapi_request_sense_result_t;
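A possible way to pull in and report the sense data using the atapi_read() helper declared earlier; whether a given driver fetches sense exactly this way is an assumption:

	atapi_request_sense_result_t sense;

	atapi_read(drive, (u8 *) &sense, sizeof(sense));
	printk(KERN_INFO "%s: sense key 0x%x asc 0x%x ascq 0x%x\n",
	       drive->name, sense.sense_key, sense.asc, sense.ascq);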
......@@ -283,7 +283,9 @@ extern void generic_make_request(struct bio *bio);
extern inline request_queue_t *bdev_get_queue(struct block_device *bdev);
extern void blkdev_release_request(struct request *);
extern void blk_attempt_remerge(request_queue_t *, struct request *);
extern void __blk_attempt_remerge(request_queue_t *, struct request *);
extern struct request *blk_get_request(request_queue_t *, int, int);
extern struct request *__blk_get_request(request_queue_t *, int);
extern void blk_put_request(struct request *);
extern void blk_plug_device(request_queue_t *);
extern int blk_remove_plug(request_queue_t *);
......
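The double-underscore variants mirror their locking counterparts but skip taking the queue lock, presumably for callers that already hold it (as the TCQ invalidation path above does). A hedged usage sketch, with args standing in for whatever driver-private payload the request carries:

	struct request *rq;

	/* queue lock is assumed to be held already by the caller */
	rq = __blk_get_request(q, READ);
	if (!rq)
		rq = __blk_get_request(q, WRITE);
	if (rq) {
		rq->flags = REQ_SPECIAL;
		rq->special = args;	/* hypothetical driver-private data */
		/* ... queue it and kick the hardware ... */
	}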
......@@ -372,16 +372,17 @@ struct ata_device {
* Status returned by various functions.
*/
typedef enum {
ide_stopped, /* no drive operation was started */
ide_started, /* a drive operation was started, and a handler was set */
ide_released /* started and released bus */
ATA_OP_FINISHED, /* no drive operation was started */
ATA_OP_CONTINUES, /* a drive operation was started, and a handler was set */
ATA_OP_RELEASED, /* started and released bus */
ATA_OP_READY, /* indicate status poll finished fine */
} ide_startstop_t;
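To make the semantics concrete, here is a hypothetical polled handler written against these codes, using only helpers that appear elsewhere in this diff (my_poll_handler itself is invented for illustration):

static ide_startstop_t my_poll_handler(struct ata_device *drive, struct request *rq)
{
	if (!ata_status(drive, 0, BUSY_STAT)) {
		if (time_before(jiffies, drive->channel->poll_timeout)) {
			/* still busy: re-arm ourselves and keep polling */
			ata_set_handler(drive, my_poll_handler, HZ/100, NULL);
			return ATA_OP_CONTINUES;
		}
		return ata_error(drive, rq, "poll timeout");
	}
	/* device is ready: complete the request, no handler stays armed */
	__ata_end_request(drive, rq, 1, rq->nr_sectors);
	return ATA_OP_FINISHED;
}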
/*
* Interrupt and timeout handler type.
*/
typedef ide_startstop_t (ata_handler_t)(struct ata_device *, struct request *);
typedef int (ata_expiry_t)(struct ata_device *, struct request *);
typedef ide_startstop_t (ata_expiry_t)(struct ata_device *, struct request *, unsigned long *);
enum {
ATA_PRIMARY = 0,
......@@ -406,7 +407,7 @@ struct ata_channel {
ide_startstop_t (*handler)(struct ata_device *, struct request *); /* irq handler, if active */
struct timer_list timer; /* failsafe timer */
int (*expiry)(struct ata_device *, struct request *); /* irq handler, if active */
ide_startstop_t (*expiry)(struct ata_device *, struct request *, unsigned long *); /* irq handler, if active */
unsigned long poll_timeout; /* timeout value during polled operations */
struct ata_device *drive; /* last serviced drive */
......@@ -456,7 +457,7 @@ struct ata_channel {
void (*atapi_read)(struct ata_device *, void *, unsigned int);
void (*atapi_write)(struct ata_device *, void *, unsigned int);
int (*udma_setup)(struct ata_device *);
int (*udma_setup)(struct ata_device *, int);
void (*udma_enable)(struct ata_device *, int, int);
void (*udma_start) (struct ata_device *, struct request *);
......@@ -496,7 +497,9 @@ struct ata_channel {
unsigned unmask : 1; /* flag: okay to unmask other irqs */
unsigned slow : 1; /* flag: slow data port */
unsigned io_32bit : 1; /* 0=16-bit, 1=32-bit */
unsigned no_atapi_autodma : 1; /* flag: use auto DMA only for disks */
unsigned char bus_state; /* power state of the IDE bus */
int modes_map; /* map of supported transfer modes */
};
/*
......@@ -602,9 +605,7 @@ extern int noautodma;
#define DEVICE_NR(device) (minor(device) >> PARTN_BITS)
#include <linux/blk.h>
/* Not locking and locking variant: */
extern int __ata_end_request(struct ata_device *, struct request *, int, unsigned int);
extern int ata_end_request(struct ata_device *drive, struct request *, int);
extern void ata_set_handler(struct ata_device *drive, ata_handler_t handler,
unsigned long timeout, ata_expiry_t expiry);
......@@ -625,12 +626,6 @@ int ide_xlate_1024(kdev_t, int, int, const char *);
*/
struct ata_device *get_info_ptr(kdev_t i_rdev);
/*
* Re-Start an operation for an IDE interface.
* The caller should return immediately after invoking this.
*/
ide_startstop_t restart_request(struct ata_device *);
/*
* "action" parameter type for ide_do_drive_cmd() below.
*/
......@@ -658,31 +653,7 @@ struct ata_taskfile {
extern void ata_read(struct ata_device *, void *, unsigned int);
extern void ata_write(struct ata_device *, void *, unsigned int);
/*
* Special Flagged Register Validation Caller
*/
/*
* for now, taskfile requests are special :/
*/
static inline char *ide_map_rq(struct request *rq, unsigned long *flags)
{
if (rq->bio)
return bio_kmap_irq(rq->bio, flags) + ide_rq_offset(rq);
else
return rq->buffer + ((rq)->nr_sectors - (rq)->current_nr_sectors) * SECTOR_SIZE;
}
static inline void ide_unmap_rq(struct request *rq, char *to,
unsigned long *flags)
{
if (rq->bio)
bio_kunmap_irq(to, flags);
}
extern ide_startstop_t ata_special_intr(struct ata_device *, struct request *);
extern int ide_raw_taskfile(struct ata_device *, struct ata_taskfile *);
extern int ide_raw_taskfile(struct ata_device *, struct ata_taskfile *, char *);
extern void ide_fix_driveid(struct hd_driveid *id);
extern int ide_config_drive_speed(struct ata_device *, byte);
extern byte eighty_ninty_three(struct ata_device *);
......@@ -756,9 +727,12 @@ static inline void udma_start(struct ata_device *drive, struct request *rq)
static inline int udma_stop(struct ata_device *drive)
{
int ret;
ret = drive->channel->udma_stop(drive);
clear_bit(IDE_DMA, drive->channel->active);
return drive->channel->udma_stop(drive);
return ret;
}
/*
......@@ -766,9 +740,12 @@ static inline int udma_stop(struct ata_device *drive)
*/
static inline ide_startstop_t udma_init(struct ata_device *drive, struct request *rq)
{
int ret = drive->channel->udma_init(drive, rq);
if (ret == ide_started)
set_bit(IDE_DMA, drive->channel->active);
int ret;
set_bit(IDE_DMA, drive->channel->active);
ret = drive->channel->udma_init(drive, rq);
if (ret != ATA_OP_CONTINUES)
clear_bit(IDE_DMA, drive->channel->active);
return ret;
}
......@@ -797,7 +774,9 @@ extern int udma_pci_init(struct ata_device *drive, struct request *rq);
extern int udma_pci_irq_status(struct ata_device *drive);
extern void udma_pci_timeout(struct ata_device *drive);
extern void udma_pci_irq_lost(struct ata_device *);
extern int udma_pci_setup(struct ata_device *);
extern int udma_pci_setup(struct ata_device *, int);
extern int udma_generic_setup(struct ata_device *, int);
extern int udma_new_table(struct ata_device *, struct request *);
extern void udma_destroy_table(struct ata_channel *);
......@@ -830,10 +809,9 @@ extern int drive_is_ready(struct ata_device *drive);
extern void ata_select(struct ata_device *, unsigned long);
extern void ata_mask(struct ata_device *);
extern int ata_busy_poll(struct ata_device *, unsigned long);
extern int ata_status(struct ata_device *, u8, u8);
extern int ata_status_poll( struct ata_device *, u8, u8,
unsigned long, struct request *rq, ide_startstop_t *);
unsigned long, struct request *rq);
extern int ata_irq_enable(struct ata_device *, int);
extern void ata_reset(struct ata_channel *);
......