Commit aae7d141 authored by Linus Torvalds

Merge bk://linux-scsi.bkbits.net/scsi-for-linus-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 8d3f5223 e47da193
@@ -339,7 +339,8 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
                    struct gendisk *bd_disk, Scsi_Ioctl_Command __user *sic)
 {
     struct request *rq;
-    int err, in_len, out_len, bytes, opcode, cmdlen;
+    int err;
+    unsigned int in_len, out_len, bytes, opcode, cmdlen;
     char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
     /*
......
@@ -768,28 +768,6 @@ void aac_printf(struct aac_dev *dev, u32 val)
     memset(cp, 0, 256);
 }
 
-/**
- * aac_handle_aif - Handle a message from the firmware
- * @dev: Which adapter this fib is from
- * @fibptr: Pointer to fibptr from adapter
- *
- * This routine handles a driver notify fib from the adapter and
- * dispatches it to the appropriate routine for handling.
- */
-static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
-{
-    struct hw_fib * hw_fib = fibptr->hw_fib;
-    /*
-     * Set the status of this FIB to be Invalid parameter.
-     *
-     *    *(u32 *)fib->data = ST_INVAL;
-     */
-    *(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
-    fib_adapter_complete(fibptr, sizeof(u32));
-}
-
 /**
  * aac_command_thread - command processing thread
  * @dev: Adapter to monitor
@@ -859,7 +837,6 @@ int aac_command_thread(struct aac_dev * dev)
             aifcmd = (struct aac_aifcmd *) hw_fib->data;
             if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
                 /* Handle Driver Notify Events */
-                aac_handle_aif(dev, fib);
                 *(u32 *)hw_fib->data = cpu_to_le32(ST_OK);
                 fib_adapter_complete(fib, sizeof(u32));
             } else {
@@ -870,10 +847,6 @@ int aac_command_thread(struct aac_dev * dev)
                 u32 time_now, time_last;
                 unsigned long flagv;
 
-                /* Sniff events */
-                if (aifcmd->command == cpu_to_le32(AifCmdEventNotify))
-                    aac_handle_aif(dev, fib);
-
                 time_now = jiffies/HZ;
                 spin_lock_irqsave(&dev->fib_lock, flagv);
......
@@ -125,14 +125,8 @@ ahc_linux_eisa_init(void)
     uint32_t eisa_id;
     size_t id_size;
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
-        if (check_region(eisaBase, AHC_EISA_IOSIZE) != 0)
-            continue;
-        request_region(eisaBase, AHC_EISA_IOSIZE, "aic7xxx");
-#else
         if (request_region(eisaBase, AHC_EISA_IOSIZE, "aic7xxx") == 0)
             continue;
-#endif
 
         eisa_id = 0;
         id_size = sizeof(eisa_id);
@@ -207,14 +201,8 @@ aic7770_map_registers(struct ahc_softc *ahc, u_int port)
     /*
      * Lock out other contenders for our i/o space.
      */
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
-    if (check_region(port, AHC_EISA_IOSIZE) != 0)
-        return (ENOMEM);
-    request_region(port, AHC_EISA_IOSIZE, "aic7xxx");
-#else
     if (request_region(port, AHC_EISA_IOSIZE, "aic7xxx") == 0)
         return (ENOMEM);
-#endif
     ahc->tag = BUS_SPACE_PIO;
     ahc->bsh.ioport = port;
     return (0);
......
@@ -831,8 +831,6 @@ static inline void ahc_linux_eisa_exit(void) {
 /******************************* PCI Routines *********************************/
 #ifdef CONFIG_PCI
-void ahc_power_state_change(struct ahc_softc *ahc,
-                            ahc_power_state new_state);
 int ahc_linux_pci_init(void);
 void ahc_linux_pci_exit(void);
 int ahc_pci_map_registers(struct ahc_softc *ahc);
......
@@ -42,12 +42,6 @@
 #include "aic7xxx_osm.h"
 #include "aic7xxx_pci.h"
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
-struct pci_device_id
-{
-};
-#endif
-
 static int ahc_linux_pci_dev_probe(struct pci_dev *pdev,
                                    const struct pci_device_id *ent);
 static int ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc,
@@ -55,7 +49,6 @@ static int ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc,
 static int ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
                                             u_long *bus_addr,
                                             uint8_t __iomem **maddr);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
 static void ahc_linux_pci_dev_remove(struct pci_dev *pdev);
 
 /* Define the macro locally since it's different for different class of chips.
@@ -169,7 +162,6 @@ ahc_linux_pci_dev_remove(struct pci_dev *pdev)
     } else
         ahc_list_unlock(&l);
 }
-#endif /* !LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0) */
 static int
 ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -219,7 +211,6 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     ahc = ahc_alloc(NULL, name);
     if (ahc == NULL)
         return (-ENOMEM);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
     if (pci_enable_device(pdev)) {
         ahc_free(ahc);
         return (-ENODEV);
@@ -238,14 +229,12 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         }
         ahc->platform_data->hw_dma_mask = DMA_32BIT_MASK;
     }
-#endif
     ahc->dev_softc = pci;
     error = ahc_pci_config(ahc, entry);
     if (error != 0) {
         ahc_free(ahc);
         return (-error);
     }
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
     pci_set_drvdata(pdev, ahc);
     if (aic7xxx_detect_complete) {
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
@@ -256,39 +245,14 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
         return (-ENODEV);
 #endif
     }
-#endif
     return (0);
 }
 
 int
 ahc_linux_pci_init(void)
 {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
     /* Translate error or zero return into zero or one */
     return pci_module_init(&aic7xxx_pci_driver) ? 0 : 1;
-#else
-    struct pci_dev *pdev;
-    u_int class;
-    int found;
-
-    /* If we don't have a PCI bus, we can't find any adapters. */
-    if (pci_present() == 0)
-        return (0);
-
-    found = 0;
-    pdev = NULL;
-    class = PCI_CLASS_STORAGE_SCSI << 8;
-    while ((pdev = pci_find_class(class, pdev)) != NULL) {
-        ahc_dev_softc_t pci;
-        int error;
-
-        pci = pdev;
-        error = ahc_linux_pci_dev_probe(pdev, /*pci_devid*/NULL);
-        if (error == 0)
-            found++;
-    }
-    return (found);
-#endif
 }
 
 void
@@ -303,22 +267,11 @@ ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, u_long *base)
     if (aic7xxx_allow_memio == 0)
         return (ENOMEM);
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,3,0)
     *base = pci_resource_start(ahc->dev_softc, 0);
-#else
-    *base = ahc_pci_read_config(ahc->dev_softc, PCIR_MAPS, 4);
-    *base &= PCI_BASE_ADDRESS_IO_MASK;
-#endif
     if (*base == 0)
         return (ENOMEM);
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,0)
-    if (check_region(*base, 256) != 0)
-        return (ENOMEM);
-    request_region(*base, 256, "aic7xxx");
-#else
     if (request_region(*base, 256, "aic7xxx") == 0)
         return (ENOMEM);
-#endif
     return (0);
 }
@@ -334,17 +287,13 @@ ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
     start = pci_resource_start(ahc->dev_softc, 1);
     if (start != 0) {
         *bus_addr = start;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
         if (request_mem_region(start, 0x1000, "aic7xxx") == 0)
             error = ENOMEM;
-#endif
         if (error == 0) {
             *maddr = ioremap_nocache(start, 256);
             if (*maddr == NULL) {
                 error = ENOMEM;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
                 release_mem_region(start, 0x1000);
-#endif
             }
         }
     } else
@@ -387,10 +336,8 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
                ahc_get_pci_slot(ahc->dev_softc),
                ahc_get_pci_function(ahc->dev_softc));
         iounmap(maddr);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
         release_mem_region(ahc->platform_data->mem_busaddr,
                            0x1000);
-#endif
         ahc->bsh.maddr = NULL;
         maddr = NULL;
     } else
@@ -440,41 +387,3 @@ ahc_pci_map_int(struct ahc_softc *ahc)
     return (-error);
 }
-
-void
-ahc_power_state_change(struct ahc_softc *ahc, ahc_power_state new_state)
-{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
-    pci_set_power_state(ahc->dev_softc, new_state);
-#else
-    uint32_t cap;
-    u_int cap_offset;
-
-    /*
-     * Traverse the capability list looking for
-     * the power management capability.
-     */
-    cap = 0;
-    cap_offset = ahc_pci_read_config(ahc->dev_softc,
-                                     PCIR_CAP_PTR, /*bytes*/1);
-    while (cap_offset != 0) {
-        cap = ahc_pci_read_config(ahc->dev_softc,
-                                  cap_offset, /*bytes*/4);
-        if ((cap & 0xFF) == 1
-            && ((cap >> 16) & 0x3) > 0) {
-            uint32_t pm_control;
-
-            pm_control = ahc_pci_read_config(ahc->dev_softc,
-                                             cap_offset + 4,
-                                             /*bytes*/4);
-            pm_control &= ~0x3;
-            pm_control |= new_state;
-            ahc_pci_write_config(ahc->dev_softc,
-                                 cap_offset + 4,
-                                 pm_control, /*bytes*/2);
-            break;
-        }
-        cap_offset = (cap >> 8) & 0xFF;
-    }
-#endif
-}
@@ -721,7 +721,7 @@ ahc_pci_config(struct ahc_softc *ahc, struct ahc_pci_identity *entry)
     ahc->chip |= AHC_PCI;
     ahc->description = entry->name;
 
-    ahc_power_state_change(ahc, AHC_POWER_STATE_D0);
+    pci_set_power_state(ahc->dev_softc, AHC_POWER_STATE_D0);
 
     error = ahc_pci_map_registers(ahc);
     if (error != 0)
@@ -2016,7 +2016,7 @@ static int
 ahc_pci_resume(struct ahc_softc *ahc)
 {
-    ahc_power_state_change(ahc, AHC_POWER_STATE_D0);
+    pci_set_power_state(ahc->dev_softc, AHC_POWER_STATE_D0);
 
     /*
      * We assume that the OS has restored our register
......
@@ -1156,17 +1156,14 @@ scsi_show_extd_sense(unsigned char asc, unsigned char ascq)
 }
 
 /* Print sense information */
-static void
-print_sense_internal(const char *devclass,
-                     const unsigned char *sense_buffer,
-                     int sense_len,
-                     struct request *req)
+void
+__scsi_print_sense(const char *name, const unsigned char *sense_buffer,
+                   int sense_len)
 {
     int k, num, res;
     unsigned int info;
     const char *error;
     const char *sense_txt;
-    const char *name = req->rq_disk ? req->rq_disk->disk_name : devclass;
     struct scsi_sense_hdr ssh;
 
     res = scsi_normalize_sense(sense_buffer, sense_len, &ssh);
@@ -1254,18 +1251,25 @@ print_sense_internal(const char *devclass,
         printk("\n");
     }
 }
+EXPORT_SYMBOL(__scsi_print_sense);
 
 void scsi_print_sense(const char *devclass, struct scsi_cmnd *cmd)
 {
-    print_sense_internal(devclass, cmd->sense_buffer,
-                         SCSI_SENSE_BUFFERSIZE, cmd->request);
+    const char *name = devclass;
+
+    if (cmd->request->rq_disk)
+        name = cmd->request->rq_disk->disk_name;
+
+    __scsi_print_sense(name, cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE);
 }
 EXPORT_SYMBOL(scsi_print_sense);
 
 void scsi_print_req_sense(const char *devclass, struct scsi_request *sreq)
 {
-    print_sense_internal(devclass, sreq->sr_sense_buffer,
-                         SCSI_SENSE_BUFFERSIZE, sreq->sr_request);
+    const char *name = devclass;
+
+    if (sreq->sr_request->rq_disk)
+        name = sreq->sr_request->rq_disk->disk_name;
+
+    __scsi_print_sense(name, sreq->sr_sense_buffer, SCSI_SENSE_BUFFERSIZE);
 }
 EXPORT_SYMBOL(scsi_print_req_sense);
......
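The export above means code that has only a device name and a raw sense buffer can print decoded sense data without a struct request. A minimal sketch of such a call site (illustrative only, not part of this commit; the wrapper function and the "example" name are hypothetical, and scsi_dbg.h is assumed to carry the declaration):

#include <scsi/scsi_cmnd.h>   /* SCSI_SENSE_BUFFERSIZE */
#include <scsi/scsi_dbg.h>    /* assumed to declare __scsi_print_sense() */

/* Illustrative only: decode and log a raw sense buffer with a fixed name. */
static void example_dump_sense(const unsigned char *sense)
{
    __scsi_print_sense("example", sense, SCSI_SENSE_BUFFERSIZE);
}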
@@ -96,7 +96,6 @@
 #include "scsi.h"
 #include <scsi/scsi_host.h>
-#include "fd_mcs.h"
 
 #define DRIVER_VERSION "v0.2 by ZP Gu<zpg@castle.net>"
@@ -104,14 +103,12 @@
 #define DEBUG 0 /* Enable debugging output */
 #define ENABLE_PARITY 1 /* Enable SCSI Parity */
-#define DO_DETECT 0 /* Do device detection here (see scsi.c) */
 
 /* END OF USER DEFINABLE OPTIONS */
 
 #if DEBUG
 #define EVERY_ACCESS 0 /* Write a line on every scsi access */
 #define ERRORS_ONLY 1 /* Only write a line if there is an error */
-#define DEBUG_DETECT 1 /* Debug fd_mcs_detect() */
 #define DEBUG_MESSAGES 1 /* Debug MESSAGE IN phase */
 #define DEBUG_ABORT 1 /* Debug abort() routine */
 #define DEBUG_RESET 1 /* Debug reset() routine */
@@ -119,7 +116,6 @@
 #else
 #define EVERY_ACCESS 0 /* LEAVE THESE ALONE--CHANGE THE ONES ABOVE */
 #define ERRORS_ONLY 0
-#define DEBUG_DETECT 0
 #define DEBUG_MESSAGES 0
 #define DEBUG_ABORT 0
 #define DEBUG_RESET 0
@@ -432,6 +428,7 @@ static int fd_mcs_detect(Scsi_Host_Template * tpnt)
         FIFO_COUNT = user_fifo_count ? user_fifo_count : fd_mcs_adapters[loop].fifo_count;
         FIFO_Size = user_fifo_size ? user_fifo_size : fd_mcs_adapters[loop].fifo_size;
 
+/* FIXME: Do we need to keep this bit of code inside NOT_USED around at all? */
 #ifdef NOT_USED
         /* *************************************************** */
         /* Try to toggle 32-bit mode. This only
@@ -510,59 +507,6 @@ static int fd_mcs_detect(Scsi_Host_Template * tpnt)
         outb(0, SCSI_Mode_Cntl_port);
         outb(PARITY_MASK, TMC_Cntl_port);
         /* done reset */
-
-#if DO_DETECT
-        /* scan devices attached */
-        {
-            const int buflen = 255;
-            int i, j, retcode;
-            Scsi_Cmnd SCinit;
-            unsigned char do_inquiry[] = { INQUIRY, 0, 0, 0, buflen, 0 };
-            unsigned char do_request_sense[] = { REQUEST_SENSE,
-                0, 0, 0, buflen, 0
-            };
-            unsigned char do_read_capacity[] = { READ_CAPACITY,
-                0, 0, 0, 0, 0, 0, 0, 0, 0
-            };
-            unsigned char buf[buflen];
-
-            SCinit.request_buffer = SCinit.buffer = buf;
-            SCinit.request_bufflen = SCinit.bufflen = sizeof(buf) - 1;
-            SCinit.use_sg = 0;
-            SCinit.lun = 0;
-            SCinit.host = shpnt;
-
-            printk("fd_mcs: detection routine scanning for devices:\n");
-            for (i = 0; i < 8; i++) {
-                if (i == shpnt->this_id) /* Skip host adapter */
-                    continue;
-                SCinit.target = i;
-                memcpy(SCinit.cmnd, do_request_sense, sizeof(do_request_sense));
-                retcode = fd_mcs_command(&SCinit);
-                if (!retcode) {
-                    memcpy(SCinit.cmnd, do_inquiry, sizeof(do_inquiry));
-                    retcode = fd_mcs_command(&SCinit);
-                    if (!retcode) {
-                        printk(" SCSI ID %d: ", i);
-                        for (j = 8; j < (buf[4] < 32 ? buf[4] : 32); j++)
-                            printk("%c", buf[j] >= 20 ? buf[j] : ' ');
-                        memcpy(SCinit.cmnd, do_read_capacity, sizeof(do_read_capacity));
-                        retcode = fd_mcs_command(&SCinit);
-                        if (!retcode) {
-                            unsigned long blocks, size, capacity;
-
-                            blocks = (buf[0] << 24) | (buf[1] << 16)
-                                | (buf[2] << 8) | buf[3];
-                            size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
-                            capacity = +(+(blocks / 1024L) * +(size * 10L)) / 1024L;
-
-                            printk("%lu MB (%lu byte blocks)\n", ((capacity + 5L) / 10L), size);
-                        }
-                    }
-                }
-            }
-        }
-#endif
     }
 }
......
/* fd_mcs.h -- Header for Future Domain MCS 600/700 (or IBM OEM) driver
*
* fd_mcs.h v0.2 03/11/1998 ZP Gu (zpg@castle.net)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _FD_MCS_H
#define _FD_MCS_H
static int fd_mcs_detect(Scsi_Host_Template *);
static int fd_mcs_release(struct Scsi_Host *);
static int fd_mcs_command(Scsi_Cmnd *);
static int fd_mcs_abort(Scsi_Cmnd *);
static int fd_mcs_bus_reset(Scsi_Cmnd *);
static int fd_mcs_device_reset(Scsi_Cmnd *);
static int fd_mcs_host_reset(Scsi_Cmnd *);
static int fd_mcs_queue(Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
static int fd_mcs_biosparam(struct scsi_device *, struct block_device *,
sector_t, int *);
static const char *fd_mcs_info(struct Scsi_Host *);
#endif /* _FD_MCS_H */
#ifndef IRQ_HANDLED
typedef void irqreturn_t;
#define IRQ_NONE
#define IRQ_HANDLED
#endif
#ifndef MODULE_LICENSE
#define MODULE_LICENSE(x)
#endif
#ifndef SERVICE_ACTION_IN
#define SERVICE_ACTION_IN 0x9e
#endif
#ifndef READ_16
#define READ_16 0x88
#endif
#ifndef WRITE_16
#define WRITE_16 0x8a
#endif
@@ -14,14 +14,10 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
 static void gdth_do_req(Scsi_Request *srp, gdth_cmd_str *cmd,
                         char *cmnd, int timeout);
 static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Request *scp);
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
-static void gdth_do_cmd(Scsi_Cmnd *scp, gdth_cmd_str *cmd,
-                        char *cmnd, int timeout);
-static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Cmnd *scp);
 #else
 static void gdth_do_cmd(Scsi_Cmnd *scp, gdth_cmd_str *cmd,
                         char *cmnd, int timeout);
-static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Cmnd scp);
+static int gdth_set_asc_info(char *buffer,int length,int hanum,Scsi_Cmnd *scp);
 #endif
 
 static char *gdth_ioctl_alloc(int hanum, int size, int scratch,
......
@@ -87,7 +87,7 @@ static int max_channel = 3;
 static int init_timeout = 5;
 static int max_requests = 50;
 
-#define IBMVSCSI_VERSION "1.5.1"
+#define IBMVSCSI_VERSION "1.5.5"
 
 MODULE_DESCRIPTION("IBM Virtual SCSI");
 MODULE_AUTHOR("Dave Boutcher");
@@ -256,6 +256,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct,
 {
     evt_struct->cmnd = NULL;
     evt_struct->cmnd_done = NULL;
+    evt_struct->sync_srp = NULL;
     evt_struct->crq.format = format;
     evt_struct->crq.timeout = timeout;
     evt_struct->done = done;
@@ -467,7 +468,7 @@ static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
 static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
                                    struct ibmvscsi_host_data *hostdata)
 {
-    struct scsi_cmnd *cmnd = evt_struct->cmnd;
+    struct scsi_cmnd *cmnd;
     u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
     int rc;
@@ -479,23 +480,16 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
     if ((evt_struct->crq.format == VIOSRP_SRP_FORMAT) &&
         (atomic_dec_if_positive(&hostdata->request_limit) < 0)) {
         /* See if the adapter is disabled */
-        if (atomic_read(&hostdata->request_limit) < 0) {
-            if (cmnd)
-                cmnd->result = DID_ERROR << 16;
-            if (evt_struct->cmnd_done)
-                evt_struct->cmnd_done(cmnd);
-            unmap_cmd_data(&evt_struct->iu.srp.cmd,
-                           hostdata->dev);
-            free_event_struct(&hostdata->pool, evt_struct);
-            return 0;
-        } else {
-            printk("ibmvscsi: Warning, request_limit exceeded\n");
+        if (atomic_read(&hostdata->request_limit) < 0)
+            goto send_error;
+
+        printk(KERN_WARNING
+               "ibmvscsi: Warning, request_limit exceeded\n");
         unmap_cmd_data(&evt_struct->iu.srp.cmd,
                        hostdata->dev);
         free_event_struct(&hostdata->pool, evt_struct);
         return SCSI_MLQUEUE_HOST_BUSY;
-        }
     }
 
     /* Copy the IU into the transfer area */
     *evt_struct->xfer_iu = evt_struct->iu;
@@ -511,17 +505,23 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
         ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
         list_del(&evt_struct->list);
 
+        cmnd = evt_struct->cmnd;
         printk(KERN_ERR "ibmvscsi: failed to send event struct rc %d\n",
                rc);
+        goto send_error;
+    }
+
+    return 0;
+
+ send_error:
     unmap_cmd_data(&evt_struct->iu.srp.cmd, hostdata->dev);
-    free_event_struct(&hostdata->pool, evt_struct);
-    if (cmnd)
+
+    if ((cmnd = evt_struct->cmnd) != NULL) {
         cmnd->result = DID_ERROR << 16;
-    if (evt_struct->cmnd_done)
         evt_struct->cmnd_done(cmnd);
-    }
+    } else if (evt_struct->done)
+        evt_struct->done(evt_struct);
+
+    free_event_struct(&hostdata->pool, evt_struct);
 
     return 0;
 }
@@ -537,6 +537,13 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
     struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
     struct scsi_cmnd *cmnd = evt_struct->cmnd;
 
+    if (unlikely(rsp->type != SRP_RSP_TYPE)) {
+        if (printk_ratelimit())
+            printk(KERN_WARNING
+                   "ibmvscsi: bad SRP RSP type %d\n",
+                   rsp->type);
+    }
+
     if (cmnd) {
         cmnd->result = rsp->status;
         if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
@@ -641,11 +648,16 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct)
                evt_struct->xfer_iu->mad.adapter_info.common.status);
     } else {
         printk("ibmvscsi: host srp version: %s, "
-               "host partition %s (%d), OS %d\n",
+               "host partition %s (%d), OS %d, max io %u\n",
               hostdata->madapter_info.srp_version,
               hostdata->madapter_info.partition_name,
               hostdata->madapter_info.partition_number,
-              hostdata->madapter_info.os_type);
+              hostdata->madapter_info.os_type,
+              hostdata->madapter_info.port_max_txu[0]);
+
+        if (hostdata->madapter_info.port_max_txu[0])
+            hostdata->host->max_sectors =
+                hostdata->madapter_info.port_max_txu[0] >> 9;
     }
 }
@@ -796,6 +808,10 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
  */
 static void sync_completion(struct srp_event_struct *evt_struct)
 {
+    /* copy the response back */
+    if (evt_struct->sync_srp)
+        *evt_struct->sync_srp = *evt_struct->xfer_iu;
+
     complete(&evt_struct->comp);
 }
@@ -810,6 +826,8 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
     struct srp_tsk_mgmt *tsk_mgmt;
     struct srp_event_struct *evt;
     struct srp_event_struct *tmp_evt, *found_evt;
+    union viosrp_iu srp_rsp;
+    int rsp_rc;
     u16 lun = lun_from_dev(cmd->device);
 
     /* First, find this command in our sent list so we can figure
@@ -849,6 +867,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
     printk(KERN_INFO "ibmvscsi: aborting command. lun 0x%lx, tag 0x%lx\n",
            tsk_mgmt->lun, tsk_mgmt->managed_task_tag);
 
+    evt->sync_srp = &srp_rsp;
     init_completion(&evt->comp);
     if (ibmvscsi_send_srp_event(evt, hostdata) != 0) {
         printk(KERN_ERR "ibmvscsi: failed to send abort() event\n");
@@ -859,6 +878,29 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
     wait_for_completion(&evt->comp);
     spin_lock_irq(hostdata->host->host_lock);
 
+    /* make sure we got a good response */
+    if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
+        if (printk_ratelimit())
+            printk(KERN_WARNING
+                   "ibmvscsi: abort bad SRP RSP type %d\n",
+                   srp_rsp.srp.generic.type);
+        return FAILED;
+    }
+
+    if (srp_rsp.srp.rsp.rspvalid)
+        rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
+    else
+        rsp_rc = srp_rsp.srp.rsp.status;
+
+    if (rsp_rc) {
+        if (printk_ratelimit())
+            printk(KERN_WARNING
+                   "ibmvscsi: abort code %d for task tag 0x%lx\n",
+                   rsp_rc,
+                   tsk_mgmt->managed_task_tag);
+        return FAILED;
+    }
+
     /* Because we dropped the spinlock above, it's possible
      * The event is no longer in our list. Make sure it didn't
      * complete while we were aborting
@@ -871,12 +913,16 @@
         }
     }
 
     if (found_evt == NULL) {
         printk(KERN_INFO
-               "ibmvscsi: successfully aborted task tag 0x%lx\n",
+               "ibmvscsi: aborted task tag 0x%lx completed\n",
                tsk_mgmt->managed_task_tag);
         return SUCCESS;
     }
 
+    printk(KERN_INFO
+           "ibmvscsi: successfully aborted task tag 0x%lx\n",
+           tsk_mgmt->managed_task_tag);
+
     cmd->result = (DID_ABORT << 16);
     list_del(&found_evt->list);
@@ -899,6 +945,8 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
     struct srp_tsk_mgmt *tsk_mgmt;
     struct srp_event_struct *evt;
     struct srp_event_struct *tmp_evt, *pos;
+    union viosrp_iu srp_rsp;
+    int rsp_rc;
     u16 lun = lun_from_dev(cmd->device);
 
     evt = get_event_struct(&hostdata->pool);
@@ -923,6 +971,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
     printk(KERN_INFO "ibmvscsi: resetting device. lun 0x%lx\n",
            tsk_mgmt->lun);
 
+    evt->sync_srp = &srp_rsp;
     init_completion(&evt->comp);
     if (ibmvscsi_send_srp_event(evt, hostdata) != 0) {
         printk(KERN_ERR "ibmvscsi: failed to send reset event\n");
@@ -933,6 +982,29 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
     wait_for_completion(&evt->comp);
     spin_lock_irq(hostdata->host->host_lock);
 
+    /* make sure we got a good response */
+    if (unlikely(srp_rsp.srp.generic.type != SRP_RSP_TYPE)) {
+        if (printk_ratelimit())
+            printk(KERN_WARNING
+                   "ibmvscsi: reset bad SRP RSP type %d\n",
+                   srp_rsp.srp.generic.type);
+        return FAILED;
+    }
+
+    if (srp_rsp.srp.rsp.rspvalid)
+        rsp_rc = *((int *)srp_rsp.srp.rsp.sense_and_response_data);
+    else
+        rsp_rc = srp_rsp.srp.rsp.status;
+
+    if (rsp_rc) {
+        if (printk_ratelimit())
+            printk(KERN_WARNING
+                   "ibmvscsi: reset code %d for task tag 0x%lx\n",
+                   rsp_rc,
+                   tsk_mgmt->managed_task_tag);
+        return FAILED;
+    }
+
     /* We need to find all commands for this LUN that have not yet been
      * responded to, and fail them with DID_RESET
      */
@@ -1048,6 +1120,13 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
         return;
     }
 
+    if (atomic_read(&evt_struct->free)) {
+        printk(KERN_ERR
+               "ibmvscsi: received duplicate correlation_token 0x%p!\n",
+               (void *)crq->IU_data_ptr);
+        return;
+    }
+
     if (crq->format == VIOSRP_SRP_FORMAT)
         atomic_add(evt_struct->xfer_iu->srp.rsp.request_limit_delta,
                    &hostdata->request_limit);
@@ -1295,6 +1374,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
     hostdata->host = host;
     hostdata->dev = dev;
     atomic_set(&hostdata->request_limit, -1);
+    hostdata->host->max_sectors = 32 * 8; /* default max I/O 32 pages */
 
     if (ibmvscsi_init_crq_queue(&hostdata->queue, hostdata,
                                 max_requests) != 0) {
@@ -1326,7 +1406,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
      */
     for (wait_switch = jiffies + (init_timeout * HZ);
          time_before(jiffies, wait_switch) &&
-         atomic_read(&hostdata->request_limit) < 0;) {
+         atomic_read(&hostdata->request_limit) < 2;) {
 
         msleep(10);
     }
......
@@ -67,6 +67,7 @@ struct srp_event_struct {
     union viosrp_iu iu;
     void (*cmnd_done) (struct scsi_cmnd *);
     struct completion comp;
+    union viosrp_iu *sync_srp;
 };
 
 /* a pool of event structs for use */
......
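Taken together, the sync_srp additions above give the abort and reset handlers a simple synchronous request pattern. A rough sketch of that pattern, condensed from the hunks above (illustrative only, not code from this commit; the wrapper function name is hypothetical, and the locking and tsk_mgmt setup shown in the real handlers are omitted):

/* Illustrative sketch: synchronous SRP task management via sync_srp. */
static int example_sync_srp_request(struct srp_event_struct *evt,
                                    struct ibmvscsi_host_data *hostdata)
{
    union viosrp_iu srp_rsp;

    evt->sync_srp = &srp_rsp;    /* sync_completion() copies the reply here */
    init_completion(&evt->comp);
    if (ibmvscsi_send_srp_event(evt, hostdata) != 0)
        return FAILED;
    wait_for_completion(&evt->comp);

    /* The response IU now lives in srp_rsp; validate it before trusting it. */
    if (srp_rsp.srp.generic.type != SRP_RSP_TYPE)
        return FAILED;
    return srp_rsp.srp.rsp.status ? FAILED : SUCCESS;
}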
@@ -2610,23 +2610,19 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
 #endif
 
 /**
- * ipr_store_queue_depth - Change the device's queue depth
- * @dev: device struct
- * @buf: buffer
+ * ipr_change_queue_depth - Change the device's queue depth
+ * @sdev: scsi device struct
+ * @qdepth: depth to set
  *
  * Return value:
- * number of bytes printed to buffer
+ * actual depth set
 **/
-static ssize_t ipr_store_queue_depth(struct device *dev,
-                                     const char *buf, size_t count)
+static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
 {
-    struct scsi_device *sdev = to_scsi_device(dev);
     struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
     struct ipr_resource_entry *res;
-    int qdepth = simple_strtoul(buf, NULL, 10);
     int tagged = 0;
     unsigned long lock_flags = 0;
-    ssize_t len = -ENXIO;
 
     spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
     res = (struct ipr_resource_entry *)sdev->hostdata;
@@ -2635,23 +2631,13 @@ static ssize_t ipr_store_queue_depth(struct device *dev,
         if (ipr_is_gscsi(res) && res->tcq_active)
             tagged = MSG_ORDERED_TAG;
-        len = strlen(buf);
     }
 
     spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
     scsi_adjust_queue_depth(sdev, tagged, qdepth);
-    return len;
+    return qdepth;
 }
 
-static struct device_attribute ipr_queue_depth_attr = {
-    .attr = {
-        .name = "queue_depth",
-        .mode = S_IRUSR | S_IWUSR,
-    },
-    .store = ipr_store_queue_depth
-};
-
 /**
  * ipr_show_tcq_enable - Show if the device is enabled for tcqing
  * @dev: device struct
@@ -2760,7 +2746,6 @@ static struct device_attribute ipr_adapter_handle_attr = {
 };
 
 static struct device_attribute *ipr_dev_attrs[] = {
-    &ipr_queue_depth_attr,
     &ipr_tcqing_attr,
     &ipr_adapter_handle_attr,
     NULL,
@@ -3961,6 +3946,7 @@ static struct scsi_host_template driver_template = {
     .slave_alloc = ipr_slave_alloc,
     .slave_configure = ipr_slave_configure,
     .slave_destroy = ipr_slave_destroy,
+    .change_queue_depth = ipr_change_queue_depth,
     .bios_param = ipr_biosparam,
     .can_queue = IPR_MAX_COMMANDS,
     .this_id = -1,
......
@@ -54,13 +54,33 @@
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_spi.h>
 
-#include "lasi700.h"
 #include "53c700.h"
 
 MODULE_AUTHOR("James Bottomley");
 MODULE_DESCRIPTION("lasi700 SCSI Driver");
 MODULE_LICENSE("GPL");
 
+#define LASI_700_SVERSION 0x00071
+#define LASI_710_SVERSION 0x00082
+
+#define LASI700_ID_TABLE { \
+    .hw_type = HPHW_FIO, \
+    .sversion = LASI_700_SVERSION, \
+    .hversion = HVERSION_ANY_ID, \
+    .hversion_rev = HVERSION_REV_ANY_ID, \
+}
+
+#define LASI710_ID_TABLE { \
+    .hw_type = HPHW_FIO, \
+    .sversion = LASI_710_SVERSION, \
+    .hversion = HVERSION_ANY_ID, \
+    .hversion_rev = HVERSION_REV_ANY_ID, \
+}
+
+#define LASI700_CLOCK 25
+#define LASI710_CLOCK 40
+
+#define LASI_SCSI_CORE_OFFSET 0x100
+
 static struct parisc_device_id lasi700_ids[] = {
     LASI700_ID_TABLE,
     LASI710_ID_TABLE,
......
/* -*- mode: c; c-basic-offset: 8 -*- */
/* PARISC LASI driver for the 53c700 chip
*
* Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
**-----------------------------------------------------------------------------
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
** GNU General Public License for more details.
**
** You should have received a copy of the GNU General Public License
** along with this program; if not, write to the Free Software
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
**
**-----------------------------------------------------------------------------
*/
#ifndef _LASI700_H
#define _LASI700_H
#define LASI_710_SVERSION 0x082
#define LASI_700_SVERSION 0x071
#define LASI700_ID_TABLE { \
.hw_type = HPHW_FIO, \
.sversion = LASI_700_SVERSION, \
.hversion = HVERSION_ANY_ID, \
.hversion_rev = HVERSION_REV_ANY_ID, \
}
#define LASI710_ID_TABLE { \
.hw_type = HPHW_FIO, \
.sversion = LASI_710_SVERSION, \
.hversion = HVERSION_ANY_ID, \
.hversion_rev = HVERSION_REV_ANY_ID, \
}
#define LASI700_CLOCK 25
#define LASI710_CLOCK 40
#define LASI_SCSI_CORE_OFFSET 0x100
#endif
 /*
- * $Header: /cvsroot/osst/Driver/osst.h,v 1.14 2003/12/14 14:34:38 wriede Exp $
+ * $Header: /cvsroot/osst/Driver/osst.h,v 1.16 2005/01/01 21:13:35 wriede Exp $
  */
 
 #include <asm/byteorder.h>
@@ -70,7 +70,7 @@ typedef struct {
 #define BLOCK_SIZE_PAGE_LENGTH 4
 
 #define BUFFER_FILLING_PAGE 0x33
-#define BUFFER_FILLING_PAGE_LENGTH
+#define BUFFER_FILLING_PAGE_LENGTH 4
 
 #define VENDOR_IDENT_PAGE 0x36
 #define VENDOR_IDENT_PAGE_LENGTH 8
@@ -508,7 +508,7 @@ typedef struct os_header_s {
 //#define OSST_MAX_SG 2
 
 /* The OnStream tape buffer descriptor. */
-typedef struct {
+struct osst_buffer {
   unsigned char in_use;
   unsigned char dma; /* DMA-able buffer */
   int buffer_size;
@@ -525,16 +525,16 @@
   unsigned short sg_segs; /* number of segments in s/g list */
   unsigned short orig_sg_segs; /* number of segments allocated at first try */
   struct scatterlist sg[1]; /* MUST BE last item */
-} OSST_buffer;
+} ;
 
 /* The OnStream tape drive descriptor */
-typedef struct {
+struct osst_tape {
   struct scsi_driver *driver;
   unsigned capacity;
   Scsi_Device* device;
   struct semaphore lock; /* for serialization */
   struct completion wait; /* for SCSI commands */
-  OSST_buffer * buffer;
+  struct osst_buffer * buffer;
 
   /* Drive characteristics */
   unsigned char omit_blklims;
@@ -577,6 +577,7 @@
   int min_block;
   int max_block;
   int recover_count; /* from tape opening */
+  int abort_count;
   int write_count;
   int read_count;
   int recover_erreg; /* from last status call */
@@ -623,7 +624,7 @@
   unsigned char last_sense[16];
 #endif
   struct gendisk *drive;
-} OS_Scsi_Tape;
+} ;
 
 /* Values of write_type */
 #define OS_WRITE_DATA 0
......
@@ -3400,7 +3400,8 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
         sp->flags |= SRB_SENT;
         ha->actthreads++;
         WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
-        (void) RD_REG_WORD(&reg->mailbox4); /* PCI posted write flush */
+        /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
+        mmiowb();
 
  out:
     if (status)
@@ -3668,7 +3669,8 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
         sp->flags |= SRB_SENT;
         ha->actthreads++;
         WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
-        (void) RD_REG_WORD(&reg->mailbox4); /* PCI posted write flush */
+        /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
+        mmiowb();
 
  out:
     if (status)
@@ -3778,9 +3780,21 @@ qla1280_isp_cmd(struct scsi_qla_host *ha)
     } else
         ha->request_ring_ptr++;
 
-    /* Set chip new ring index. */
+    /*
+     * Update request index to mailbox4 (Request Queue In).
+     * The mmiowb() ensures that this write is ordered with writes by other
+     * CPUs. Without the mmiowb(), it is possible for the following:
+     *    CPUA posts write of index 5 to mailbox4
+     *    CPUA releases host lock
+     *    CPUB acquires host lock
+     *    CPUB posts write of index 6 to mailbox4
+     *    On PCI bus, order reverses and write of 6 posts, then index 5,
+     *       causing chip to issue full queue of stale commands
+     * The mmiowb() prevents future writes from crossing the barrier.
+     * See Documentation/DocBook/deviceiobook.tmpl for more information.
+     */
     WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
-    (void) RD_REG_WORD(&reg->mailbox4); /* PCI posted write flush */
+    mmiowb();
 
     LEAVE("qla1280_isp_cmd");
 }
......
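For reference, the ordering problem described in the new qla1280_isp_cmd() comment follows a generic lock-plus-posted-MMIO pattern. A minimal sketch (illustrative only, not part of this commit; the lock, register pointer, and function name are hypothetical stand-ins):

#include <linux/spinlock.h>
#include <asm/io.h>

static DEFINE_SPINLOCK(example_ring_lock);

/* Illustrative only: post a new request-queue index to a device mailbox. */
static void example_post_index(void __iomem *mailbox, u16 index)
{
    unsigned long flags;

    spin_lock_irqsave(&example_ring_lock, flags);
    writew(index, mailbox);    /* posted MMIO write */
    /*
     * Order the MMIO write with the upcoming unlock: without this, a CPU
     * that takes the lock next and writes a newer index may have its write
     * reach the device first, leaving the device with a stale index.
     */
    mmiowb();
    spin_unlock_irqrestore(&example_ring_lock, flags);
}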
@@ -1082,9 +1082,12 @@ sd_read_capacity(struct scsi_disk *sdkp, char *diskname,
                    " READ CAPACITY(16).\n", diskname);
             longrc = 1;
             goto repeat;
-        } else {
-            printk(KERN_ERR "%s: too big for kernel. Assuming maximum 2Tb\n", diskname);
         }
+        printk(KERN_ERR "%s: too big for this kernel. Use a "
+               "kernel compiled with support for large block "
+               "devices.\n", diskname);
+        sdkp->capacity = 0;
+        goto got_data;
     }
     sdkp->capacity = 1 + (((sector_t)buffer[0] << 24) |
             (buffer[1] << 16) |
......
@@ -170,7 +170,7 @@ static void *__sym_calloc2(m_pool_p mp, int size, char *name, int uflags)
     }
 
     if (p)
-        bzero(p, size);
+        memset(p, 0, size);
     else if (uflags & SYM_MEM_WARN)
         printf ("__sym_calloc2: failed to allocate %s[%d]\n", name, size);
     return p;
......