Commit f10378ff authored by Markus Lidel, committed by Linus Torvalds

[PATCH] I2O: new sysfs attributes and Adaptec specific block device access and 64-bit DMA support

Changes:
 - Added Bus-OSM, which can be used by user-space programs to reset a
   channel on the controller
 - Make the ioctls in Config-OSM obsolete in favor of sysfs attributes and
   move them into their own file
 - Added sysfs attribute for firmware read and write access for I2O
   controllers
 - Added special handling of firmware read and write access for Adaptec
   controllers
 - Added vendor id and product id as sysfs-attribute to Executive classes
 - Added automatic LCT change notification handling to Exec-OSM
 - Added flushing function to Block-OSM for later barrier implementation
 - Use PRIVATE messages for Block access on Adaptec controllers, which are
   faster than BLOCK class access
 - Cleaned up support for Promise controller
 - New messages are now detected using the IRQ status register as
   suggested by the I2O spec
 - Added i2o_dma_high() and i2o_dma_low() functions
 - Added facility for SG tablesize calculation when using 32-bit and
   64-bit DMA addresses
 - Added i2o_dma_map_single() and i2o_dma_map_sg(), which can build the
   SG list for 32-bit as well as 64-bit DMA addresses (see the usage
   sketch after this list)
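
A rough usage sketch of the new mapping helper (illustrative only: c, msg,
buf and size stand for an OSM's controller, an already obtained message
frame, the data buffer and its length; error handling is trimmed):

    u32 __iomem *mptr = &msg->body[2];  /* wherever this message's SGL starts */
    dma_addr_t addr;

    addr = i2o_dma_map_single(c, buf, size, DMA_TO_DEVICE, &mptr);
    if (dma_mapping_error(addr))
            return -ENOMEM;
    /* the SG element(s) are now written into the message and mptr points
       past them, ready for the message to be posted */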
Signed-off-by: Markus Lidel <Markus.Lidel@shadowconnect.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f88e119c
......@@ -35,6 +35,24 @@ config I2O_CONFIG
To compile this support as a module, choose M here: the
module will be called i2o_config.
config I2O_CONFIG_OLD_IOCTL
bool "Enable ioctls (OBSOLETE)"
depends on I2O_CONFIG
default y
---help---
Enables old ioctls.
config I2O_BUS
tristate "I2O Bus Adapter OSM"
depends on I2O
---help---
Include support for the I2O Bus Adapter OSM. The Bus Adapter OSM
provides access to the busses on the I2O controller. The main purpose
is to rescan the bus to find new devices.
To compile this support as a module, choose M here: the
module will be called i2o_bus.
config I2O_BLOCK
tristate "I2O Block OSM"
depends on I2O
......
......@@ -6,8 +6,11 @@
#
i2o_core-y += iop.o driver.o device.o debug.o pci.o exec-osm.o
i2o_bus-y += bus-osm.o
i2o_config-y += config-osm.o
obj-$(CONFIG_I2O) += i2o_core.o
obj-$(CONFIG_I2O_CONFIG)+= i2o_config.o
obj-$(CONFIG_I2O_BUS) += i2o_bus.o
obj-$(CONFIG_I2O_BLOCK) += i2o_block.o
obj-$(CONFIG_I2O_SCSI) += i2o_scsi.o
obj-$(CONFIG_I2O_PROC) += i2o_proc.o
/*
* Bus Adapter OSM
*
* Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Fixes/additions:
* Markus Lidel <Markus.Lidel@shadowconnect.com>
* initial version.
*/
#include <linux/module.h>
#include <linux/i2o.h>
#define OSM_NAME "bus-osm"
#define OSM_VERSION "$Rev$"
#define OSM_DESCRIPTION "I2O Bus Adapter OSM"
static struct i2o_driver i2o_bus_driver;
/* Bus OSM class handling definition */
static struct i2o_class_id i2o_bus_class_id[] = {
{I2O_CLASS_BUS_ADAPTER},
{I2O_CLASS_END}
};
/**
* i2o_bus_scan - Scan the bus for new devices
* @dev: I2O device of the bus, which should be scanned
*
* Scans the bus dev for new / removed devices. After the scan a new LCT
* will be fetched automatically.
*
* Returns 0 on success or negative error code on failure.
*/
static int i2o_bus_scan(struct i2o_device *dev)
{
struct i2o_message __iomem *msg;
u32 m;
m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
if (m == I2O_QUEUE_EMPTY)
return -ETIMEDOUT;
writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid,
&msg->u.head[1]);
return i2o_msg_post_wait(dev->iop, m, 60);
};
/**
* i2o_bus_store_scan - Scan the I2O Bus Adapter
* @d: device which should be scanned
* @buf: write buffer (not used)
* @count: number of bytes written
*
* Triggers a bus scan on the underlying I2O device and warns if the scan
* fails.
*
* Returns count.
*/
static ssize_t i2o_bus_store_scan(struct device *d, const char *buf,
size_t count)
{
struct i2o_device *i2o_dev = to_i2o_device(d);
int rc;
if ((rc = i2o_bus_scan(i2o_dev)))
osm_warn("bus scan failed %d\n", rc);
return count;
}
/* Bus Adapter OSM device attributes */
static DEVICE_ATTR(scan, S_IWUSR, NULL, i2o_bus_store_scan);
/**
* i2o_bus_probe - verify if dev is an I2O Bus Adapter device and install it
* @dev: device to verify if it is an I2O Bus Adapter device
*
* Because we want to claim all Bus Adapters, this function always returns 0.
*
* Returns 0.
*/
static int i2o_bus_probe(struct device *dev)
{
struct i2o_device *i2o_dev = to_i2o_device(get_device(dev));
device_create_file(dev, &dev_attr_scan);
osm_info("device added (TID: %03x)\n", i2o_dev->lct_data.tid);
return 0;
};
/**
* i2o_bus_remove - remove the I2O Bus Adapter device from the system again
* @dev: I2O Bus Adapter device which should be removed
*
* Always returns 0.
*/
static int i2o_bus_remove(struct device *dev)
{
struct i2o_device *i2o_dev = to_i2o_device(dev);
device_remove_file(dev, &dev_attr_scan);
put_device(dev);
osm_info("device removed (TID: %03x)\n", i2o_dev->lct_data.tid);
return 0;
};
/* Bus Adapter OSM driver struct */
static struct i2o_driver i2o_bus_driver = {
.name = OSM_NAME,
.classes = i2o_bus_class_id,
.driver = {
.probe = i2o_bus_probe,
.remove = i2o_bus_remove,
},
};
/**
* i2o_bus_init - Bus Adapter OSM initialization function
*
* Only register the Bus Adapter OSM in the I2O core.
*
* Returns 0 on success or negative error code on failure.
*/
static int __init i2o_bus_init(void)
{
int rc;
printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
/* Register Bus Adapter OSM into I2O core */
rc = i2o_driver_register(&i2o_bus_driver);
if (rc) {
osm_err("Could not register Bus Adapter OSM\n");
return rc;
}
return 0;
};
/**
* i2o_bus_exit - Bus Adapter OSM exit function
*
* Unregisters Bus Adapter OSM from I2O core.
*/
static void __exit i2o_bus_exit(void)
{
i2o_driver_unregister(&i2o_bus_driver);
};
MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);
module_init(i2o_bus_init);
module_exit(i2o_bus_exit);
/*
* Configuration OSM
*
* Copyright (C) 2005 Markus Lidel <Markus.Lidel@shadowconnect.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Fixes/additions:
* Markus Lidel <Markus.Lidel@shadowconnect.com>
* initial version.
*/
#include <linux/module.h>
#include <linux/i2o.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
#define OSM_NAME "config-osm"
#define OSM_VERSION "1.248"
#define OSM_DESCRIPTION "I2O Configuration OSM"
/* access mode user rw */
#define S_IWRSR (S_IRUSR | S_IWUSR)
static struct i2o_driver i2o_config_driver;
/* Special file operations for sysfs */
struct fops_attribute {
struct bin_attribute bin;
struct file_operations fops;
};
/**
* sysfs_read_dummy
*/
static ssize_t sysfs_read_dummy(struct kobject *kobj, char *buf, loff_t offset,
size_t count)
{
return 0;
};
/**
* sysfs_write_dummy
*/
static ssize_t sysfs_write_dummy(struct kobject *kobj, char *buf, loff_t offset,
size_t count)
{
return 0;
};
/**
* sysfs_create_fops_file - Creates attribute with special file operations
* @kobj: kobject which should contain the attribute
* @attr: attributes which should be used to create the file
*
* First creates the attribute @attr in kobject @kobj. The first time this
* function is called, the old fops from sysfs are merged with the new ones
* and written back. Afterwards the new fops are set for the created
* attribute.
*
* Returns 0 on success or negative error code on failure.
*/
static int sysfs_create_fops_file(struct kobject *kobj,
struct fops_attribute *attr)
{
struct file_operations tmp, *fops;
struct dentry *d;
struct qstr qstr;
int rc;
fops = &attr->fops;
if (fops->read)
attr->bin.read = sysfs_read_dummy;
if (fops->write)
attr->bin.write = sysfs_write_dummy;
if ((rc = sysfs_create_bin_file(kobj, &attr->bin)))
return rc;
qstr.name = attr->bin.attr.name;
qstr.len = strlen(qstr.name);
qstr.hash = full_name_hash(qstr.name, qstr.len);
if ((d = lookup_hash(&qstr, kobj->dentry))) {
if (!fops->owner) {
memcpy(&tmp, d->d_inode->i_fop, sizeof(tmp));
if (fops->read)
tmp.read = fops->read;
if (fops->write)
tmp.write = fops->write;
memcpy(fops, &tmp, sizeof(tmp));
}
d->d_inode->i_fop = fops;
} else {
sysfs_remove_bin_file(kobj, &attr->bin);
return -ENOENT;
}
return 0;
};
/**
* sysfs_remove_fops_file - Remove attribute with special file operations
* @kobj: kobject which contains the attribute
* @attr: attributes which are used to create file
*
* Only a wrapper around sysfs_remove_bin_file().
*
* Returns 0 on success or negative error code on failure.
*/
static inline int sysfs_remove_fops_file(struct kobject *kobj,
struct fops_attribute *attr)
{
return sysfs_remove_bin_file(kobj, &attr->bin);
};
/**
* i2o_config_read_hrt - Returns the HRT of the controller
* @kobj: kernel object handle
* @buf: buffer into which the HRT should be copied
* @off: file offset
* @count: number of bytes to read
*
* Put @count bytes starting at @off into @buf from the HRT of the I2O
* controller corresponding to @kobj.
*
* Returns number of bytes copied into buffer.
*/
static ssize_t i2o_config_read_hrt(struct kobject *kobj, char *buf,
loff_t offset, size_t count)
{
struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop;
i2o_hrt *hrt = c->hrt.virt;
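/* HRT length in bytes: all entries plus the two-word HRT header, converted from 32-bit words to bytes */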
u32 size = (hrt->num_entries * hrt->entry_len + 2) * 4;
if (offset > size)
return 0;
if (offset + count > size)
count = size - offset;
memcpy(buf, (u8 *) hrt + offset, count);
return count;
};
/**
* i2o_config_read_lct - Returns the LCT of the controller
* @kobj: kernel object handle
* @buf: buffer into which the LCT should be copied
* @off: file offset
* @count: number of bytes to read
*
* Put @count bytes starting at @off into @buf from the LCT of the I2O
* controller corresponding to @kobj.
*
* Returns number of bytes copied into buffer.
*/
static ssize_t i2o_config_read_lct(struct kobject *kobj, char *buf,
loff_t offset, size_t count)
{
struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop;
u32 size = c->lct->table_size * 4;
if (offset > size)
return 0;
if (offset + count > size)
count = size - offset;
memcpy(buf, (u8 *) c->lct + offset, count);
return count;
};
#define I2O_CONFIG_SW_ATTR(_name,_mode,_type,_swid) \
static ssize_t i2o_config_##_name##_read(struct file *file, char __user *buf, size_t count, loff_t * offset) { \
return i2o_config_sw_read(file, buf, count, offset, _type, _swid); \
};\
\
static ssize_t i2o_config_##_name##_write(struct file *file, const char __user *buf, size_t count, loff_t * offset) { \
return i2o_config_sw_write(file, buf, count, offset, _type, _swid); \
}; \
\
static struct fops_attribute i2o_config_attr_##_name = { \
.bin = { .attr = { .name = __stringify(_name), .mode = _mode, \
.owner = THIS_MODULE }, \
.size = 0, }, \
.fops = { .write = i2o_config_##_name##_write, \
.read = i2o_config_##_name##_read} \
};
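/*
 * For example, I2O_CONFIG_SW_ATTR(irtos, S_IWRSR, I2O_SOFTWARE_MODULE_IRTOS, 0)
 * expands to i2o_config_irtos_read() / i2o_config_irtos_write() wrappers
 * around i2o_config_sw_read() / i2o_config_sw_write() and to the
 * fops_attribute i2o_config_attr_irtos backing the sysfs file "irtos".
 */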
#ifdef CONFIG_I2O_EXT_ADAPTEC
/**
* i2o_config_dpt_region - Converts type and id to flash region
* @swtype: type of software module reading
* @swid: id of software which should be read
*
* Converts type and id from I2O spec to the matching region for DPT /
* Adaptec controllers.
*
* Returns the region which matches type and id, or -1 on error.
*/
static u32 i2o_config_dpt_region(u8 swtype, u8 swid)
{
switch (swtype) {
case I2O_SOFTWARE_MODULE_IRTOS:
/*
* content: operation firmware
* region size:
* 0xbc000 for 2554, 3754, 2564, 3757
* 0x170000 for 2865
* 0x17c000 for 3966
*/
if (!swid)
return 0;
break;
case I2O_SOFTWARE_MODULE_IOP_PRIVATE:
/*
* content: BIOS and SMOR
* BIOS size: first 0x8000 bytes
* region size:
* 0x40000 for 2554, 3754, 2564, 3757
* 0x80000 for 2865, 3966
*/
if (!swid)
return 1;
break;
case I2O_SOFTWARE_MODULE_IOP_CONFIG:
switch (swid) {
case 0:
/*
* content: NVRAM defaults
* region size: 0x2000 bytes
*/
return 2;
case 1:
/*
* content: serial number
* region size: 0x2000 bytes
*/
return 3;
}
break;
}
return -1;
};
#endif
/**
* i2o_config_sw_read - Read a software module from controller
* @file: file pointer
* @buf: buffer into which the data should be copied
* @count: number of bytes to read
* @off: file offset
* @swtype: type of software module to read
* @swid: id of software which should be read
*
* Transfers @count bytes at offset @offset from IOP into buffer using
* type @swtype and id @swid as described in I2O spec.
*
* Returns number of bytes copied into buffer or error code on failure.
*/
static ssize_t i2o_config_sw_read(struct file *file, char __user * buf,
size_t count, loff_t * offset, u8 swtype,
u32 swid)
{
struct sysfs_dirent *sd = file->f_dentry->d_parent->d_fsdata;
struct kobject *kobj = sd->s_element;
struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop;
u32 m, function = I2O_CMD_SW_UPLOAD;
struct i2o_dma buffer;
struct i2o_message __iomem *msg;
u32 __iomem *mptr;
int rc, status;
m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
if (m == I2O_QUEUE_EMPTY)
return -EBUSY;
mptr = &msg->body[3];
if ((rc = i2o_dma_alloc(&c->pdev->dev, &buffer, count, GFP_KERNEL))) {
i2o_msg_nop(c, m);
return rc;
}
#ifdef CONFIG_I2O_EXT_ADAPTEC
if (c->adaptec) {
mptr = &msg->body[4];
function = I2O_CMD_PRIVATE;
writel(TEN_WORD_MSG_SIZE | SGL_OFFSET_8, &msg->u.head[0]);
writel(I2O_VENDOR_DPT << 16 | I2O_DPT_FLASH_READ,
&msg->body[0]);
writel(i2o_config_dpt_region(swtype, swid), &msg->body[1]);
writel(*offset, &msg->body[2]);
writel(count, &msg->body[3]);
} else
#endif
writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
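/*
 * One simple SG element for the transfer buffer: 0xD0000000 = simple |
 * end-of-buffer | last-element flags OR'd with the byte count, followed
 * by the 32-bit DMA address of the buffer.
 */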
writel(0xD0000000 | count, mptr++);
writel(buffer.phys, mptr);
writel(function << 24 | HOST_TID << 12 | ADAPTER_TID, &msg->u.head[1]);
writel(i2o_config_driver.context, &msg->u.head[2]);
writel(0, &msg->u.head[3]);
#ifdef CONFIG_I2O_EXT_ADAPTEC
if (!c->adaptec)
#endif
{
writel((u32) swtype << 16 | (u32) 1 << 8, &msg->body[0]);
writel(0, &msg->body[1]);
writel(swid, &msg->body[2]);
}
status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
if (status == I2O_POST_WAIT_OK) {
if (!(rc = copy_to_user(buf, buffer.virt, count))) {
rc = count;
*offset += count;
}
} else
rc = -EIO;
if (status != -ETIMEDOUT)
i2o_dma_free(&c->pdev->dev, &buffer);
return rc;
};
/**
* i2o_config_sw_write - Write a software module to controller
* @file: file pointer
* @buf: buffer from which the data should be copied
* @count: number of bytes to write
* @off: file offset
* @swtype: type of software module to write
* @swid: id of software which should be written
*
* Transfers @count bytes at offset @offset from buffer to IOP using
* type @swtype and id @swid as described in I2O spec.
*
* Returns number of bytes copied from buffer or error code on failure.
*/
static ssize_t i2o_config_sw_write(struct file *file, const char __user * buf,
size_t count, loff_t * offset, u8 swtype,
u32 swid)
{
struct sysfs_dirent *sd = file->f_dentry->d_parent->d_fsdata;
struct kobject *kobj = sd->s_element;
struct i2o_controller *c = kobj_to_i2o_device(kobj)->iop;
u32 m, function = I2O_CMD_SW_DOWNLOAD;
struct i2o_dma buffer;
struct i2o_message __iomem *msg;
u32 __iomem *mptr;
int rc, status;
m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
if (m == I2O_QUEUE_EMPTY)
return -EBUSY;
mptr = &msg->body[3];
if ((rc = i2o_dma_alloc(&c->pdev->dev, &buffer, count, GFP_KERNEL)))
goto nop_msg;
if ((rc = copy_from_user(buffer.virt, buf, count)))
goto free_buffer;
#ifdef CONFIG_I2O_EXT_ADAPTEC
if (c->adaptec) {
mptr = &msg->body[4];
function = I2O_CMD_PRIVATE;
writel(TEN_WORD_MSG_SIZE | SGL_OFFSET_8, &msg->u.head[0]);
writel(I2O_VENDOR_DPT << 16 | I2O_DPT_FLASH_WRITE,
&msg->body[0]);
writel(i2o_config_dpt_region(swtype, swid), &msg->body[1]);
writel(*offset, &msg->body[2]);
writel(count, &msg->body[3]);
} else
#endif
writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
writel(0xD4000000 | count, mptr++);
writel(buffer.phys, mptr);
writel(function << 24 | HOST_TID << 12 | ADAPTER_TID, &msg->u.head[1]);
writel(i2o_config_driver.context, &msg->u.head[2]);
writel(0, &msg->u.head[3]);
#ifdef CONFIG_I2O_EXT_ADAPTEC
if (!c->adaptec)
#endif
{
writel((u32) swtype << 16 | (u32) 1 << 8, &msg->body[0]);
writel(0, &msg->body[1]);
writel(swid, &msg->body[2]);
}
status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
if (status != -ETIMEDOUT)
i2o_dma_free(&c->pdev->dev, &buffer);
if (status != I2O_POST_WAIT_OK)
return -EIO;
*offset += count;
return count;
free_buffer:
i2o_dma_free(&c->pdev->dev, &buffer);
nop_msg:
i2o_msg_nop(c, m);
return rc;
};
/* attribute for HRT in sysfs */
static struct bin_attribute i2o_config_hrt_attr = {
.attr = {
.name = "hrt",
.mode = S_IRUGO,
.owner = THIS_MODULE},
.size = 0,
.read = i2o_config_read_hrt
};
/* attribute for LCT in sysfs */
static struct bin_attribute i2o_config_lct_attr = {
.attr = {
.name = "lct",
.mode = S_IRUGO,
.owner = THIS_MODULE},
.size = 0,
.read = i2o_config_read_lct
};
/* IRTOS firmware access */
I2O_CONFIG_SW_ATTR(irtos, S_IWRSR, I2O_SOFTWARE_MODULE_IRTOS, 0);
#ifdef CONFIG_I2O_EXT_ADAPTEC
/*
* attribute for BIOS / SMOR, nvram and serial number access on DPT / Adaptec
* controllers
*/
I2O_CONFIG_SW_ATTR(bios, S_IWRSR, I2O_SOFTWARE_MODULE_IOP_PRIVATE, 0);
I2O_CONFIG_SW_ATTR(nvram, S_IWRSR, I2O_SOFTWARE_MODULE_IOP_CONFIG, 0);
I2O_CONFIG_SW_ATTR(serial, S_IWRSR, I2O_SOFTWARE_MODULE_IOP_CONFIG, 1);
#endif
/**
* i2o_config_notify_controller_add - Notify of added controller
* @c: the controller which was added
*
* If an I2O controller is added, we catch the notification to add sysfs
* entries.
*/
static void i2o_config_notify_controller_add(struct i2o_controller *c)
{
struct kobject *kobj = &c->exec->device.kobj;
sysfs_create_bin_file(kobj, &i2o_config_hrt_attr);
sysfs_create_bin_file(kobj, &i2o_config_lct_attr);
sysfs_create_fops_file(kobj, &i2o_config_attr_irtos);
#ifdef CONFIG_I2O_EXT_ADAPTEC
if (c->adaptec) {
sysfs_create_fops_file(kobj, &i2o_config_attr_bios);
sysfs_create_fops_file(kobj, &i2o_config_attr_nvram);
sysfs_create_fops_file(kobj, &i2o_config_attr_serial);
}
#endif
};
/**
* i2o_config_notify_controller_remove - Notify of removed controller
* @c: the controller which was removed
*
* If an I2O controller is removed, we catch the notification to remove the
* sysfs entries.
*/
static void i2o_config_notify_controller_remove(struct i2o_controller *c)
{
struct kobject *kobj = &c->exec->device.kobj;
#ifdef CONFIG_I2O_EXT_ADAPTEC
if (c->adaptec) {
sysfs_remove_fops_file(kobj, &i2o_config_attr_serial);
sysfs_remove_fops_file(kobj, &i2o_config_attr_nvram);
sysfs_remove_fops_file(kobj, &i2o_config_attr_bios);
}
#endif
sysfs_remove_fops_file(kobj, &i2o_config_attr_irtos);
sysfs_remove_bin_file(kobj, &i2o_config_lct_attr);
sysfs_remove_bin_file(kobj, &i2o_config_hrt_attr);
};
/* Config OSM driver struct */
static struct i2o_driver i2o_config_driver = {
.name = OSM_NAME,
.notify_controller_add = i2o_config_notify_controller_add,
.notify_controller_remove = i2o_config_notify_controller_remove
};
#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
#include "i2o_config.c"
#endif
/**
* i2o_config_init - Configuration OSM initialization function
*
* Registers the Configuration OSM in the I2O core and, if the old ioctls
* are compiled in, initializes them.
*
* Returns 0 on success or negative error code on failure.
*/
static int __init i2o_config_init(void)
{
printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
if (i2o_driver_register(&i2o_config_driver)) {
osm_err("handler register failed.\n");
return -EBUSY;
}
#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
if (i2o_config_old_init())
i2o_driver_unregister(&i2o_config_driver);
#endif
return 0;
}
/**
* i2o_config_exit - Configuration OSM exit function
*
* If the old ioctls are compiled in, removes them and unregisters the
* Configuration OSM from the I2O core.
*/
static void i2o_config_exit(void)
{
#ifdef CONFIG_I2O_CONFIG_OLD_IOCTL
i2o_config_old_exit();
#endif
i2o_driver_unregister(&i2o_config_driver);
}
MODULE_AUTHOR("Markus Lidel <Markus.Lidel@shadowconnect.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);
module_init(i2o_config_init);
module_exit(i2o_config_exit);
......@@ -180,7 +180,13 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
{
struct i2o_driver *drv;
struct i2o_message __iomem *msg = i2o_msg_out_to_virt(c, m);
u32 context = readl(&msg->u.s.icntxt);
u32 context;
unsigned long flags;
if(unlikely(!msg))
return -EIO;
context = readl(&msg->u.s.icntxt);
if (unlikely(context >= i2o_max_drivers)) {
osm_warn("%s: Spurious reply to unknown driver %d\n", c->name,
......@@ -188,9 +194,9 @@ int i2o_driver_dispatch(struct i2o_controller *c, u32 m)
return -EIO;
}
spin_lock(&i2o_drivers_lock);
spin_lock_irqsave(&i2o_drivers_lock, flags);
drv = i2o_drivers[context];
spin_unlock(&i2o_drivers_lock);
spin_unlock_irqrestore(&i2o_drivers_lock, flags);
if (unlikely(!drv)) {
osm_warn("%s: Spurious reply to unknown driver %d\n", c->name,
......
......@@ -206,6 +206,7 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
u32 context)
{
struct i2o_exec_wait *wait, *tmp;
unsigned long flags;
static spinlock_t lock = SPIN_LOCK_UNLOCKED;
int rc = 1;
......@@ -216,11 +217,13 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
* already expired. Not much we can do about that except log it for
* debug purposes, increase timeout, and recompile.
*/
spin_lock(&lock);
spin_lock_irqsave(&lock, flags);
list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
if (wait->tcntxt == context) {
list_del(&wait->list);
spin_unlock_irqrestore(&lock, flags);
wait->m = m;
wait->msg = msg;
wait->complete = 1;
......@@ -242,13 +245,11 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
rc = -1;
}
spin_unlock(&lock);
return rc;
}
}
spin_unlock(&lock);
spin_unlock_irqrestore(&lock, flags);
osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
context);
......@@ -256,6 +257,50 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
return -1;
};
/**
* i2o_exec_show_vendor_id - Displays Vendor ID of controller
* @d: device of which the Vendor ID should be displayed
* @buf: buffer into which the Vendor ID should be printed
*
* Returns number of bytes printed into buffer.
*/
static ssize_t i2o_exec_show_vendor_id(struct device *d, char *buf)
{
struct i2o_device *dev = to_i2o_device(d);
u16 id;
if (i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) {
sprintf(buf, "0x%04x", id);
return strlen(buf) + 1;
}
return 0;
};
/**
* i2o_exec_show_product_id - Displays Product ID of controller
* @d: device of which the Product ID should be displayed
* @buf: buffer into which the Product ID should be printed
*
* Returns number of bytes printed into buffer.
*/
static ssize_t i2o_exec_show_product_id(struct device *d, char *buf)
{
struct i2o_device *dev = to_i2o_device(d);
u16 id;
if (i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) {
sprintf(buf, "0x%04x", id);
return strlen(buf) + 1;
}
return 0;
};
/* Exec-OSM device attributes */
static DEVICE_ATTR(vendor_id, S_IRUGO, i2o_exec_show_vendor_id, NULL);
static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL);
/**
* i2o_exec_probe - Called if a new I2O device (executive class) appears
* @dev: I2O device which should be probed
......@@ -268,10 +313,16 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
static int i2o_exec_probe(struct device *dev)
{
struct i2o_device *i2o_dev = to_i2o_device(dev);
struct i2o_controller *c = i2o_dev->iop;
i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
i2o_dev->iop->exec = i2o_dev;
c->exec = i2o_dev;
i2o_exec_lct_notify(c, c->lct->change_ind + 1);
device_create_file(dev, &dev_attr_vendor_id);
device_create_file(dev, &dev_attr_product_id);
return 0;
};
......@@ -286,6 +337,9 @@ static int i2o_exec_probe(struct device *dev)
*/
static int i2o_exec_remove(struct device *dev)
{
device_remove_file(dev, &dev_attr_product_id);
device_remove_file(dev, &dev_attr_vendor_id);
i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0);
return 0;
......@@ -297,12 +351,16 @@ static int i2o_exec_remove(struct device *dev)
*
* This function handles asynchronous LCT NOTIFY replies. It parses the
* new LCT and if the buffer for the LCT was too small sends a LCT NOTIFY
* again.
* again, otherwise send LCT NOTIFY to get informed on next LCT change.
*/
static void i2o_exec_lct_modified(struct i2o_controller *c)
{
if (i2o_device_parse_lct(c) == -EAGAIN)
i2o_exec_lct_notify(c, 0);
u32 change_ind = 0;
if (i2o_device_parse_lct(c) != -EAGAIN)
change_ind = c->lct->change_ind + 1;
i2o_exec_lct_notify(c, change_ind);
};
/**
......
......@@ -146,6 +146,29 @@ static int i2o_block_device_flush(struct i2o_device *dev)
return i2o_msg_post_wait(dev->iop, m, 60);
};
/**
* i2o_block_issue_flush - device-flush interface for block-layer
* @queue: the request queue of the device which should be flushed
* @disk: gendisk
* @error_sector: error offset
*
* Helper function to provide flush functionality to block-layer.
*
* Returns 0 on success or negative error code on failure.
*/
static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
sector_t * error_sector)
{
struct i2o_block_device *i2o_blk_dev = queue->queuedata;
int rc = -ENODEV;
if (likely(i2o_blk_dev))
rc = i2o_block_device_flush(i2o_blk_dev->i2o_dev);
return rc;
}
/**
* i2o_block_device_mount - Mount (load) the media of device dev
* @dev: I2O device which should receive the mount request
......@@ -299,28 +322,31 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq)
/**
* i2o_block_sglist_alloc - Allocate the SG list and map it
* @c: I2O controller to which the request belongs
* @ireq: I2O block request
*
* Builds the SG list and map it into to be accessable by the controller.
* Builds the SG list and maps it to be accessible by the controller.
*
* Returns the number of elements in the SG list or 0 on failure.
* Returns 0 on failure or 1 on success.
*/
static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq)
static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
struct i2o_block_request *ireq,
u32 __iomem ** mptr)
{
struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
int nents;
enum dma_data_direction direction;
ireq->dev = &c->pdev->dev;
nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);
if (rq_data_dir(ireq->req) == READ)
ireq->sg_dma_direction = PCI_DMA_FROMDEVICE;
direction = PCI_DMA_FROMDEVICE;
else
ireq->sg_dma_direction = PCI_DMA_TODEVICE;
direction = PCI_DMA_TODEVICE;
ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents,
ireq->sg_dma_direction);
ireq->sg_nents = nents;
return ireq->sg_nents;
return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr);
};
/**
......@@ -331,10 +357,14 @@ static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq)
*/
static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
{
struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
enum dma_data_direction direction;
if (rq_data_dir(ireq->req) == READ)
direction = PCI_DMA_FROMDEVICE;
else
direction = PCI_DMA_TODEVICE;
dma_unmap_sg(dev, ireq->sg_table, ireq->sg_nents,
ireq->sg_dma_direction);
dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction);
};
/**
......@@ -352,6 +382,11 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
struct i2o_block_device *i2o_blk_dev = q->queuedata;
struct i2o_block_request *ireq;
if (unlikely(!i2o_blk_dev)) {
osm_err("block device already removed\n");
return BLKPREP_KILL;
}
/* request is already processed by us, so return */
if (req->flags & REQ_SPECIAL) {
osm_debug("REQ_SPECIAL already set!\n");
......@@ -414,11 +449,11 @@ static void i2o_block_end_request(struct request *req, int uptodate,
{
struct i2o_block_request *ireq = req->special;
struct i2o_block_device *dev = ireq->i2o_blk_dev;
request_queue_t *q = dev->gd->queue;
request_queue_t *q = req->q;
unsigned long flags;
if (end_that_request_chunk(req, uptodate, nr_bytes)) {
int leftover = (req->hard_nr_sectors << 9);
int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);
if (blk_pc_request(req))
leftover = req->data_len;
......@@ -432,8 +467,11 @@ static void i2o_block_end_request(struct request *req, int uptodate,
spin_lock_irqsave(q->queue_lock, flags);
end_that_request_last(req);
if (likely(dev)) {
dev->open_queue_depth--;
list_del(&ireq->queue);
}
blk_start_queue(q);
......@@ -483,8 +521,8 @@ static int i2o_block_reply(struct i2o_controller *c, u32 m,
* Don't stick a supertrak100 into cache aggressive modes
*/
osm_err("%03x error status: %02x, detailed status: %04x\n",
(le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
osm_err("TID %03x error status: 0x%02x, detailed status: "
"0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
status >> 24, status & 0xffff);
req->errors++;
......@@ -705,18 +743,25 @@ static int i2o_block_media_changed(struct gendisk *disk)
static int i2o_block_transfer(struct request *req)
{
struct i2o_block_device *dev = req->rq_disk->private_data;
struct i2o_controller *c = dev->i2o_dev->iop;
struct i2o_controller *c;
int tid = dev->i2o_dev->lct_data.tid;
struct i2o_message __iomem *msg;
void __iomem *mptr;
u32 __iomem *mptr;
struct i2o_block_request *ireq = req->special;
struct scatterlist *sg;
int sgnum;
int i;
u32 m;
u32 tcntxt;
u32 sg_flags;
u32 sgl_offset = SGL_OFFSET_8;
u32 ctl_flags = 0x00000000;
int rc;
u32 cmd;
if (unlikely(!dev->i2o_dev)) {
osm_err("transfer to removed drive\n");
rc = -ENODEV;
goto exit;
}
c = dev->i2o_dev->iop;
m = i2o_msg_get(c, &msg);
if (m == I2O_QUEUE_EMPTY) {
......@@ -730,80 +775,109 @@ static int i2o_block_transfer(struct request *req)
goto nop_msg;
}
if ((sgnum = i2o_block_sglist_alloc(ireq)) <= 0) {
rc = -ENOMEM;
goto context_remove;
}
/* Build the message based on the request. */
writel(i2o_block_driver.context, &msg->u.s.icntxt);
writel(tcntxt, &msg->u.s.tcntxt);
writel(req->nr_sectors << 9, &msg->body[1]);
writel((((u64) req->sector) << 9) & 0xffffffff, &msg->body[2]);
writel(req->sector >> 23, &msg->body[3]);
mptr = &msg->body[4];
sg = ireq->sg_table;
mptr = &msg->body[0];
if (rq_data_dir(req) == READ) {
writel(I2O_CMD_BLOCK_READ << 24 | HOST_TID << 12 | tid,
&msg->u.head[1]);
sg_flags = 0x10000000;
cmd = I2O_CMD_BLOCK_READ << 24;
switch (dev->rcache) {
case CACHE_NULL:
writel(0, &msg->body[0]);
break;
case CACHE_PREFETCH:
writel(0x201F0008, &msg->body[0]);
ctl_flags = 0x201F0008;
break;
case CACHE_SMARTFETCH:
if (req->nr_sectors > 16)
writel(0x201F0008, &msg->body[0]);
ctl_flags = 0x201F0008;
else
writel(0x001F0000, &msg->body[0]);
ctl_flags = 0x001F0000;
break;
default:
break;
}
} else {
writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid,
&msg->u.head[1]);
sg_flags = 0x14000000;
cmd = I2O_CMD_BLOCK_WRITE << 24;
switch (dev->wcache) {
case CACHE_NULL:
writel(0, &msg->body[0]);
break;
case CACHE_WRITETHROUGH:
writel(0x001F0008, &msg->body[0]);
ctl_flags = 0x001F0008;
break;
case CACHE_WRITEBACK:
writel(0x001F0010, &msg->body[0]);
ctl_flags = 0x001F0010;
break;
case CACHE_SMARTBACK:
if (req->nr_sectors > 16)
writel(0x001F0004, &msg->body[0]);
ctl_flags = 0x001F0004;
else
writel(0x001F0010, &msg->body[0]);
ctl_flags = 0x001F0010;
break;
case CACHE_SMARTTHROUGH:
if (req->nr_sectors > 16)
writel(0x001F0004, &msg->body[0]);
ctl_flags = 0x001F0004;
else
writel(0x001F0010, &msg->body[0]);
ctl_flags = 0x001F0010;
default:
break;
}
}
for (i = sgnum; i > 0; i--) {
if (i == 1)
sg_flags |= 0x80000000;
writel(sg_flags | sg_dma_len(sg), mptr);
writel(sg_dma_address(sg), mptr + 4);
mptr += 8;
sg++;
#ifdef CONFIG_I2O_EXT_ADAPTEC
if (c->adaptec) {
u8 cmd[10];
u32 scsi_flags;
u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT;
memset(cmd, 0, 10);
sgl_offset = SGL_OFFSET_12;
writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid,
&msg->u.head[1]);
writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++);
writel(tid, mptr++);
/*
* ENABLE_DISCONNECT
* SIMPLE_TAG
* RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
*/
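/*
 * Build a READ(10)/WRITE(10) CDB: opcode 0x28 or 0x2A, logical block
 * address in bytes 2-5 and transfer length in bytes 7-8, both big endian.
 */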
if (rq_data_dir(req) == READ) {
cmd[0] = 0x28;
scsi_flags = 0x60a0000a;
} else {
cmd[0] = 0x2A;
scsi_flags = 0xa0a0000a;
}
writel(scsi_flags, mptr++);
*((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
*((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
memcpy_toio(mptr, cmd, 10);
mptr += 4;
writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++);
} else
#endif
{
writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]);
writel(ctl_flags, mptr++);
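/* transfer length in bytes, then the 64-bit byte offset as low and high 32-bit words */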
writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++);
writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++);
writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++);
}
if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
rc = -ENOMEM;
goto context_remove;
}
writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | SGL_OFFSET_8,
&msg->u.head[0]);
writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) |
sgl_offset, &msg->u.head[0]);
list_add_tail(&ireq->queue, &dev->open_queue);
dev->open_queue_depth++;
......@@ -846,10 +920,12 @@ static void i2o_block_request_fn(struct request_queue *q)
queue_depth = ireq->i2o_blk_dev->open_queue_depth;
if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS)
if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
if (!i2o_block_transfer(req)) {
blkdev_dequeue_request(req);
continue;
} else
osm_info("transfer error\n");
}
if (queue_depth)
......@@ -933,6 +1009,7 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
}
blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
blk_queue_issue_flush_fn(queue, i2o_block_issue_flush);
gd->major = I2O_MAJOR;
gd->queue = queue;
......@@ -974,7 +1051,18 @@ static int i2o_block_probe(struct device *dev)
u64 size;
u32 blocksize;
u32 flags, status;
int segments;
u16 body_size = 4;
unsigned short max_sectors;
#ifdef CONFIG_I2O_EXT_ADAPTEC
if (c->adaptec)
body_size = 8;
#endif
if (c->limit_sectors)
max_sectors = I2O_MAX_SECTORS_LIMITED;
else
max_sectors = I2O_MAX_SECTORS;
/* skip devices which are used by IOP */
if (i2o_dev->lct_data.user_tid != 0xfff) {
......@@ -1009,50 +1097,35 @@ static int i2o_block_probe(struct device *dev)
queue = gd->queue;
queue->queuedata = i2o_blk_dev;
blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS);
blk_queue_max_sectors(queue, I2O_MAX_SECTORS);
if (c->short_req)
segments = 8;
else {
i2o_status_block *sb;
blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
blk_queue_max_sectors(queue, max_sectors);
blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));
sb = c->status_block.virt;
segments = (sb->inbound_frame_size -
sizeof(struct i2o_message) / 4 - 4) / 2;
}
blk_queue_max_hw_segments(queue, segments);
osm_debug("max sectors = %d\n", I2O_MAX_SECTORS);
osm_debug("phys segments = %d\n", I2O_MAX_SEGMENTS);
osm_debug("hw segments = %d\n", segments);
osm_debug("max sectors = %d\n", queue->max_phys_segments);
osm_debug("phys segments = %d\n", queue->max_sectors);
osm_debug("max hw segments = %d\n", queue->max_hw_segments);
/*
* Ask for the current media data. If that isn't supported
* then we ask for the device capacity data
*/
if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8))
if (!i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
osm_warn("could not get size of %s\n", gd->disk_name);
size = 0;
}
if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
blk_queue_hardsect_size(queue, blocksize);
} else
osm_warn("unable to get blocksize of %s\n", gd->disk_name);
if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4))
if (!i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
osm_warn("unable to get blocksize of %s\n",
gd->disk_name);
blocksize = 0;
}
if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
set_capacity(gd, size >> KERNEL_SECTOR_SHIFT);
} else
osm_warn("could not get size of %s\n", gd->disk_name);
if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2))
i2o_blk_dev->power = 0;
i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
set_capacity(gd, size >> 9);
i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);
add_disk(gd);
......@@ -1109,7 +1182,7 @@ static int __init i2o_block_init(void)
goto exit;
}
i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE,
i2o_blk_req_pool.pool = mempool_create(I2O_BLOCK_REQ_MEMPOOL_SIZE,
mempool_alloc_slab,
mempool_free_slab,
i2o_blk_req_pool.slab);
......
......@@ -84,9 +84,9 @@ struct i2o_block_request
struct list_head queue;
struct request *req; /* corresponding request */
struct i2o_block_device *i2o_blk_dev; /* I2O block device */
int sg_dma_direction; /* direction of DMA buffer read/write */
struct device *dev; /* device used for DMA */
int sg_nents; /* number of SG elements */
struct scatterlist sg_table[I2O_MAX_SEGMENTS]; /* SG table */
struct scatterlist sg_table[I2O_MAX_PHYS_SEGMENTS]; /* SG table */
};
/* I2O Block device delayed request */
......
......@@ -30,27 +30,11 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/i2o.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/ioctl32.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#define OSM_NAME "config-osm"
#define OSM_VERSION "$Rev$"
#define OSM_DESCRIPTION "I2O Configuration OSM"
extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
......@@ -80,125 +64,6 @@ struct i2o_cfg_info {
static struct i2o_cfg_info *open_files = NULL;
static ulong i2o_cfg_info_id = 0;
/**
* i2o_config_read_hrt - Returns the HRT of the controller
* @kob: kernel object handle
* @buf: buffer into which the HRT should be copied
* @off: file offset
* @count: number of bytes to read
*
* Put @count bytes starting at @off into @buf from the HRT of the I2O
* controller corresponding to @kobj.
*
* Returns number of bytes copied into buffer.
*/
static ssize_t i2o_config_read_hrt(struct kobject *kobj, char *buf,
loff_t offset, size_t count)
{
struct i2o_controller *c = to_i2o_controller(container_of(kobj,
struct device,
kobj));
i2o_hrt *hrt = c->hrt.virt;
u32 size = (hrt->num_entries * hrt->entry_len + 2) * 4;
if(offset > size)
return 0;
if(offset + count > size)
count = size - offset;
memcpy(buf, (u8 *) hrt + offset, count);
return count;
};
/**
* i2o_config_read_lct - Returns the LCT of the controller
* @kob: kernel object handle
* @buf: buffer into which the LCT should be copied
* @off: file offset
* @count: number of bytes to read
*
* Put @count bytes starting at @off into @buf from the LCT of the I2O
* controller corresponding to @kobj.
*
* Returns number of bytes copied into buffer.
*/
static ssize_t i2o_config_read_lct(struct kobject *kobj, char *buf,
loff_t offset, size_t count)
{
struct i2o_controller *c = to_i2o_controller(container_of(kobj,
struct device,
kobj));
u32 size = c->lct->table_size * 4;
if(offset > size)
return 0;
if(offset + count > size)
count = size - offset;
memcpy(buf, (u8 *) c->lct + offset, count);
return count;
};
/* attribute for HRT in sysfs */
static struct bin_attribute i2o_config_hrt_attr = {
.attr = {
.name = "hrt",
.mode = S_IRUGO,
.owner = THIS_MODULE
},
.size = 0,
.read = i2o_config_read_hrt
};
/* attribute for LCT in sysfs */
static struct bin_attribute i2o_config_lct_attr = {
.attr = {
.name = "lct",
.mode = S_IRUGO,
.owner = THIS_MODULE
},
.size = 0,
.read = i2o_config_read_lct
};
/**
* i2o_config_notify_controller_add - Notify of added controller
* @c: the controller which was added
*
* If a I2O controller is added, we catch the notification to add sysfs
* entries.
*/
static void i2o_config_notify_controller_add(struct i2o_controller *c)
{
sysfs_create_bin_file(&(c->device.kobj), &i2o_config_hrt_attr);
sysfs_create_bin_file(&(c->device.kobj), &i2o_config_lct_attr);
};
/**
* i2o_config_notify_controller_remove - Notify of removed controller
* @c: the controller which was removed
*
* If a I2O controller is removed, we catch the notification to remove the
* sysfs entries.
*/
static void i2o_config_notify_controller_remove(struct i2o_controller *c)
{
sysfs_remove_bin_file(&c->device.kobj, &i2o_config_lct_attr);
sysfs_remove_bin_file(&c->device.kobj, &i2o_config_hrt_attr);
};
/* Config OSM driver struct */
static struct i2o_driver i2o_config_driver = {
.name = OSM_NAME,
.notify_controller_add = i2o_config_notify_controller_add,
.notify_controller_remove = i2o_config_notify_controller_remove
};
static int i2o_cfg_getiops(unsigned long arg)
{
struct i2o_controller *c;
......@@ -1257,37 +1122,20 @@ static struct miscdevice i2o_miscdev = {
&config_fops
};
static int __init i2o_config_init(void)
static int __init i2o_config_old_init(void)
{
printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
spin_lock_init(&i2o_config_lock);
if (misc_register(&i2o_miscdev) < 0) {
osm_err("can't register device.\n");
return -EBUSY;
}
/*
* Install our handler
*/
if (i2o_driver_register(&i2o_config_driver)) {
osm_err("handler register failed.\n");
misc_deregister(&i2o_miscdev);
return -EBUSY;
}
return 0;
}
static void i2o_config_exit(void)
static void i2o_config_old_exit(void)
{
misc_deregister(&i2o_miscdev);
i2o_driver_unregister(&i2o_config_driver);
}
MODULE_AUTHOR("Red Hat Software");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);
module_init(i2o_config_init);
module_exit(i2o_config_exit);
......@@ -228,7 +228,7 @@ static const char *i2o_get_class_name(int class)
case I2O_CLASS_FLOPPY_DEVICE:
idx = 12;
break;
case I2O_CLASS_BUS_ADAPTER_PORT:
case I2O_CLASS_BUS_ADAPTER:
idx = 13;
break;
case I2O_CLASS_PEER_TRANSPORT_AGENT:
......@@ -490,7 +490,7 @@ static int i2o_seq_show_lct(struct seq_file *seq, void *v)
seq_printf(seq, ", Unknown Device Type");
break;
case I2O_CLASS_BUS_ADAPTER_PORT:
case I2O_CLASS_BUS_ADAPTER:
if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE)
seq_printf(seq, ", %s",
bus_ports[lct->lct_entry[i].
......
......@@ -103,7 +103,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
i2o_status_block *sb;
list_for_each_entry(i2o_dev, &c->devices, list)
if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) {
if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1)
&& (type == 0x01)) /* SCSI bus */
max_channel++;
......@@ -139,7 +139,7 @@ static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
i = 0;
list_for_each_entry(i2o_dev, &c->devices, list)
if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) {
if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER) {
if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || (type == 1)) /* only SCSI bus */
i2o_shost->channel[i++] = i2o_dev;
......@@ -186,6 +186,7 @@ static int i2o_scsi_remove(struct device *dev)
shost_for_each_device(scsi_dev, i2o_shost->scsi_host)
if (scsi_dev->hostdata == i2o_dev) {
sysfs_remove_link(&i2o_dev->device.kobj, "scsi");
scsi_remove_device(scsi_dev);
scsi_device_put(scsi_dev);
break;
......@@ -259,12 +260,14 @@ static int i2o_scsi_probe(struct device *dev)
scsi_dev =
__scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev);
if (!scsi_dev) {
if (IS_ERR(scsi_dev)) {
osm_warn("can not add SCSI device %03x\n",
i2o_dev->lct_data.tid);
return -EFAULT;
return PTR_ERR(scsi_dev);
}
sysfs_create_link(&i2o_dev->device.kobj, &scsi_dev->sdev_gendev.kobj, "scsi");
osm_info("device added (TID: %03x) channel: %d, id: %d, lun: %d\n",
i2o_dev->lct_data.tid, channel, id, (unsigned int)lun);
......@@ -545,7 +548,13 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
int tid;
struct i2o_message __iomem *msg;
u32 m;
u32 scsi_flags, sg_flags;
/*
* ENABLE_DISCONNECT
* SIMPLE_TAG
* RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
*/
u32 scsi_flags = 0x20a00000;
u32 sg_flags;
u32 __iomem *mptr;
u32 __iomem *lenptr;
u32 len;
......@@ -591,17 +600,19 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
switch (SCpnt->sc_data_direction) {
case PCI_DMA_NONE:
scsi_flags = 0x00000000; // DATA NO XFER
/* DATA NO XFER */
sg_flags = 0x00000000;
break;
case PCI_DMA_TODEVICE:
scsi_flags = 0x80000000; // DATA OUT (iop-->dev)
/* DATA OUT (iop-->dev) */
scsi_flags |= 0x80000000;
sg_flags = 0x14000000;
break;
case PCI_DMA_FROMDEVICE:
scsi_flags = 0x40000000; // DATA IN (iop<--dev)
/* DATA IN (iop<--dev) */
scsi_flags |= 0x40000000;
sg_flags = 0x10000000;
break;
......@@ -639,8 +650,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
}
*/
/* Direction, disconnect ok, tag, CDBLen */
writel(scsi_flags | 0x20200000 | SCpnt->cmd_len, mptr ++);
writel(scsi_flags | SCpnt->cmd_len, mptr++);
/* Write SCSI command into the message - always 16 byte block */
memcpy_toio(mptr, SCpnt->cmnd, 16);
......
......@@ -455,6 +455,70 @@ static int i2o_iop_clear(struct i2o_controller *c)
return rc;
}
/**
* i2o_iop_init_outbound_queue - setup the outbound message queue
* @c: I2O controller
*
* Clear and (re)initialize IOP's outbound queue and post the message
* frames to the IOP.
*
* Returns 0 on success or a negative errno code on failure.
*/
static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
{
u8 *status = c->status.virt;
u32 m;
struct i2o_message __iomem *msg;
ulong timeout;
int i;
osm_debug("%s: Initializing Outbound Queue...\n", c->name);
memset(status, 0, 4);
m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
if (m == I2O_QUEUE_EMPTY)
return -ETIMEDOUT;
writel(EIGHT_WORD_MSG_SIZE | TRL_OFFSET_6, &msg->u.head[0]);
writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID,
&msg->u.head[1]);
writel(i2o_exec_driver.context, &msg->u.s.icntxt);
writel(0x0106, &msg->u.s.tcntxt); /* FIXME: why 0x0106, maybe in
Spec? */
writel(PAGE_SIZE, &msg->body[0]);
/* Outbound msg frame size in words and Initcode */
writel(MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]);
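/* simple SG element for the 4-byte init-status reply buffer: flags 0xd0000000 | length 4, followed by the 64-bit DMA address of c->status */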
writel(0xd0000004, &msg->body[2]);
writel(i2o_dma_low(c->status.phys), &msg->body[3]);
writel(i2o_dma_high(c->status.phys), &msg->body[4]);
i2o_msg_post(c, m);
timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
while (*status <= I2O_CMD_IN_PROGRESS) {
if (time_after(jiffies, timeout)) {
osm_warn("%s: Timeout Initializing\n", c->name);
return -ETIMEDOUT;
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
rmb();
}
m = c->out_queue.phys;
/* Post frames */
for (i = 0; i < NMBR_MSG_FRAMES; i++) {
i2o_flush_reply(c, m);
udelay(1); /* Promise */
m += MSG_FRAME_SIZE * 4;
}
return 0;
}
/**
* i2o_iop_reset - reset an I2O controller
* @c: controller to reset
......@@ -491,25 +555,16 @@ static int i2o_iop_reset(struct i2o_controller *c)
writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context
writel(0, &msg->body[0]);
writel(0, &msg->body[1]);
writel(i2o_ptr_low((void *)c->status.phys), &msg->body[2]);
writel(i2o_ptr_high((void *)c->status.phys), &msg->body[3]);
writel(i2o_dma_low(c->status.phys), &msg->body[2]);
writel(i2o_dma_high(c->status.phys), &msg->body[3]);
i2o_msg_post(c, m);
/* Wait for a reply */
timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
while (!*status) {
if (time_after(jiffies, timeout)) {
printk(KERN_ERR "%s: IOP reset timeout.\n", c->name);
rc = -ETIMEDOUT;
goto exit;
}
/* Promise bug */
if (status[1] || status[4]) {
*status = 0;
if (time_after(jiffies, timeout))
break;
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
......@@ -517,14 +572,20 @@ static int i2o_iop_reset(struct i2o_controller *c)
rmb();
}
if (*status == I2O_CMD_IN_PROGRESS) {
switch (*status) {
case I2O_CMD_REJECTED:
osm_warn("%s: IOP reset rejected\n", c->name);
rc = -EPERM;
break;
case I2O_CMD_IN_PROGRESS:
/*
* Once the reset is sent, the IOP goes into the INIT state
* which is indeterminate. We need to wait until the IOP
* has rebooted before we can let the system talk to
* it. We read the inbound Free_List until a message is
* available. If we can't read one in the given ammount of
* time, we assume the IOP could not reboot properly.
* which is indeterminate. We need to wait until the IOP has
* rebooted before we can let the system talk to it. We read
* the inbound Free_List until a message is available. If we
* can't read one in the given amount of time, we assume the
* IOP could not reboot properly.
*/
pr_debug("%s: Reset in progress, waiting for reboot...\n",
c->name);
......@@ -543,19 +604,26 @@ static int i2o_iop_reset(struct i2o_controller *c)
m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
}
i2o_msg_nop(c, m);
}
/* from here all quiesce commands are safe */
c->no_quiesce = 0;
/* If IopReset was rejected or didn't perform reset, try IopClear */
/* verify if controller is in state RESET */
i2o_status_get(c);
if (*status == I2O_CMD_REJECTED || sb->iop_state != ADAPTER_STATE_RESET) {
printk(KERN_WARNING "%s: Reset rejected, trying to clear\n",
c->name);
i2o_iop_clear(c);
} else
pr_debug("%s: Reset completed.\n", c->name);
if (!c->promise && (sb->iop_state != ADAPTER_STATE_RESET))
osm_warn("%s: reset completed, but adapter not in RESET"
" state.\n", c->name);
else
osm_debug("%s: reset completed.\n", c->name);
break;
default:
osm_err("%s: IOP reset timeout.\n", c->name);
rc = -ETIMEDOUT;
break;
}
exit:
/* Enable all IOPs */
......@@ -564,87 +632,6 @@ static int i2o_iop_reset(struct i2o_controller *c)
return rc;
};
/**
* i2o_iop_init_outbound_queue - setup the outbound message queue
* @c: I2O controller
*
* Clear and (re)initialize IOP's outbound queue and post the message
* frames to the IOP.
*
* Returns 0 on success or a negative errno code on failure.
*/
static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
{
u8 *status = c->status.virt;
u32 m;
struct i2o_message __iomem *msg;
ulong timeout;
int i;
pr_debug("%s: Initializing Outbound Queue...\n", c->name);
memset(status, 0, 4);
m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
if (m == I2O_QUEUE_EMPTY)
return -ETIMEDOUT;
writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID,
&msg->u.head[1]);
writel(i2o_exec_driver.context, &msg->u.s.icntxt);
writel(0x00000000, &msg->u.s.tcntxt);
writel(PAGE_SIZE, &msg->body[0]);
writel(MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]); /* Outbound msg frame
size in words and Initcode */
writel(0xd0000004, &msg->body[2]);
writel(i2o_ptr_low((void *)c->status.phys), &msg->body[3]);
writel(i2o_ptr_high((void *)c->status.phys), &msg->body[4]);
i2o_msg_post(c, m);
timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
while (*status <= I2O_CMD_IN_PROGRESS) {
if (time_after(jiffies, timeout)) {
printk(KERN_WARNING "%s: Timeout Initializing\n",
c->name);
return -ETIMEDOUT;
}
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
rmb();
}
m = c->out_queue.phys;
/* Post frames */
for (i = 0; i < NMBR_MSG_FRAMES; i++) {
i2o_flush_reply(c, m);
udelay(1); /* Promise */
m += MSG_FRAME_SIZE * 4;
}
return 0;
}
/**
* i2o_iop_send_nop - send a core NOP message
* @c: controller
*
* Send a no-operation message with a reply set to cause no
* action either. Needed for bringing up promise controllers.
*/
static int i2o_iop_send_nop(struct i2o_controller *c)
{
struct i2o_message __iomem *msg;
u32 m = i2o_msg_get_wait(c, &msg, HZ);
if (m == I2O_QUEUE_EMPTY)
return -ETIMEDOUT;
i2o_msg_nop(c, m);
return 0;
}
/**
* i2o_iop_activate - Bring controller up to HOLD
* @c: controller
......@@ -656,26 +643,9 @@ static int i2o_iop_send_nop(struct i2o_controller *c)
*/
static int i2o_iop_activate(struct i2o_controller *c)
{
struct pci_dev *i960 = NULL;
i2o_status_block *sb = c->status_block.virt;
int rc;
if (c->promise) {
/* Beat up the hardware first of all */
i960 =
pci_find_slot(c->pdev->bus->number,
PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0));
if (i960)
pci_write_config_word(i960, 0x42, 0);
/* Follow this sequence precisely or the controller
ceases to perform useful functions until reboot */
if ((rc = i2o_iop_send_nop(c)))
return rc;
if ((rc = i2o_iop_reset(c)))
return rc;
}
int state;
/* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */
/* In READY state, Get status */
......@@ -684,7 +654,8 @@ static int i2o_iop_activate(struct i2o_controller *c)
if (rc) {
printk(KERN_INFO "%s: Unable to obtain status, "
"attempting a reset.\n", c->name);
if (i2o_iop_reset(c))
rc = i2o_iop_reset(c);
if (rc)
return rc;
}
......@@ -697,37 +668,37 @@ static int i2o_iop_activate(struct i2o_controller *c)
switch (sb->iop_state) {
case ADAPTER_STATE_FAULTED:
printk(KERN_CRIT "%s: hardware fault\n", c->name);
return -ENODEV;
return -EFAULT;
case ADAPTER_STATE_READY:
case ADAPTER_STATE_OPERATIONAL:
case ADAPTER_STATE_HOLD:
case ADAPTER_STATE_FAILED:
pr_debug("%s: already running, trying to reset...\n", c->name);
if (i2o_iop_reset(c))
return -ENODEV;
rc = i2o_iop_reset(c);
if (rc)
return rc;
}
/* preserve state */
state = sb->iop_state;
rc = i2o_iop_init_outbound_queue(c);
if (rc)
return rc;
if (c->promise) {
if ((rc = i2o_iop_send_nop(c)))
return rc;
/* if adapter was not in RESET state clear now */
if (state != ADAPTER_STATE_RESET)
i2o_iop_clear(c);
if ((rc = i2o_status_get(c)))
return rc;
i2o_status_get(c);
if (i960)
pci_write_config_word(i960, 0x42, 0x3FF);
if (sb->iop_state != ADAPTER_STATE_HOLD) {
osm_err("%s: failed to bring IOP into HOLD state\n", c->name);
return -EIO;
}
/* In HOLD state */
rc = i2o_hrt_get(c);
return rc;
return i2o_hrt_get(c);
};
/**
......@@ -1030,8 +1001,8 @@ int i2o_status_get(struct i2o_controller *c)
writel(0, &msg->u.s.tcntxt); // FIXME: use resonable transaction context
writel(0, &msg->body[0]);
writel(0, &msg->body[1]);
writel(i2o_ptr_low((void *)c->status_block.phys), &msg->body[2]);
writel(i2o_ptr_high((void *)c->status_block.phys), &msg->body[3]);
writel(i2o_dma_low(c->status_block.phys), &msg->body[2]);
writel(i2o_dma_high(c->status_block.phys), &msg->body[3]);
writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */
i2o_msg_post(c, m);
......
......@@ -49,30 +49,6 @@ static struct pci_device_id __devinitdata i2o_pci_ids[] = {
{0}
};
/**
* i2o_dma_realloc - Realloc DMA memory
* @dev: struct device pointer to the PCI device of the I2O controller
* @addr: pointer to a i2o_dma struct DMA buffer
* @len: new length of memory
* @gfp_mask: GFP mask
*
* If there was something allocated in the addr, free it first. If len > 0
* than try to allocate it and write the addresses back to the addr
* structure. If len == 0 set the virtual address to NULL.
*
* Returns the 0 on success or negative error code on failure.
*/
int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len,
unsigned int gfp_mask)
{
i2o_dma_free(dev, addr);
if (len)
return i2o_dma_alloc(dev, addr, len, gfp_mask);
return 0;
};
/**
* i2o_pci_free - Frees the DMA memory for the I2O controller
* @c: I2O controller to free
......@@ -185,6 +161,7 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c)
} else
c->in_queue = c->base;
c->irq_status = c->base.virt + I2O_IRQ_STATUS;
c->irq_mask = c->base.virt + I2O_IRQ_MASK;
c->in_port = c->base.virt + I2O_IN_PORT;
c->out_port = c->base.virt + I2O_OUT_PORT;
......@@ -232,36 +209,30 @@ static int __devinit i2o_pci_alloc(struct i2o_controller *c)
static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r)
{
struct i2o_controller *c = dev_id;
struct device *dev = &c->pdev->dev;
u32 mv = readl(c->out_port);
u32 m;
irqreturn_t rc = IRQ_NONE;
while (readl(c->irq_status) & I2O_IRQ_OUTBOUND_POST) {
m = readl(c->out_port);
if (m == I2O_QUEUE_EMPTY) {
/*
* Old 960 steppings had a bug in the I2O unit that caused
* the queue to appear empty when it wasn't.
* Old 960 steppings had a bug in the I2O unit that
* caused the queue to appear empty when it wasn't.
*/
if (mv == I2O_QUEUE_EMPTY) {
mv = readl(c->out_port);
if (unlikely(mv == I2O_QUEUE_EMPTY))
return IRQ_NONE;
else
pr_debug("%s: 960 bug detected\n", c->name);
m = readl(c->out_port);
if (unlikely(m == I2O_QUEUE_EMPTY))
break;
}
while (mv != I2O_QUEUE_EMPTY) {
/* dispatch it */
if (i2o_driver_dispatch(c, mv))
if (i2o_driver_dispatch(c, m))
/* flush it if result != 0 */
i2o_flush_reply(c, mv);
i2o_flush_reply(c, m);
/*
* That 960 bug again...
*/
mv = readl(c->out_port);
if (mv == I2O_QUEUE_EMPTY)
mv = readl(c->out_port);
rc = IRQ_HANDLED;
}
return IRQ_HANDLED;
return rc;
}
/**
......
......@@ -32,6 +32,10 @@ typedef unsigned int u32;
#endif /* __KERNEL__ */
/*
* Vendors
*/
#define I2O_VENDOR_DPT 0x001b
/*
* I2O Control IOCTLs and structures
......@@ -333,7 +337,7 @@ typedef struct _i2o_status_block {
#define I2O_CLASS_ATE_PERIPHERAL 0x061
#define I2O_CLASS_FLOPPY_CONTROLLER 0x070
#define I2O_CLASS_FLOPPY_DEVICE 0x071
#define I2O_CLASS_BUS_ADAPTER_PORT 0x080
#define I2O_CLASS_BUS_ADAPTER 0x080
#define I2O_CLASS_PEER_TRANSPORT_AGENT 0x090
#define I2O_CLASS_PEER_TRANSPORT 0x091
#define I2O_CLASS_END 0xfff
......
......@@ -157,7 +157,8 @@ struct i2o_controller {
void __iomem *in_port; /* Inbound port address */
void __iomem *out_port; /* Outbound port address */
void __iomem *irq_mask; /* Interrupt register address */
void __iomem *irq_status; /* Interrupt status register address */
void __iomem *irq_mask; /* Interrupt mask register address */
/* Dynamic LCT related data */
......@@ -242,15 +243,6 @@ extern int i2o_msg_post_wait_mem(struct i2o_controller *, u32, unsigned long,
extern void i2o_msg_nop(struct i2o_controller *, u32);
static inline void i2o_flush_reply(struct i2o_controller *, u32);
/* DMA handling functions */
static inline int i2o_dma_alloc(struct device *, struct i2o_dma *, size_t,
unsigned int);
static inline void i2o_dma_free(struct device *, struct i2o_dma *);
int i2o_dma_realloc(struct device *, struct i2o_dma *, size_t, unsigned int);
static inline int i2o_dma_map(struct device *, struct i2o_dma *);
static inline void i2o_dma_unmap(struct device *, struct i2o_dma *);
/* IOP functions */
extern int i2o_status_get(struct i2o_controller *);
......@@ -275,6 +267,16 @@ static inline u32 i2o_ptr_high(void *ptr)
{
return (u32) ((u64) ptr >> 32);
};
static inline u32 i2o_dma_low(dma_addr_t dma_addr)
{
return (u32) (u64) dma_addr;
};
static inline u32 i2o_dma_high(dma_addr_t dma_addr)
{
return (u32) ((u64) dma_addr >> 32);
};
#else
static inline u32 i2o_cntxt_list_add(struct i2o_controller *c, void *ptr)
{
......@@ -305,7 +307,245 @@ static inline u32 i2o_ptr_high(void *ptr)
{
return 0;
};
static inline u32 i2o_dma_low(dma_addr_t dma_addr)
{
return (u32) dma_addr;
};
static inline u32 i2o_dma_high(dma_addr_t dma_addr)
{
return 0;
};
#endif
/**
* i2o_sg_tablesize - Calculate the maximum number of elements in a SGL
* @c: I2O controller for which the calculation should be done
* @body_size: maximum body size used for message in 32-bit words.
*
* Return the maximum number of SG elements in a SG list.
*/
static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
{
i2o_status_block *sb = c->status_block.virt;
u16 sg_count =
(sb->inbound_frame_size - sizeof(struct i2o_message) / 4) -
body_size;
if (c->pae_support) {
/*
* for 64-bit a SG attribute element must be added and each
* SG element needs 12 bytes instead of 8.
*/
sg_count -= 2;
sg_count /= 3;
} else
sg_count /= 2;
if (c->short_req && (sg_count > 8))
sg_count = 8;
return sg_count;
};
/**
* i2o_dma_map_single - Map pointer to controller and fill in I2O message.
* @c: I2O controller
* @ptr: pointer to the data which should be mapped
* @size: size of data in bytes
* @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
* @sg_ptr: pointer to the SG list inside the I2O message
*
* This function does all necessary DMA handling and also writes the I2O
* SGL elements into the I2O message. For details on DMA handling see also
* dma_map_single(). The pointer sg_ptr will only be set to the end of the
* SG list if the mapping was successful.
*
* Returns DMA address which must be checked for failures using
* dma_mapping_error().
*/
static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
size_t size,
enum dma_data_direction direction,
u32 __iomem ** sg_ptr)
{
u32 sg_flags;
u32 __iomem *mptr = *sg_ptr;
dma_addr_t dma_addr;
switch (direction) {
case DMA_TO_DEVICE:
sg_flags = 0xd4000000;
break;
case DMA_FROM_DEVICE:
sg_flags = 0xd0000000;
break;
default:
return 0;
}
dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
if (!dma_mapping_error(dma_addr)) {
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
writel(0x7C020002, mptr++);
writel(PAGE_SIZE, mptr++);
}
#endif
writel(sg_flags | size, mptr++);
writel(i2o_dma_low(dma_addr), mptr++);
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
writel(i2o_dma_high(dma_addr), mptr++);
#endif
*sg_ptr = mptr;
}
return dma_addr;
};
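/*
 * Illustrative sketch (not part of this patch): mapping one buffer into
 * an outgoing message. "mptr" stands for the current SGL write position
 * inside an already obtained message frame; the function name is an
 * assumption for illustration only.
 */
static inline int example_map_buffer(struct i2o_controller *c,
				     u32 __iomem *mptr, void *buf, size_t len)
{
	dma_addr_t addr;
	addr = i2o_dma_map_single(c, buf, len, DMA_TO_DEVICE, &mptr);
	if (dma_mapping_error(addr))
		return -ENOMEM;
	/* on success, mptr now points behind the SGL element(s) written */
	return 0;
}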
/**
* i2o_dma_map_sg - Map a SG List to controller and fill in I2O message.
* @c: I2O controller
* @sg: SG list to be mapped
* @sg_count: number of elements in the SG list
* @direction: DMA_TO_DEVICE / DMA_FROM_DEVICE
* @sg_ptr: pointer to the SG list inside the I2O message
*
* This function does all necessary DMA handling and also writes the I2O
* SGL elements into the I2O message. For details on DMA handling see also
* dma_map_sg(). The pointer sg_ptr will only be set to the end of the SG
* list if the mapping was successful.
*
* Returns 0 on failure or 1 on success.
*/
static inline int i2o_dma_map_sg(struct i2o_controller *c,
struct scatterlist *sg, int sg_count,
enum dma_data_direction direction,
u32 __iomem ** sg_ptr)
{
u32 sg_flags;
u32 __iomem *mptr = *sg_ptr;
switch (direction) {
case DMA_TO_DEVICE:
sg_flags = 0x14000000;
break;
case DMA_FROM_DEVICE:
sg_flags = 0x10000000;
break;
default:
return 0;
}
sg_count = dma_map_sg(&c->pdev->dev, sg, sg_count, direction);
if (!sg_count)
return 0;
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
writel(0x7C020002, mptr++);
writel(PAGE_SIZE, mptr++);
}
#endif
while (sg_count-- > 0) {
if (!sg_count)
sg_flags |= 0xC0000000;
writel(sg_flags | sg_dma_len(sg), mptr++);
writel(i2o_dma_low(sg_dma_address(sg)), mptr++);
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
writel(i2o_dma_high(sg_dma_address(sg)), mptr++);
#endif
sg++;
}
*sg_ptr = mptr;
return 1;
};
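/*
 * Illustrative sketch (not part of this patch): mapping a request's
 * scatterlist into a message. "mptr", "sg" and "sg_count" stand for the
 * caller's SGL write position and per-request scatterlist; the function
 * name is an assumption for illustration only.
 */
static inline int example_map_sglist(struct i2o_controller *c,
				     u32 __iomem *mptr,
				     struct scatterlist *sg, int sg_count)
{
	if (!i2o_dma_map_sg(c, sg, sg_count, DMA_FROM_DEVICE, &mptr))
		return -ENOMEM;	/* nothing mapped, message left untouched */
	return 0;
}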
/**
* i2o_dma_alloc - Allocate DMA memory
* @dev: struct device pointer to the PCI device of the I2O controller
* @addr: i2o_dma struct which should get the DMA buffer
* @len: length of the new DMA memory
* @gfp_mask: GFP mask
*
* Allocate coherent DMA memory and write the pointers into addr.
*
* Returns 0 on success or -ENOMEM on failure.
*/
static inline int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr,
size_t len, unsigned int gfp_mask)
{
struct pci_dev *pdev = to_pci_dev(dev);
int dma_64 = 0;
if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
dma_64 = 1;
if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
return -ENOMEM;
}
addr->virt = dma_alloc_coherent(dev, len, &addr->phys, gfp_mask);
if ((sizeof(dma_addr_t) > 4) && dma_64)
if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
printk(KERN_WARNING "i2o: unable to set 64-bit DMA\n");
if (!addr->virt)
return -ENOMEM;
memset(addr->virt, 0, len);
addr->len = len;
return 0;
};
/**
* i2o_dma_free - Free DMA memory
* @dev: struct device pointer to the PCI device of the I2O controller
* @addr: i2o_dma struct which contains the DMA buffer
*
* Free coherent DMA memory and set the virtual address of addr to NULL.
*/
static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
{
if (addr->virt) {
if (addr->phys)
dma_free_coherent(dev, addr->len, addr->virt,
addr->phys);
else
kfree(addr->virt);
addr->virt = NULL;
}
};
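/*
 * Illustrative sketch (not part of this patch): pairing i2o_dma_alloc()
 * and i2o_dma_free() for a controller-visible buffer. The 8 KB size and
 * the function name are assumptions for illustration only.
 */
static inline int example_alloc_and_free(struct i2o_controller *c)
{
	struct i2o_dma buf;
	if (i2o_dma_alloc(&c->pdev->dev, &buf, 8192, GFP_KERNEL))
		return -ENOMEM;
	/* hand buf.phys to the IOP, access the data through buf.virt */
	i2o_dma_free(&c->pdev->dev, &buf);
	return 0;
}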
/**
* i2o_dma_realloc - Realloc DMA memory
* @dev: struct device pointer to the PCI device of the I2O controller
* @addr: pointer to a i2o_dma struct DMA buffer
* @len: new length of memory
* @gfp_mask: GFP mask
*
* If there was something allocated in the addr, free it first. If len > 0
* then try to allocate it and write the addresses back to the addr
* structure. If len == 0 set the virtual address to NULL.
*
* Returns 0 on success or a negative error code on failure.
*/
static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
size_t len, unsigned int gfp_mask)
{
i2o_dma_free(dev, addr);
if (len)
return i2o_dma_alloc(dev, addr, len, gfp_mask);
return 0;
};
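/*
 * Illustrative sketch (not part of this patch): resizing an existing
 * buffer, e.g. when the IOP reports a larger table than the one
 * currently allocated. "buf" and "new_len" are assumed to be supplied
 * by the caller; the function name is for illustration only.
 */
static inline int example_resize_buffer(struct i2o_controller *c,
					struct i2o_dma *buf, size_t new_len)
{
	/* frees the old buffer first, then allocates new_len bytes */
	return i2o_dma_realloc(&c->pdev->dev, buf, new_len, GFP_KERNEL);
}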
/* I2O driver (OSM) functions */
extern int i2o_driver_register(struct i2o_driver *);
......@@ -375,10 +615,11 @@ extern int i2o_device_claim_release(struct i2o_device *);
/* Exec OSM functions */
extern int i2o_exec_lct_get(struct i2o_controller *);
/* device / driver conversion functions */
/* device / driver / kobject conversion functions */
#define to_i2o_driver(drv) container_of(drv,struct i2o_driver, driver)
#define to_i2o_device(dev) container_of(dev, struct i2o_device, device)
#define to_i2o_controller(dev) container_of(dev, struct i2o_controller, device)
#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj))
/**
* i2o_msg_get - obtain an I2O message from the IOP
......@@ -466,8 +707,10 @@ static inline struct i2o_message __iomem *i2o_msg_out_to_virt(struct
i2o_controller *c,
u32 m)
{
BUG_ON(m < c->out_queue.phys
|| m >= c->out_queue.phys + c->out_queue.len);
if (unlikely
(m < c->out_queue.phys
|| m >= c->out_queue.phys + c->out_queue.len))
return NULL;
return c->out_queue.virt + (m - c->out_queue.phys);
};
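/*
 * Illustrative sketch (not part of this patch): with the BUG_ON()
 * replaced by a NULL return, callers translating an outbound MFA are
 * expected to check the result before touching the frame. The function
 * name is an assumption for illustration only.
 */
static inline void example_handle_reply(struct i2o_controller *c, u32 mfa)
{
	struct i2o_message __iomem *msg = i2o_msg_out_to_virt(c, mfa);
	if (!msg)
		return;		/* MFA outside the outbound queue */
	/* process the reply here */
}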
......@@ -532,48 +775,6 @@ static inline void i2o_dma_free(struct device *dev, struct i2o_dma *addr)
}
};
/**
* i2o_dma_map - Map the memory to DMA
* @dev: struct device pointer to the PCI device of the I2O controller
* @addr: i2o_dma struct which should be mapped
*
* Map the memory in addr->virt for DMA and write the resulting DMA
* address into addr->phys.
*
* Returns 0 on success or -ENOMEM on failure.
*/
static inline int i2o_dma_map(struct device *dev, struct i2o_dma *addr)
{
if (!addr->virt)
return -EFAULT;
if (!addr->phys)
addr->phys = dma_map_single(dev, addr->virt, addr->len,
DMA_BIDIRECTIONAL);
if (!addr->phys)
return -ENOMEM;
return 0;
};
/**
* i2o_dma_unmap - Unmap the DMA memory
* @dev: struct device pointer to the PCI device of the I2O controller
* @addr: i2o_dma struct which should be unmapped
*
* Unmap the memory in addr->virt from DMA memory.
*/
static inline void i2o_dma_unmap(struct device *dev, struct i2o_dma *addr)
{
if (!addr->virt)
return;
if (addr->phys) {
dma_unmap_single(dev, addr->phys, addr->len, DMA_BIDIRECTIONAL);
addr->phys = 0;
}
};
/*
* Endian handling wrapped into the macro - keeps the core code
* cleaner.
......@@ -725,6 +926,14 @@ extern void i2o_debug_state(struct i2o_controller *c);
#define I2O_CMD_SCSI_ABORT 0x83
#define I2O_CMD_SCSI_BUSRESET 0x27
/*
* Bus Adapter Class
*/
#define I2O_CMD_BUS_ADAPTER_RESET 0x85
#define I2O_CMD_BUS_RESET 0x87
#define I2O_CMD_BUS_SCAN 0x89
#define I2O_CMD_BUS_QUIESCE 0x8b
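/*
 * Illustrative sketch (not part of this patch): how a Bus Adapter OSM
 * might issue a bus scan using these command codes. It assumes the
 * message helpers and size macros declared elsewhere in this header
 * (i2o_msg_get_wait(), i2o_msg_post_wait(), FIVE_WORD_MSG_SIZE,
 * SGL_OFFSET_0, HOST_TID, I2O_TIMEOUT_MESSAGE_GET).
 */
static inline int example_bus_scan(struct i2o_device *dev)
{
	struct i2o_message __iomem *msg;
	u32 m;
	m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -ETIMEDOUT;
	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid,
	       &msg->u.head[1]);
	return i2o_msg_post_wait(dev->iop, m, 60);
}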
/*
* Random Block Storage Class
*/
......@@ -948,7 +1157,7 @@ extern void i2o_debug_state(struct i2o_controller *c);
/* request queue sizes */
#define I2O_MAX_SECTORS 1024
#define I2O_MAX_SEGMENTS 128
#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
#define I2O_REQ_MEMPOOL_SIZE 32
......