Commit cf638122 authored by Luben Tuikov, committed by James Bottomley

cmd_alloc54-3.patch [3/3]

This patch implements the new command allocation scheme for SCSI
Core, using a slab cache plus a per-host free_list that serves as a
backup store of one command (or more).

 o The -3 subversion means that it has been updated to use
   ISA DMA and PCI DMA memory for SCSI command allocation,
   i.e. there are now two SCSI command caches.

 o The interface is, of course, unchanged; that is the whole
   point of this allocation scheme -- the allocator is
   abstracted. (A sketch of the get/put flow follows below.)
parent a6eed7cd
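For reference, the sketch below models the flow described above in plain userspace C: malloc() stands in for the slab cache, and each host keeps a small reserve (free_list) that allocation falls back to and that put refills first. The names loosely mirror scsi_get_command()/scsi_put_command() from this patch, but this is an illustration only, not the kernel code; the real implementations appear further down in the diff.

```c
/*
 * Minimal userspace model of the allocation scheme (illustration only,
 * not the kernel code).  malloc() stands in for the slab cache; each
 * "host" keeps a one-deep reserve list so that command allocation can
 * still succeed when the primary allocator fails.
 */
#include <stdio.h>
#include <stdlib.h>

struct cmnd { struct cmnd *next; int id; };

struct host {
	struct cmnd *free_list;		/* backup store of cmd structs */
};

static struct cmnd *get_command(struct host *h)
{
	struct cmnd *cmd = malloc(sizeof(*cmd));	/* "slab" allocation */

	if (!cmd && h->free_list) {			/* fall back to the reserve */
		cmd = h->free_list;
		h->free_list = cmd->next;
	}
	return cmd;
}

static void put_command(struct host *h, struct cmnd *cmd)
{
	if (!h->free_list) {				/* refill the reserve first */
		cmd->next = NULL;
		h->free_list = cmd;
		return;
	}
	free(cmd);					/* otherwise back to the "slab" */
}

int main(void)
{
	struct host h = { .free_list = NULL };
	struct cmnd *c = get_command(&h);

	if (c) {
		c->id = 1;
		printf("got command %d\n", c->id);
		put_command(&h, c);			/* lands on the reserve list */
	}
	return 0;
}
```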
......@@ -971,7 +971,7 @@ ahd_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
struct ahd_linux_device *dev;
u_long flags;
ahd = *(struct ahd_softc **)cmd->host->hostdata;
ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
/*
* Save the callback on completion function.
......@@ -995,8 +995,8 @@ ahd_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
ahd_midlayer_entrypoint_unlock(ahd, &flags);
return (0);
}
dev = ahd_linux_get_device(ahd, cmd->channel, cmd->target,
cmd->lun, /*alloc*/TRUE);
dev = ahd_linux_get_device(ahd, cmd->device->channel, cmd->device->id,
cmd->device->lun, /*alloc*/TRUE);
if (dev == NULL) {
ahd_midlayer_entrypoint_unlock(ahd, &flags);
printf("aic79xx_linux_queue: Unable to allocate device!\n");
......@@ -1217,7 +1217,7 @@ ahd_linux_abort(Scsi_Cmnd *cmd)
int found;
#endif
ahd = *(struct ahd_softc **)cmd->host->hostdata;
ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
#if NOTYET
int error;
......@@ -1251,7 +1251,7 @@ ahd_linux_dev_reset(Scsi_Cmnd *cmd)
int found;
#endif
ahd = *(struct ahd_softc **)cmd->host->hostdata;
ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
printf("%s: Dev reset called for cmd %p\n",
......@@ -1283,14 +1283,14 @@ ahd_linux_bus_reset(Scsi_Cmnd *cmd)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
spin_unlock_irq(&io_request_lock);
#endif
ahd = *(struct ahd_softc **)cmd->host->hostdata;
ahd = *(struct ahd_softc **)cmd->device->host->hostdata;
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
printf("%s: Bus reset called for cmd %p\n",
ahd_name(ahd), cmd);
#endif
ahd_midlayer_entrypoint_lock(ahd, &s);
found = ahd_reset_channel(ahd, cmd->channel + 'A',
found = ahd_reset_channel(ahd, cmd->device->channel + 'A',
/*initiate reset*/TRUE);
acmd = TAILQ_FIRST(&ahd->platform_data->completeq);
TAILQ_INIT(&ahd->platform_data->completeq);
......@@ -1415,7 +1415,7 @@ static int ahd_linux_halt(struct notifier_block *nb, u_long event, void *buf)
/******************************** Macros **************************************/
#define BUILD_SCSIID(ahd, cmd) \
((((cmd)->target << TID_SHIFT) & TID) | (ahd)->our_id)
((((cmd)->device->id << TID_SHIFT) & TID) | (ahd)->our_id)
/******************************** Bus DMA *************************************/
int
......@@ -3668,7 +3668,7 @@ ahd_linux_dv_timeout(struct scsi_cmnd *cmd)
struct scb *scb;
u_long flags;
ahd = *((struct ahd_softc **)cmd->host->hostdata);
ahd = *((struct ahd_softc **)cmd->device->host->hostdata);
ahd_lock(ahd, &flags);
#ifdef AHD_DEBUG
......@@ -3696,7 +3696,7 @@ ahd_linux_dv_timeout(struct scsi_cmnd *cmd)
ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
else
ahd_set_transaction_status(scb, CAM_CMD_TIMEOUT);
ahd_reset_channel(ahd, cmd->channel + 'A', /*initiate*/TRUE);
ahd_reset_channel(ahd, cmd->device->channel + 'A', /*initiate*/TRUE);
/*
* Add a minimal bus settle delay for devices that are slow to
......@@ -3746,7 +3746,7 @@ ahd_linux_dv_complete(struct scsi_cmnd *cmd)
{
struct ahd_softc *ahd;
ahd = *((struct ahd_softc **)cmd->host->hostdata);
ahd = *((struct ahd_softc **)cmd->device->host->hostdata);
/* Delete the DV timer before it goes off! */
scsi_delete_timer(cmd);
......@@ -3754,7 +3754,8 @@ ahd_linux_dv_complete(struct scsi_cmnd *cmd)
#ifdef AHD_DEBUG
if (ahd_debug & AHD_SHOW_DV)
printf("%s:%c:%d: Command completed, status= 0x%x\n",
ahd_name(ahd), cmd->channel, cmd->target, cmd->result);
ahd_name(ahd), cmd->device->channel,
cmd->device->id, cmd->result);
#endif
/* Wake up the state machine */
......@@ -3948,12 +3949,13 @@ ahd_linux_run_device_queue(struct ahd_softc *ahd, struct ahd_linux_device *dev)
* Get an scb to use.
*/
tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
cmd->target, &tstate);
cmd->device->id, &tstate);
if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) == 0
|| (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
col_idx = AHD_NEVER_COL_IDX;
} else {
col_idx = AHD_BUILD_COL_IDX(cmd->target, cmd->lun);
col_idx = AHD_BUILD_COL_IDX(cmd->device->id,
cmd->device->lun);
}
if ((scb = ahd_get_scb(ahd, col_idx)) == NULL) {
TAILQ_INSERT_TAIL(&ahd->platform_data->device_runq,
......@@ -3973,7 +3975,7 @@ ahd_linux_run_device_queue(struct ahd_softc *ahd, struct ahd_linux_device *dev)
*/
hscb->control = 0;
hscb->scsiid = BUILD_SCSIID(ahd, cmd);
hscb->lun = cmd->lun;
hscb->lun = cmd->device->lun;
mask = SCB_GET_TARGET_MASK(ahd, scb);
if ((ahd->user_discenable & mask) != 0)
......@@ -4641,8 +4643,9 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, Scsi_Cmnd *cmd)
struct ahd_devinfo devinfo;
uint32_t action;
dev = ahd_linux_get_device(ahd, cmd->channel,
cmd->target, cmd->lun,
dev = ahd_linux_get_device(ahd, cmd->device->channel,
cmd->device->id,
cmd->device->lun,
/*alloc*/FALSE);
if (dev == NULL)
......
......@@ -1603,8 +1603,7 @@ return -ENOTSUPP;
scsi_cdb[0] = RELEASE;
// allocate with wait = true, interruptible = false
SCpnt = scsi_allocate_device(ScsiDev, 1);
SCpnt = scsi_getset_command(ScsiDev, GFP_KERNEL);
{
CPQFC_DECLARE_COMPLETION(wait);
......@@ -1653,7 +1652,7 @@ return -ENOTSUPP;
result = SCpnt->result;
SDpnt = SCpnt->device;
scsi_release_command(SCpnt);
scsi_put_command(SCpnt);
SCpnt = NULL;
// if (!SDpnt->was_reset && SDpnt->scsi_request_fn)
......
......@@ -4599,7 +4599,7 @@ static void gdth_flush(int hanum)
#if LINUX_VERSION_CODE >= 0x020322
sdev = scsi_get_host_dev(gdth_ctr_tab[hanum]);
scp = scsi_allocate_device(sdev, 1);
scp = scsi_getset_command(sdev, GFP_KERNEL);
scp->cmd_len = 12;
scp->use_sg = 0;
#else
......@@ -4627,7 +4627,7 @@ static void gdth_flush(int hanum)
}
}
#if LINUX_VERSION_CODE >= 0x020322
scsi_release_command(scp);
scsi_put_command(scp);
scsi_free_host_dev(sdev);
#endif
}
......@@ -4673,7 +4673,7 @@ void gdth_halt(void)
memset(cmnd, 0xff, MAX_COMMAND_SIZE);
#if LINUX_VERSION_CODE >= 0x020322
sdev = scsi_get_host_dev(gdth_ctr_tab[hanum]);
scp = scsi_allocate_device(sdev, 1);
scp = scsi_getset_command(sdev, GFP_KERNEL);
scp->cmd_len = 12;
scp->use_sg = 0;
#else
......@@ -4690,7 +4690,7 @@ void gdth_halt(void)
TRACE2(("gdth_halt(): reset controller %d\n", hanum));
#if LINUX_VERSION_CODE >= 0x020322
gdth_do_cmd(scp, &gdtcmd, cmnd, 10);
scsi_release_command(scp);
scsi_put_command(scp);
scsi_free_host_dev(sdev);
#else
gdth_do_cmd(&scp, &gdtcmd, cmnd, 10);
......
......@@ -48,7 +48,7 @@ static int gdth_set_info(char *buffer,int length,int vh,int hanum,int busnum)
#if LINUX_VERSION_CODE >= 0x020322
sdev = scsi_get_host_dev(gdth_ctr_vtab[vh]);
scp = scsi_allocate_device(sdev, 1);
scp = scsi_getset_command(sdev, GFP_KERNEL);
if (!scp)
return -ENOMEM;
scp->cmd_len = 12;
......@@ -81,7 +81,7 @@ static int gdth_set_info(char *buffer,int length,int vh,int hanum,int busnum)
ret_val = -EINVAL;
}
#if LINUX_VERSION_CODE >= 0x020322
scsi_release_command(scp);
scsi_put_command(scp);
scsi_free_host_dev(sdev);
#endif
return ret_val;
......@@ -712,7 +712,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,
#if LINUX_VERSION_CODE >= 0x020322
sdev = scsi_get_host_dev(gdth_ctr_vtab[vh]);
scp = scsi_allocate_device(sdev, 1);
scp = scsi_getset_command(sdev, GFP_KERNEL);
if (!scp)
return -ENOMEM;
scp->cmd_len = 12;
......@@ -1234,7 +1234,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,
stop_output:
#if LINUX_VERSION_CODE >= 0x020322
scsi_release_command(scp);
scsi_put_command(scp);
scsi_free_host_dev(sdev);
#endif
*start = buffer +(offset-begin);
......
......@@ -347,6 +347,13 @@ void scsi_unregister(struct Scsi_Host *shost)
/* Cleanup proc */
scsi_proc_host_rm(shost);
while (!list_empty(&shost->free_list)) {
struct scsi_cmnd *cmd;
cmd = list_entry(shost->free_list.next,struct scsi_cmnd,list);
list_del_init(&cmd->list);
kmem_cache_free(scsi_core->scsi_cmd_cache, cmd);
}
kfree(shost);
}
......@@ -367,6 +374,7 @@ extern int blk_nohighio;
struct Scsi_Host * scsi_register(Scsi_Host_Template *shost_tp, int xtr_bytes)
{
struct Scsi_Host *shost, *shost_scr;
struct scsi_cmnd *cmd = NULL;
int gfp_mask;
DECLARE_MUTEX_LOCKED(sem);
......@@ -456,6 +464,16 @@ struct Scsi_Host * scsi_register(Scsi_Host_Template *shost_tp, int xtr_bytes)
found:
spin_unlock(&scsi_host_list_lock);
spin_lock_init(&shost->free_list_lock);
INIT_LIST_HEAD(&shost->free_list);
/* Get one backup command for this host. */
cmd = scsi_get_command(shost, GFP_KERNEL);
if (cmd)
list_add(&cmd->list, &shost->free_list);
else
printk(KERN_NOTICE "The system is running low in memory.\n");
scsi_proc_host_add(shost);
shost->eh_notify = &sem;
......
......@@ -375,6 +375,9 @@ struct Scsi_Host
struct list_head sh_list;
struct list_head my_devices;
spinlock_t free_list_lock;
struct list_head free_list; /* backup store of cmd structs */
spinlock_t default_lock;
spinlock_t *host_lock;
......
......@@ -265,7 +265,7 @@ void scsi_release_request(Scsi_Request * req)
{
if( req->sr_command != NULL )
{
scsi_release_command(req->sr_command);
scsi_put_command(req->sr_command);
req->sr_command = NULL;
}
......@@ -768,7 +768,7 @@ void scsi_wait_req (Scsi_Request * SRpnt, const void *cmnd ,
SRpnt->sr_request->waiting = NULL;
if( SRpnt->sr_command != NULL )
{
scsi_release_command(SRpnt->sr_command);
scsi_put_command(SRpnt->sr_command);
SRpnt->sr_command = NULL;
}
......@@ -834,7 +834,7 @@ void scsi_do_req(Scsi_Request * SRpnt, const void *cmnd,
*/
if( SRpnt->sr_command != NULL )
{
scsi_release_command(SRpnt->sr_command);
scsi_put_command(SRpnt->sr_command);
SRpnt->sr_command = NULL;
}
......@@ -1506,6 +1506,12 @@ void scsi_adjust_queue_depth(Scsi_Device *SDpnt, int tagged, int tags)
SDpnt->new_queue_depth = tags;
break;
}
/* TODO FIXME This is a hack and MUST go eventually.
This fixes a problem in scsi_scan.c::scsi_alloc_sdev()
else we cannot ever have ANY SCSI devices.
*/
SDpnt->current_queue_depth = 1;
spin_unlock_irqrestore(&device_request_lock, flags);
}
......@@ -2017,6 +2023,14 @@ static int __init init_scsi(void)
{
printk(KERN_INFO "SCSI subsystem driver " REVISION "\n");
scsi_core = kmalloc(sizeof(*scsi_core), GFP_KERNEL);
if (!scsi_core)
goto out_no_mem;
memset(scsi_core, 0, sizeof(*scsi_core));
if (scsi_create_cmdcache(scsi_core))
goto out_no_mem;
scsi_init_queue();
scsi_init_procfs();
devfs_mk_dir(NULL, "scsi", NULL);
......@@ -2025,6 +2039,10 @@ static int __init init_scsi(void)
scsi_sysfs_register();
open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL);
return 0;
out_no_mem:
printk(KERN_CRIT "Couldn't load SCSI Core -- out of memory!\n");
return -ENOMEM;
}
static void __exit exit_scsi(void)
......@@ -2034,6 +2052,12 @@ static void __exit exit_scsi(void)
devfs_remove("scsi");
scsi_exit_procfs();
scsi_exit_queue();
scsi_destroy_cmdcache(scsi_core);
if (scsi_core)
kfree(scsi_core);
scsi_core = NULL;
}
subsys_initcall(init_scsi);
......
......@@ -734,7 +734,8 @@ struct scsi_cmnd {
Scsi_Request *sc_request;
struct scsi_cmnd *next;
struct scsi_cmnd *reset_chain;
struct list_head list_entry; /* Used to place us on the cmd lists */
struct list_head list; /* scsi_cmnd participates in queue lists */
int eh_state; /* Used for state tracking in error handlr */
int eh_eflags; /* Used by error handlr */
......@@ -994,4 +995,45 @@ extern void scsi_device_unregister(struct scsi_device *);
extern int scsi_sysfs_register(void);
extern void scsi_sysfs_unregister(void);
/* -------------------------------------------------- */
/* data decl: */
/* All the SCSI Core specific global data, etc,
should go in here.
*/
struct scsi_core_data {
kmem_cache_t *scsi_cmd_cache;
kmem_cache_t *scsi_cmd_dma_cache;
};
extern struct scsi_core_data *scsi_core;
/* -------------------------------------------------- */
/* fn decl: */
int scsi_create_cmdcache(struct scsi_core_data *scsi_core);
int scsi_destroy_cmdcache(struct scsi_core_data *scsi_core);
struct scsi_cmnd * scsi_get_command(struct Scsi_Host *host, int alloc_flags);
void scsi_put_command(struct scsi_cmnd *cmd);
void scsi_setup_command(struct scsi_device *dev, struct scsi_cmnd *cmd);
/* -------------------------------------------------- */
/* inline funcs: */
/* scsi_getset_command: allocate, set and return a command struct,
when the device is known.
*/
static inline struct scsi_cmnd *scsi_getset_command(struct scsi_device *dev,
int flags)
{
struct scsi_cmnd *cmd;
if (!dev) return NULL;
if (!dev->host) return NULL;
scsi_setup_command(dev, (cmd = scsi_get_command(dev->host, flags)));
return cmd;
}
#endif
......@@ -1384,7 +1384,7 @@ static void scsi_eh_lock_done(struct scsi_cmnd *scmd)
scmd->sc_request = NULL;
sreq->sr_command = NULL;
scsi_release_command(scmd);
scsi_put_command(scmd);
scsi_release_request(sreq);
}
......
......@@ -33,7 +33,9 @@ struct scsi_host_sg_pool {
struct scsi_host_sg_pool scsi_sg_pools[SG_MEMPOOL_NR] = {
SP(8), SP(16), SP(32), SP(64), SP(MAX_PHYS_SEGMENTS)
};
#undef SP
#undef SP
struct scsi_core_data *scsi_core;
/*
* Function: scsi_insert_special_cmd()
......@@ -357,7 +359,7 @@ static Scsi_Cmnd *scsi_end_request(Scsi_Cmnd * SCpnt,
* This will goose the queue request function at the end, so we don't
* need to worry about launching another command.
*/
__scsi_release_command(SCpnt);
scsi_put_command(SCpnt);
scsi_queue_next_request(q, NULL);
return NULL;
}
......@@ -802,7 +804,8 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
SRpnt = (Scsi_Request *) req->special;
if( SRpnt->sr_magic == SCSI_REQ_MAGIC ) {
SCpnt = scsi_allocate_device(SRpnt->sr_device, 0);
SCpnt = scsi_getset_command(SRpnt->sr_device,
GFP_ATOMIC);
if (!SCpnt)
return BLKPREP_DEFER;
scsi_init_cmd_from_req(SCpnt, SRpnt);
......@@ -815,7 +818,7 @@ int scsi_prep_fn(struct request_queue *q, struct request *req)
if (req->special) {
SCpnt = (Scsi_Cmnd *) req->special;
} else {
SCpnt = scsi_allocate_device(SDpnt, 0);
SCpnt = scsi_getset_command(SDpnt, GFP_ATOMIC);
}
/*
* if command allocation failure, wait a bit
......@@ -1179,3 +1182,143 @@ void __exit scsi_exit_queue(void)
kmem_cache_destroy(sgp->slab);
}
}
/* -------------------------------------------------- */
int scsi_create_cmdcache(struct scsi_core_data *scsi_core)
{
if (!scsi_core)
return -EFAULT;
scsi_core->scsi_cmd_cache
= kmem_cache_create("scsi_cmd_cache",
sizeof(struct scsi_cmnd), 0,
SLAB_NO_REAP|SLAB_HWCACHE_ALIGN,NULL,NULL);
if (!scsi_core->scsi_cmd_cache)
return -ENOMEM;
scsi_core->scsi_cmd_dma_cache
= kmem_cache_create("scsi_cmd_cache(DMA)",
sizeof(struct scsi_cmnd), 0,
SLAB_NO_REAP|SLAB_HWCACHE_ALIGN
|SLAB_CACHE_DMA,
NULL,NULL);
if (!scsi_core->scsi_cmd_dma_cache) {
scsi_destroy_cmdcache(scsi_core);
return -ENOMEM;
}
return 0;
} /* end scsi_create_cmdcache() */
/* -------------------------------------------------- */
int scsi_destroy_cmdcache(struct scsi_core_data *scsi_core)
{
if (!scsi_core)
return -EFAULT;
if (scsi_core->scsi_cmd_cache &&
kmem_cache_destroy(scsi_core->scsi_cmd_cache)) {
goto bail;
} else {
scsi_core->scsi_cmd_cache = NULL;
}
if (scsi_core->scsi_cmd_dma_cache &&
kmem_cache_destroy(scsi_core->scsi_cmd_dma_cache)) {
goto bail;
} else {
scsi_core->scsi_cmd_dma_cache = NULL;
}
return 0;
bail:
printk(KERN_CRIT "Failed to free scsi command cache"
" -- memory leak\n");
return -EFAULT;
} /* end scsi_destroy_cmdcache() */
/* -------------------------------------------------- */
struct scsi_cmnd * scsi_get_command(struct Scsi_Host *host, int alloc_flags)
{
unsigned long flags;
struct scsi_cmnd *cmd = NULL;
if (!host)
return NULL;
if (host->unchecked_isa_dma) {
cmd = kmem_cache_alloc(scsi_core->scsi_cmd_dma_cache,
alloc_flags);
} else {
cmd = kmem_cache_alloc(scsi_core->scsi_cmd_cache, alloc_flags);
}
if (!cmd) {
spin_lock_irqsave(&host->free_list_lock, flags);
if (!list_empty(&host->free_list)) {
cmd = list_entry(host->free_list.next,
struct scsi_cmnd, list);
list_del_init(&cmd->list);
}
spin_unlock_irqrestore(&host->free_list_lock, flags);
}
return cmd;
} /* end scsi_get_command() */
/* -------------------------------------------------- */
/* scsi_put_command: free a scsi_cmnd struct.
Note: the command must not belong to any lists!
*/
void scsi_put_command(struct scsi_cmnd *cmd)
{
unsigned long flags;
struct Scsi_Host *host;
if (!cmd)
return;
if (!cmd->device || !cmd->device->host) {
printk(KERN_NOTICE "Trying to free a command which"
" doesn't belong to scsi core?!\n");
/* Memory leak, but let the system survive for now --
they'll get it eventually! */
return;
}
host = cmd->device->host;
spin_lock_irqsave(&host->free_list_lock, flags);
if (list_empty(&host->free_list)) {
list_add(&cmd->list, &host->free_list);
cmd = NULL;
}
spin_unlock_irqrestore(&host->free_list_lock, flags);
if (cmd) {
if (host->unchecked_isa_dma)
kmem_cache_free(scsi_core->scsi_cmd_dma_cache, cmd);
else
kmem_cache_free(scsi_core->scsi_cmd_cache, cmd);
}
} /* end scsi_put_command() */
/* -------------------------------------------------- */
/* scsi_setup_command: This will do post-alloc init of the command.
We want to do as little as possible here.
*/
void scsi_setup_command(struct scsi_device *dev, struct scsi_cmnd *cmd)
{
if (!cmd)
return;
memset(cmd, 0, sizeof(*cmd));
cmd->device = dev;
cmd->state = SCSI_STATE_UNUSED;
cmd->owner = SCSI_OWNER_NOBODY;
init_timer(&cmd->eh_timeout);
INIT_LIST_HEAD(&cmd->list);
} /* end scsi_setup_command() */
/* -------------------------------------------------- */
......@@ -471,7 +471,6 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
sdev->request_queue->queuedata = sdev;
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
scsi_build_commandblocks(sdev);
if (sdev->current_queue_depth == 0) {
goto out_bail;
}
......@@ -515,7 +514,6 @@ static struct scsi_device *scsi_alloc_sdev(struct Scsi_Host *shost,
} else if (sdev->request_queue)
scsi_free_queue(sdev->request_queue);
scsi_release_commandblocks(sdev);
kfree(sdev);
return NULL;
}
......@@ -535,7 +533,6 @@ static void scsi_free_sdev(struct scsi_device *sdev)
if (sdev->request_queue)
scsi_free_queue(sdev->request_queue);
scsi_release_commandblocks(sdev);
if (sdev->host->hostt->slave_destroy)
sdev->host->hostt->slave_destroy(sdev);
if (sdev->inquiry)
......
......@@ -39,7 +39,6 @@ EXPORT_SYMBOL(scsi_unregister);
EXPORT_SYMBOL(scsicam_bios_param);
EXPORT_SYMBOL(scsi_partsize);
EXPORT_SYMBOL(scsi_bios_ptable);
EXPORT_SYMBOL(scsi_allocate_device);
EXPORT_SYMBOL(scsi_do_cmd);
EXPORT_SYMBOL(scsi_ioctl);
EXPORT_SYMBOL(print_command);
......@@ -50,7 +49,6 @@ EXPORT_SYMBOL(print_status);
EXPORT_SYMBOL(scsi_sense_key_string);
EXPORT_SYMBOL(scsi_extd_sense_format);
EXPORT_SYMBOL(kernel_scsi_ioctl);
EXPORT_SYMBOL(scsi_release_command);
EXPORT_SYMBOL(print_Scsi_Cmnd);
EXPORT_SYMBOL(scsi_block_when_processing_errors);
EXPORT_SYMBOL(scsi_ioctl_send_command);
......@@ -114,3 +112,8 @@ EXPORT_SYMBOL(scsi_delete_timer);
* sysfs support
*/
EXPORT_SYMBOL(shost_devclass);
EXPORT_SYMBOL(scsi_get_command);
EXPORT_SYMBOL(scsi_put_command);
EXPORT_SYMBOL(scsi_setup_command);