Commit f2405db8 authored by Don Brace, committed by James Bottomley

hpsa: do not queue commands internally in driver

By not maintaining a list of queued commands, we can eliminate some spin
locking in the main i/o path and gain significant improvement in IOPS.  Remove
the queuing code and the code that calls it; remove now-unused interrupt code;
remove DIRECT_LOOKUP_BIT.
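
For illustration only, a minimal sketch of the lock-free submit path this change enables; the helper name enqueue_cmd_and_start_io() and its exact body are assumptions here, not the literal patch:

/* Sketch only, not the actual hpsa code: with no internal reqQ/cmpQ
 * there is no list to manipulate and no spin lock to take; submission
 * is a counter bump plus a hand-off to the hardware access method.
 */
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
				     struct CommandList *c)
{
	atomic_inc(&h->commands_outstanding);	/* simple atomic counter */
	h->access.submit_command(h, c);		/* straight to the hardware */
}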

Now that the passthru commands share the same command pool as
the main i/o path, and the total size of the pool is less than
or equal to the number of commands that will fit in the hardware
fifo, there is no need to check to see if we are exceeding the
hardware fifo's depth.
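
For illustration only, a hedged sketch of the invariant that makes the fifo-depth check unnecessary; the helper is hypothetical, though h->nr_cmds, h->max_commands, and h->commands_outstanding are existing driver fields:

/* Hypothetical invariant check, not actual driver code: every
 * outstanding command occupies a slot in the single command pool,
 * so if the pool is no larger than the hardware fifo, the fifo
 * can never be overrun and fifo_full() has nothing to report.
 */
static inline void hpsa_check_fifo_invariant(struct ctlr_info *h)
{
	WARN_ON(h->nr_cmds > h->max_commands);
	WARN_ON(atomic_read(&h->commands_outstanding) > h->nr_cmds);
}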
Reviewed-by: Scott Teel <scott.teel@pmcs.com>
Reviewed-by: Robert Elliott <elliott@hp.com>
Signed-off-by: Don Brace <don.brace@pmcs.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 45fcb86e
@@ -32,7 +32,6 @@ struct access_method {
 	void (*submit_command)(struct ctlr_info *h,
 		struct CommandList *c);
 	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
-	unsigned long (*fifo_full)(struct ctlr_info *h);
 	bool (*intr_pending)(struct ctlr_info *h);
 	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
 };
@@ -133,8 +132,6 @@ struct ctlr_info {
 	char hba_mode_enabled;
 
 	/* queue and queue Info */
-	struct list_head reqQ;
-	struct list_head cmpQ;
 	unsigned int Qdepth;
 	unsigned int maxSG;
 	spinlock_t lock;
@@ -197,7 +194,6 @@ struct ctlr_info {
 	u32 __percpu *lockup_detected;
 	struct delayed_work monitor_ctlr_work;
 	int remove_in_progress;
-	u32 fifo_recently_full;
 	/* Address of h->q[x] is passed to intr handler to know which queue */
 	u8 q[MAX_REPLY_QUEUES];
 	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
@@ -427,14 +423,6 @@ static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
 	return register_value;
 }
 
-/*
- * Returns true if fifo is full.
- *
- */
-static unsigned long SA5_fifo_full(struct ctlr_info *h)
-{
-	return atomic_read(&h->commands_outstanding) >= h->max_commands;
-}
 /*
  * returns value read from hardware.
  * returns FIFO_EMPTY if there is nothing to read
@@ -527,7 +515,6 @@ static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
 static struct access_method SA5_access = {
 	SA5_submit_command,
 	SA5_intr_mask,
-	SA5_fifo_full,
 	SA5_intr_pending,
 	SA5_completed,
 };
@@ -535,7 +522,6 @@ static struct access_method SA5_access = {
 static struct access_method SA5_ioaccel_mode1_access = {
 	SA5_submit_command,
 	SA5_performant_intr_mask,
-	SA5_fifo_full,
 	SA5_ioaccel_mode1_intr_pending,
 	SA5_ioaccel_mode1_completed,
 };
@@ -543,7 +529,6 @@ static struct access_method SA5_ioaccel_mode1_access = {
 static struct access_method SA5_ioaccel_mode2_access = {
 	SA5_submit_command_ioaccel2,
 	SA5_performant_intr_mask,
-	SA5_fifo_full,
 	SA5_performant_intr_pending,
 	SA5_performant_completed,
 };
@@ -551,7 +536,6 @@ static struct access_method SA5_ioaccel_mode2_access = {
 static struct access_method SA5_performant_access = {
 	SA5_submit_command,
 	SA5_performant_intr_mask,
-	SA5_fifo_full,
 	SA5_performant_intr_pending,
 	SA5_performant_completed,
 };
@@ -559,7 +543,6 @@ static struct access_method SA5_performant_access = {
 static struct access_method SA5_performant_access_no_read = {
 	SA5_submit_command_no_read,
 	SA5_performant_intr_mask,
-	SA5_fifo_full,
 	SA5_performant_intr_pending,
 	SA5_performant_completed,
 };
@@ -375,22 +375,19 @@ struct ErrorInfo {
 #define CMD_IOACCEL1	0x04
 #define CMD_IOACCEL2	0x05
 
-#define DIRECT_LOOKUP_SHIFT 5
-#define DIRECT_LOOKUP_BIT 0x10
+#define DIRECT_LOOKUP_SHIFT 4
 #define DIRECT_LOOKUP_MASK (~((1 << DIRECT_LOOKUP_SHIFT) - 1))
 
 #define HPSA_ERROR_BIT          0x02
 
 struct ctlr_info; /* defined in hpsa.h */
-/* The size of this structure needs to be divisible by 32
- * on all architectures because low 5 bits of the addresses
+/* The size of this structure needs to be divisible by 128
+ * on all architectures.  The low 4 bits of the addresses
  * are used as follows:
  *
  * bit 0: to device, used to indicate "performant mode" command
  *        from device, indidcates error status.
  * bit 1-3: to device, indicates block fetch table entry for
  *          reducing DMA in fetching commands from host memory.
- * bit 4: used to indicate whether tag is "direct lookup" (index),
- *        or a bus address.
  */
 #define COMMANDLIST_ALIGNMENT 128
@@ -405,7 +402,6 @@ struct CommandList {
 	struct ctlr_info *h;
 	int cmd_type;
 	long cmdindex;
-	struct list_head list;
 	struct completion *waiting;
 	void *scsi_cmd;
 } __aligned(COMMANDLIST_ALIGNMENT);