Commit 105a3dbc authored by Robert Elliott, committed by James Bottomley

hpsa: clean up driver init

Improve initialization error handling in hpsa_init_one
Clean up style and indent issues
Rename functions for consistency
Improve error messaging on allocations
Fix return status from hpsa_put_ctlr_into_performant_mode
Correct free order in hpsa_init_one using new function
   hpsa_free_performant_mode
Prevent inadvertent use of null pointers by nulling out the parent structures
   and zeroing out associated size variables.
Reviewed-by: Scott Teel <scott.teel@pmcs.com>
Reviewed-by: Kevin Barnett <kevin.barnett@pmcs.com>
Reviewed-by: Tomas Henzl <thenzl@redhat.com>
Reviewed-by: Hannes Reinecke <hare@Suse.de>
Signed-off-by: Robert Elliott <elliott@hp.com>
Signed-off-by: Don Brace <don.brace@pmcs.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: James Bottomley <JBottomley@Odin.com>
parent 2dd02d74
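The recurring discipline in the hunks below is: allocate in a fixed order, unwind in exact reverse order through numbered goto labels (each label commented with what is still held), and NULL each pointer and zero its size/handle fields as soon as it is freed. The following userspace sketch illustrates that shape with hypothetical names; it is not the hpsa code itself.

```c
/*
 * Minimal sketch of the cleanup discipline applied in this commit,
 * with hypothetical names (not the actual hpsa code):
 *  - unwind in reverse allocation order via labelled gotos;
 *  - NULL pointers and zero size fields right after freeing, so a stray
 *    second free or late use hits NULL instead of stale memory.
 */
#include <stdio.h>
#include <stdlib.h>

struct ctlr {
	char *cfgtable;          /* stands in for the mapped config tables */
	char *cmd_pool;          /* stands in for the command pool */
	size_t cmd_pool_size;
};

/* Safe to call twice: free(NULL) is a no-op, like the kernel's kfree(). */
static void ctlr_free_cmd_pool(struct ctlr *h)
{
	free(h->cmd_pool);
	h->cmd_pool = NULL;
	h->cmd_pool_size = 0;
}

static int ctlr_init(struct ctlr *h, int fail_late)
{
	int rc;

	h->cfgtable = calloc(1, 64);
	if (!h->cfgtable)
		return -1;            /* nothing held yet */

	h->cmd_pool = calloc(1, 256);
	if (!h->cmd_pool) {
		rc = -1;
		goto clean1;          /* cfgtable */
	}
	h->cmd_pool_size = 256;

	if (fail_late) {              /* simulate a later init step failing */
		rc = -1;
		goto clean2;          /* cmd_pool, cfgtable */
	}
	return 0;

clean2:                               /* cmd_pool, cfgtable */
	ctlr_free_cmd_pool(h);
clean1:                               /* cfgtable */
	free(h->cfgtable);
	h->cfgtable = NULL;
	return rc;
}

int main(void)
{
	struct ctlr h = { 0 };
	int rc = ctlr_init(&h, 0);

	printf("init %s\n", rc ? "failed" : "ok");
	ctlr_free_cmd_pool(&h);       /* harmless even after an error unwind */
	free(h.cfgtable);
	return rc ? 1 : 0;
}
```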
@@ -234,9 +234,8 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h);
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
u32 *cfg_base_addr, u64 *cfg_base_addr_index,
@@ -1630,6 +1629,7 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
* since it didn't get added to scsi mid layer
*/
fixup_botched_add(h, added[i]);
added[i] = NULL;
}
free_and_out:
@@ -1753,7 +1753,7 @@ static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
h->cmd_sg_list = NULL;
}
static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
{
int i;
@@ -6435,9 +6435,11 @@ static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
if (h->msix_vector) {
if (h->pdev->msix_enabled)
pci_disable_msix(h->pdev);
h->msix_vector = 0;
} else if (h->msi_vector) {
if (h->pdev->msi_enabled)
pci_disable_msi(h->pdev);
h->msi_vector = 0;
}
}
@@ -6576,10 +6578,14 @@ static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
static void hpsa_free_cfgtables(struct ctlr_info *h)
{
if (h->transtable)
if (h->transtable) {
iounmap(h->transtable);
if (h->cfgtable)
h->transtable = NULL;
}
if (h->cfgtable) {
iounmap(h->cfgtable);
h->cfgtable = NULL;
}
}
/* Find and map CISS config table and transfer table
@@ -6796,6 +6802,7 @@ static void hpsa_free_pci_init(struct ctlr_info *h)
{
hpsa_free_cfgtables(h); /* pci_init 4 */
iounmap(h->vaddr); /* pci_init 3 */
h->vaddr = NULL;
hpsa_disable_interrupt_mode(h); /* pci_init 2 */
pci_release_regions(h->pdev); /* pci_init 2 */
pci_disable_device(h->pdev); /* pci_init 1 */
@@ -6866,6 +6873,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
hpsa_free_cfgtables(h);
clean3: /* vaddr, intmode+region, pci */
iounmap(h->vaddr);
h->vaddr = NULL;
clean2: /* intmode+region, pci */
hpsa_disable_interrupt_mode(h);
pci_release_regions(h->pdev);
@@ -6955,16 +6963,23 @@ static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
kfree(h->cmd_pool_bits);
if (h->cmd_pool)
h->cmd_pool_bits = NULL;
if (h->cmd_pool) {
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct CommandList),
h->cmd_pool,
h->cmd_pool_dhandle);
if (h->errinfo_pool)
h->cmd_pool = NULL;
h->cmd_pool_dhandle = 0;
}
if (h->errinfo_pool) {
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool,
h->errinfo_pool_dhandle);
h->errinfo_pool = NULL;
h->errinfo_pool_dhandle = 0;
}
}
static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
@@ -7012,12 +7027,14 @@ static void hpsa_free_irqs(struct ctlr_info *h)
i = h->intr_mode;
irq_set_affinity_hint(h->intr[i], NULL);
free_irq(h->intr[i], &h->q[i]);
h->q[i] = 0;
return;
}
for (i = 0; i < h->msix_vector; i++) {
irq_set_affinity_hint(h->intr[i], NULL);
free_irq(h->intr[i], &h->q[i]);
h->q[i] = 0;
}
for (; i < MAX_REPLY_QUEUES; i++)
h->q[i] = 0;
@@ -7070,6 +7087,7 @@ static int hpsa_request_irqs(struct ctlr_info *h,
intxhandler, IRQF_SHARED, h->devname,
&h->q[h->intr_mode]);
}
irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
}
if (rc) {
dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
@@ -7114,23 +7132,17 @@ static void hpsa_free_reply_queues(struct ctlr_info *h)
h->reply_queue[i].head = NULL;
h->reply_queue[i].busaddr = 0;
}
h->reply_queue_size = 0;
}
static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
hpsa_free_irqs(h);
hpsa_free_sg_chain_blocks(h);
hpsa_free_cmd_pool(h);
kfree(h->blockFetchTable); /* perf 2 */
hpsa_free_reply_queues(h); /* perf 1 */
hpsa_free_ioaccel1_cmd_and_bft(h); /* perf 1 */
hpsa_free_ioaccel2_cmd_and_bft(h); /* perf 1 */
hpsa_free_cfgtables(h); /* pci_init 4 */
iounmap(h->vaddr); /* pci_init 3 */
hpsa_disable_interrupt_mode(h); /* pci_init 2 */
pci_disable_device(h->pdev);
pci_release_regions(h->pdev); /* pci_init 2 */
kfree(h);
hpsa_free_performant_mode(h); /* init_one 7 */
hpsa_free_sg_chain_blocks(h); /* init_one 6 */
hpsa_free_cmd_pool(h); /* init_one 5 */
hpsa_free_irqs(h); /* init_one 4 */
hpsa_free_pci_init(h); /* init_one 3 */
kfree(h); /* init_one 1 */
}
/* Called when controller lockup detected. */
@@ -7403,10 +7415,13 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
if (!h) {
dev_err(&pdev->dev, "Failed to allocate controller head\n");
return -ENOMEM;
}
h->pdev = pdev;
h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
INIT_LIST_HEAD(&h->offline_device_list);
spin_lock_init(&h->lock);
@@ -7424,20 +7439,21 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
if (!h->resubmit_wq) {
rc = -ENOMEM;
goto clean1;
goto clean1; /* aer/h */
}
/* Allocate and clear per-cpu variable lockup_detected */
h->lockup_detected = alloc_percpu(u32);
if (!h->lockup_detected) {
dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
rc = -ENOMEM;
goto clean1;
goto clean1; /* wq/aer/h */
}
set_lockup_detected_for_all_cpus(h, 0);
rc = hpsa_pci_init(h);
if (rc != 0)
goto clean1;
if (rc)
goto clean2; /* lockup, wq/aer/h */
sprintf(h->devname, HPSA "%d", number_of_controllers);
h->ctlr = number_of_controllers;
@@ -7453,23 +7469,25 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dac = 0;
} else {
dev_err(&pdev->dev, "no suitable DMA available\n");
goto clean2;
goto clean3; /* pci, lockup, wq/aer/h */
}
}
/* make sure the board interrupts are off */
h->access.set_intr_mask(h, HPSA_INTR_OFF);
if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
goto clean2;
rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
if (rc)
goto clean3; /* pci, lockup, wq/aer/h */
dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
h->devname, pdev->device,
h->intr[h->intr_mode], dac ? "" : " not");
rc = hpsa_alloc_cmd_pool(h);
if (rc)
goto clean2_and_free_irqs;
if (hpsa_allocate_sg_chain_blocks(h))
goto clean4;
goto clean4; /* irq, pci, lockup, wq/aer/h */
rc = hpsa_alloc_sg_chain_blocks(h);
if (rc)
goto clean5; /* cmd, irq, pci, lockup, wq/aer/h */
init_waitqueue_head(&h->scan_wait_queue);
init_waitqueue_head(&h->abort_cmd_wait_queue);
h->scan_finished = 1; /* no scan currently in progress */
@@ -7479,9 +7497,12 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
h->hba_mode_enabled = 0;
h->scsi_host = NULL;
spin_lock_init(&h->devlock);
hpsa_put_ctlr_into_performant_mode(h);
rc = hpsa_put_ctlr_into_performant_mode(h);
if (rc)
goto clean6; /* sg, cmd, irq, pci, lockup, wq/aer/h */
/* At this point, the controller is ready to take commands.
/*
* At this point, the controller is ready to take commands.
* Now, if reset_devices and the hard reset didn't work, try
* the soft reset and see if that works.
*/
@@ -7546,7 +7567,7 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
hpsa_hba_inquiry(h);
rc = hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
if (rc)
goto clean4;
goto clean7;
/* Monitor the controller for firmware lockups */
h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
@@ -7558,22 +7579,32 @@ static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
h->heartbeat_sample_interval);
return 0;
clean4:
clean7: /* perf, sg, cmd, irq, pci, lockup, wq/aer/h */
kfree(h->hba_inquiry_data);
hpsa_free_performant_mode(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */
hpsa_free_sg_chain_blocks(h);
clean5: /* cmd, irq, pci, lockup, wq/aer/h */
hpsa_free_cmd_pool(h);
hpsa_free_ioaccel1_cmd_and_bft(h);
hpsa_free_ioaccel2_cmd_and_bft(h);
clean2_and_free_irqs:
clean4: /* irq, pci, lockup, wq/aer/h */
hpsa_free_irqs(h);
clean2:
clean3: /* pci, lockup, wq/aer/h */
hpsa_free_pci_init(h);
clean1:
if (h->resubmit_wq)
clean2: /* lockup, wq/aer/h */
if (h->lockup_detected) {
free_percpu(h->lockup_detected);
h->lockup_detected = NULL;
}
clean1: /* wq/aer/h */
if (h->resubmit_wq) {
destroy_workqueue(h->resubmit_wq);
if (h->rescan_ctlr_wq)
h->resubmit_wq = NULL;
}
if (h->rescan_ctlr_wq) {
destroy_workqueue(h->rescan_ctlr_wq);
if (h->lockup_detected)
free_percpu(h->lockup_detected);
h->rescan_ctlr_wq = NULL;
}
kfree(h);
return rc;
}
@@ -7621,7 +7652,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
*/
hpsa_flush_cache(h);
h->access.set_intr_mask(h, HPSA_INTR_OFF);
hpsa_free_irqs(h);
hpsa_free_irqs(h); /* init_one 4 */
hpsa_disable_interrupt_mode(h); /* pci_init 2 */
}
@@ -7629,8 +7660,10 @@ static void hpsa_free_device_info(struct ctlr_info *h)
{
int i;
for (i = 0; i < h->ndevices; i++)
for (i = 0; i < h->ndevices; i++) {
kfree(h->dev[i]);
h->dev[i] = NULL;
}
}
static void hpsa_remove_one(struct pci_dev *pdev)
@@ -7652,26 +7685,29 @@ static void hpsa_remove_one(struct pci_dev *pdev)
cancel_delayed_work_sync(&h->rescan_ctlr_work);
destroy_workqueue(h->rescan_ctlr_wq);
destroy_workqueue(h->resubmit_wq);
hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
/* includes hpsa_free_irqs */
/* includes hpsa_free_irqs - init_one 4 */
/* includes hpsa_disable_interrupt_mode - pci_init 2 */
hpsa_shutdown(pdev);
hpsa_free_device_info(h);
hpsa_free_sg_chain_blocks(h);
kfree(h->blockFetchTable); /* perf 2 */
hpsa_free_reply_queues(h); /* perf 1 */
hpsa_free_ioaccel1_cmd_and_bft(h); /* perf 1 */
hpsa_free_ioaccel2_cmd_and_bft(h); /* perf 1 */
hpsa_free_device_info(h); /* scan */
hpsa_unregister_scsi(h); /* init_one "8" */
kfree(h->hba_inquiry_data); /* init_one "8" */
h->hba_inquiry_data = NULL; /* init_one "8" */
hpsa_free_performant_mode(h); /* init_one 7 */
hpsa_free_sg_chain_blocks(h); /* init_one 6 */
hpsa_free_cmd_pool(h); /* init_one 5 */
kfree(h->hba_inquiry_data);
/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
/* includes hpsa_disable_interrupt_mode - pci_init 2 */
hpsa_free_pci_init(h);
hpsa_free_pci_init(h); /* init_one 3 */
free_percpu(h->lockup_detected);
kfree(h);
free_percpu(h->lockup_detected); /* init_one 2 */
h->lockup_detected = NULL; /* init_one 2 */
/* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
kfree(h); /* init_one 1 */
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
@@ -7729,7 +7765,10 @@ static void calc_bucket_map(int bucket[], int num_buckets,
}
}
/* return -ENODEV or other reason on error, 0 on success */
/*
* return -ENODEV on err, 0 on success (or no action)
* allocates numerous items that must be freed later
*/
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
int i;
@@ -7914,12 +7953,16 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
if (h->ioaccel_cmd_pool)
if (h->ioaccel_cmd_pool) {
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
h->ioaccel_cmd_pool,
h->ioaccel_cmd_pool_dhandle);
h->ioaccel_cmd_pool = NULL;
h->ioaccel_cmd_pool_dhandle = 0;
}
kfree(h->ioaccel1_blockFetchTable);
h->ioaccel1_blockFetchTable = NULL;
}
/* Allocate ioaccel1 mode command blocks and block fetch table */
@@ -7963,12 +8006,16 @@ static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
hpsa_free_ioaccel2_sg_chain_blocks(h);
if (h->ioaccel2_cmd_pool)
if (h->ioaccel2_cmd_pool) {
pci_free_consistent(h->pdev,
h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
h->ioaccel2_cmd_pool,
h->ioaccel2_cmd_pool_dhandle);
h->ioaccel2_cmd_pool = NULL;
h->ioaccel2_cmd_pool_dhandle = 0;
}
kfree(h->ioaccel2_blockFetchTable);
h->ioaccel2_blockFetchTable = NULL;
}
/* Allocate ioaccel2 mode command blocks and block fetch table */
@@ -8013,33 +8060,46 @@ static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
return rc;
}
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
kfree(h->blockFetchTable);
h->blockFetchTable = NULL;
hpsa_free_reply_queues(h);
hpsa_free_ioaccel1_cmd_and_bft(h);
hpsa_free_ioaccel2_cmd_and_bft(h);
}
/* return -ENODEV on error, 0 on success (or no action)
* allocates numerous items that must be freed later
*/
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
u32 trans_support;
unsigned long transMethod = CFGTBL_Trans_Performant |
CFGTBL_Trans_use_short_tags;
int i;
int i, rc;
if (hpsa_simple_mode)
return;
return 0;
trans_support = readl(&(h->cfgtable->TransportSupport));
if (!(trans_support & PERFORMANT_MODE))
return;
return 0;
/* Check for I/O accelerator mode support */
if (trans_support & CFGTBL_Trans_io_accel1) {
transMethod |= CFGTBL_Trans_io_accel1 |
CFGTBL_Trans_enable_directed_msix;
if (hpsa_alloc_ioaccel1_cmd_and_bft(h))
goto clean_up;
} else {
if (trans_support & CFGTBL_Trans_io_accel2) {
rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
if (rc)
return rc;
} else if (trans_support & CFGTBL_Trans_io_accel2) {
transMethod |= CFGTBL_Trans_io_accel2 |
CFGTBL_Trans_enable_directed_msix;
if (hpsa_alloc_ioaccel2_cmd_and_bft(h))
goto clean_up;
}
rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
if (rc)
return rc;
}
h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
@@ -8051,8 +8111,10 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
h->reply_queue_size,
&(h->reply_queue[i].busaddr));
if (!h->reply_queue[i].head)
goto clean_up;
if (!h->reply_queue[i].head) {
rc = -ENOMEM;
goto clean1; /* rq, ioaccel */
}
h->reply_queue[i].size = h->max_commands;
h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
h->reply_queue[i].current_entry = 0;
@@ -8061,15 +8123,24 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
/* Need a block fetch table for performant mode */
h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
sizeof(u32)), GFP_KERNEL);
if (!h->blockFetchTable)
goto clean_up;
if (!h->blockFetchTable) {
rc = -ENOMEM;
goto clean1; /* rq, ioaccel */
}
hpsa_enter_performant_mode(h, trans_support);
return;
rc = hpsa_enter_performant_mode(h, trans_support);
if (rc)
goto clean2; /* bft, rq, ioaccel */
return 0;
clean_up:
hpsa_free_reply_queues(h);
clean2: /* bft, rq, ioaccel */
kfree(h->blockFetchTable);
h->blockFetchTable = NULL;
clean1: /* rq, ioaccel */
hpsa_free_reply_queues(h);
hpsa_free_ioaccel1_cmd_and_bft(h);
hpsa_free_ioaccel2_cmd_and_bft(h);
return rc;
}
static int is_accelerated_cmd(struct CommandList *c)
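One further pattern from the last hunk: hpsa_put_ctlr_into_performant_mode() now returns an int instead of failing silently, frees whatever it had allocated before returning, and the caller propagates the code to its own numbered cleanup ladder. A compilable sketch of that shape, again with hypothetical names rather than the real driver code:

```c
/*
 * Sketch (hypothetical names) of converting a void init helper into one
 * that returns an error code: the helper cleans up anything it allocated
 * itself, and the caller only propagates the status to its own unwind
 * ladder. Not the actual hpsa code.
 */
#include <errno.h>
#include <stdlib.h>

struct ctlr {
	unsigned int *block_fetch_table;
};

static int enter_performant_mode(struct ctlr *h)
{
	h->block_fetch_table = calloc(33, sizeof(*h->block_fetch_table));
	if (!h->block_fetch_table)
		return -ENOMEM;   /* was a silent "return" while the helper was void */
	return 0;
}

static int ctlr_probe(struct ctlr *h)
{
	int rc;

	rc = enter_performant_mode(h);
	if (rc)
		return rc;        /* nothing else is held yet in this sketch */
	/* ... rest of bring-up ... */
	return 0;
}

int main(void)
{
	struct ctlr h = { 0 };
	int rc = ctlr_probe(&h);

	free(h.block_fetch_table);
	h.block_fetch_table = NULL;
	return rc ? 1 : 0;
}
```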