Commit 57f97e96 authored by Michael J. Ruhl, committed by Doug Ledford

IB/hfi1: Get the hfi1_devdata structure as early as possible

Currently, several things occur before the hfi1_devdata structure is
allocated.  This leads to inconsistent logging ability and makes it
more difficult to restructure some code paths.

Allocate (and minimally initialize) the structure as soon as possible.
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Sadanand Warrier <sadanand.warrier@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 6a516bc9
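For orientation, here is a condensed sketch of the reworked probe path, distilled from the hunks below. It is not the verbatim driver code: most steps are omitted and error unwinding is collapsed into a single label.

```c
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct hfi1_devdata *dd;
	int ret;

	/* Allocate dd first so dd_dev_err()/dd_dev_info() work for
	 * every failure path that follows. */
	dd = hfi1_alloc_devdata(pdev,
				NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
	if (IS_ERR(dd))
		return PTR_ERR(dd);

	/* Module-parameter validation now logs through dd. */
	ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt);
	if (ret)
		goto bail;

	/* PCIe bring-up takes dd and reaches the pci_dev via dd->pcidev. */
	ret = hfi1_pcie_init(dd);
	if (ret)
		goto bail;

	/* hfi1_init_dd() no longer allocates dd; it returns an errno
	 * instead of an ERR_PTR-encoded pointer. */
	ret = hfi1_init_dd(dd);
	if (ret)
		goto bail;

	return 0;

bail:
	/* Error unwinding is condensed here; the real function uses
	 * distinct labels for the different init stages. */
	return ret;
}
```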
@@ -67,8 +67,6 @@
 #include "debugfs.h"
 #include "fault.h"
 
-#define NUM_IB_PORTS 1
-
 uint kdeth_qp;
 module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
 MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
@@ -14914,20 +14912,16 @@ static int check_int_registers(struct hfi1_devdata *dd)
 }
 
 /**
- * Allocate and initialize the device structure for the hfi.
+ * hfi1_init_dd() - Initialize most of the dd structure.
  * @dev: the pci_dev for hfi1_ib device
  * @ent: pci_device_id struct for this dev
  *
- * Also allocates, initializes, and returns the devdata struct for this
- * device instance
- *
  * This is global, and is called directly at init to set up the
  * chip-specific function pointers for later use.
  */
-struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
-                                  const struct pci_device_id *ent)
+int hfi1_init_dd(struct hfi1_devdata *dd)
 {
-       struct hfi1_devdata *dd;
+       struct pci_dev *pdev = dd->pcidev;
        struct hfi1_pportdata *ppd;
        u64 reg;
        int i, ret;
@@ -14938,13 +14932,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
                "Functional simulator"
        };
        struct pci_dev *parent = pdev->bus->self;
-       u32 sdma_engines;
+       u32 sdma_engines = chip_sdma_engines(dd);
 
-       dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
-                               sizeof(struct hfi1_pportdata));
-       if (IS_ERR(dd))
-               goto bail;
-       sdma_engines = chip_sdma_engines(dd);
        ppd = dd->pport;
        for (i = 0; i < dd->num_pports; i++, ppd++) {
                int vl;
@@ -15246,9 +15235,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
        hfi1_pcie_ddcleanup(dd);
 bail_free:
        hfi1_free_devdata(dd);
-       dd = ERR_PTR(ret);
 bail:
-       return dd;
+       return ret;
 }
 
 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
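With allocation moved to the caller, hfi1_init_dd() now reports failure with a plain errno instead of an ERR_PTR-encoded pointer. The corresponding call-site change in init_one() (shown in full in the init.c hunk further down) amounts to:

```c
/* Before: hfi1_init_dd() allocated dd and reported failure via ERR_PTR(). */
dd = hfi1_init_dd(pdev, ent);
if (IS_ERR(dd)) {
	ret = PTR_ERR(dd);
	goto clean_bail;	/* error already printed */
}

/* After: dd already exists, so only an error code comes back. */
ret = hfi1_init_dd(dd);
if (ret)
	goto clean_bail;	/* error already printed */
```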
@@ -1887,10 +1887,8 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
 #define HFI1_CTXT_WAITING_URG 4
 
 /* free up any allocated data at closes */
-struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
-                                  const struct pci_device_id *ent);
+int hfi1_init_dd(struct hfi1_devdata *dd);
 void hfi1_free_devdata(struct hfi1_devdata *dd);
-struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
 
 /* LED beaconing functions */
 void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
@@ -1974,7 +1972,7 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
 /* Hook for sysfs read of QSFP */
 int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
 
-int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent);
+int hfi1_pcie_init(struct hfi1_devdata *dd);
 void hfi1_clean_up_interrupts(struct hfi1_devdata *dd);
 void hfi1_pcie_cleanup(struct pci_dev *pdev);
 int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
@@ -2125,19 +2123,6 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
        return base_sdma_integrity;
 }
 
-/*
- * hfi1_early_err is used (only!) to print early errors before devdata is
- * allocated, or when dd->pcidev may not be valid, and at the tail end of
- * cleanup when devdata may have been freed, etc.  hfi1_dev_porterr is
- * the same as dd_dev_err, but is used when the message really needs
- * the IB port# to be definitive as to what's happening..
- */
-#define hfi1_early_err(dev, fmt, ...) \
-       dev_err(dev, fmt, ##__VA_ARGS__)
-
-#define hfi1_early_info(dev, fmt, ...) \
-       dev_info(dev, fmt, ##__VA_ARGS__)
-
 #define dd_dev_emerg(dd, fmt, ...) \
        dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
                  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
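Because dd is now allocated before any of these messages can be emitted, the hfi1_early_err()/hfi1_early_info() wrappers are redundant and their callers switch to the dd-based macros. Assuming dd_dev_err() and dd_dev_info() follow the same pattern as the dd_dev_emerg() macro retained above (they are not shown in this hunk), they look roughly like:

```c
/* Sketch based on the dd_dev_emerg() pattern above; prefixes every
 * message with the rdmavt device name taken from dd. */
#define dd_dev_err(dd, fmt, ...) \
	dev_err(&(dd)->pcidev->dev, "%s: " fmt, \
		rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)

#define dd_dev_info(dd, fmt, ...) \
	dev_info(&(dd)->pcidev->dev, "%s: " fmt, \
		 rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
```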
@@ -83,6 +83,8 @@
 #define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
 #define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
 
+#define NUM_IB_PORTS 1
+
 /*
  * Number of user receive contexts we are configured to use (to allow for more
  * pio buffers per ctxt, etc.) Zero means use one user context per CPU.
@@ -654,8 +656,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
        ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
 
        if (loopback) {
-               hfi1_early_err(&pdev->dev,
-                              "Faking data partition 0x8001 in idx %u\n",
+               dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
                           !default_pkey_idx);
                ppd->pkeys[!default_pkey_idx] = 0x8001;
        }
@@ -702,9 +703,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
        return;
 
 bail:
-       hfi1_early_err(&pdev->dev,
-                      "Congestion Control Agent disabled for port %d\n", port);
+       dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
 }
 
 /*
@@ -1246,15 +1245,19 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
        kobject_put(&dd->kobj);
 }
 
-/*
- * Allocate our primary per-unit data structure. Must be done via verbs
- * allocator, because the verbs cleanup process both does cleanup and
- * free of the data structure.
+/**
+ * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
+ * @pdev: Valid PCI device
+ * @extra: How many bytes to alloc past the default
+ *
+ * Must be done via verbs allocator, because the verbs cleanup process
+ * both does cleanup and free of the data structure.
  * "extra" is for chip-specific data.
  *
  * Use the idr mechanism to get a unit number for this unit.
  */
-struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
+static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
+                                               size_t extra)
 {
        unsigned long flags;
        struct hfi1_devdata *dd;
@@ -1287,7 +1290,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
        idr_preload_end();
 
        if (ret < 0) {
-               hfi1_early_err(&pdev->dev,
+               dev_err(&pdev->dev,
                        "Could not allocate unit ID: error %d\n", -ret);
                goto bail;
        }
@@ -1604,22 +1607,22 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
        hfi1_free_devdata(dd);
 }
 
-static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
+static int init_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
 {
        if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
-               hfi1_early_err(dev, "Receive header queue count too small\n");
+               dd_dev_err(dd, "Receive header queue count too small\n");
                return -EINVAL;
        }
 
        if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
-               hfi1_early_err(dev,
+               dd_dev_err(dd,
                           "Receive header queue count cannot be greater than %u\n",
                           HFI1_MAX_HDRQ_EGRBUF_CNT);
                return -EINVAL;
        }
 
        if (thecnt % HDRQ_INCREMENT) {
-               hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
+               dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
                            thecnt, HDRQ_INCREMENT);
                return -EINVAL;
        }
@@ -1639,21 +1642,28 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Validate dev ids */
        if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
              ent->device == PCI_DEVICE_ID_INTEL1)) {
-               hfi1_early_err(&pdev->dev,
-                              "Failing on unknown Intel deviceid 0x%x\n",
+               dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
                        ent->device);
                ret = -ENODEV;
                goto bail;
        }
 
+       /* Allocate the dd so we can get to work */
+       dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
+                               sizeof(struct hfi1_pportdata));
+       if (IS_ERR(dd)) {
+               ret = PTR_ERR(dd);
+               goto bail;
+       }
+
        /* Validate some global module parameters */
-       ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
+       ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt);
        if (ret)
                goto bail;
 
        /* use the encoding function as a sanitization check */
        if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
-               hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
+               dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
                           hfi1_hdrq_entsize);
                ret = -EINVAL;
                goto bail;
@@ -1676,10 +1686,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        clamp_val(eager_buffer_size,
                                  MIN_EAGER_BUFFER * 8,
                                  MAX_EAGER_BUFFER_TOTAL);
-               hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
+               dd_dev_info(dd, "Eager buffer size %u\n",
                            eager_buffer_size);
        } else {
-               hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
+               dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
                ret = -EINVAL;
                goto bail;
        }
@@ -1687,7 +1697,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* restrict value of hfi1_rcvarr_split */
        hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
 
-       ret = hfi1_pcie_init(pdev, ent);
+       ret = hfi1_pcie_init(dd);
        if (ret)
                goto bail;
@@ -1695,12 +1705,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         * Do device-specific initialization, function table setup, dd
         * allocation, etc.
         */
-       dd = hfi1_init_dd(pdev, ent);
-
-       if (IS_ERR(dd)) {
-               ret = PTR_ERR(dd);
+       ret = hfi1_init_dd(dd);
+       if (ret)
                goto clean_bail; /* error already printed */
-       }
 
        ret = create_workqueues(dd);
        if (ret)
 /*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -62,13 +62,11 @@
 /*
  * Do all the common PCIe setup and initialization.
- * devdata is not yet allocated, and is not allocated until after this
- * routine returns success. Therefore dd_dev_err() can't be used for error
- * printing.
  */
-int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+int hfi1_pcie_init(struct hfi1_devdata *dd)
 {
        int ret;
+       struct pci_dev *pdev = dd->pcidev;
 
        ret = pci_enable_device(pdev);
        if (ret) {
@@ -84,15 +82,13 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
                 * about that, it appears. If the original BAR was retained
                 * in the kernel data structures, this may be OK.
                 */
-               hfi1_early_err(&pdev->dev, "pci enable failed: error %d\n",
-                              -ret);
-               goto done;
+               dd_dev_err(dd, "pci enable failed: error %d\n", -ret);
+               return ret;
        }
 
        ret = pci_request_regions(pdev, DRIVER_NAME);
        if (ret) {
-               hfi1_early_err(&pdev->dev,
-                              "pci_request_regions fails: err %d\n", -ret);
+               dd_dev_err(dd, "pci_request_regions fails: err %d\n", -ret);
                goto bail;
        }
@@ -105,8 +101,7 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
                 */
                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (ret) {
-                       hfi1_early_err(&pdev->dev,
-                                      "Unable to set DMA mask: %d\n", ret);
+                       dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);
                        goto bail;
                }
                ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -114,18 +109,16 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
                ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        }
        if (ret) {
-               hfi1_early_err(&pdev->dev,
-                              "Unable to set DMA consistent mask: %d\n", ret);
+               dd_dev_err(dd, "Unable to set DMA consistent mask: %d\n", ret);
                goto bail;
        }
 
        pci_set_master(pdev);
        (void)pci_enable_pcie_error_reporting(pdev);
-       goto done;
+       return 0;
 
 bail:
        hfi1_pcie_cleanup(pdev);
-done:
        return ret;
 }
@@ -201,7 +194,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
                dd_dev_err(dd, "WC mapping of send buffers failed\n");
                goto nomem;
        }
-       dd_dev_info(dd, "WC piobase: %p\n for %x", dd->piobase, TXE_PIO_SIZE);
+       dd_dev_info(dd, "WC piobase: %p for %x\n", dd->piobase, TXE_PIO_SIZE);
 
        dd->physaddr = addr;            /* used for io_remap, etc. */