Commit 872bf2fb authored by Yishai Hadas, committed by David S. Miller

net/mlx4_core: Maintain a persistent memory for mlx4 device

Maintain a persistent memory area that survives the reset flow/PCI error.
This is a preparation for the coming series that adds support for the above flows.
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7aee42c6
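
In outline, the patch moves the fields that must outlive an internal reset or PCI error (the PCI handle and the VF counts) out of struct mlx4_dev into a separately allocated struct mlx4_dev_persistent, and all users switch from dev->pdev to dev->persist->pdev (likewise for num_vfs and nvfs). Below is a minimal sketch of that pattern, condensed from the struct definitions in the last hunk of this diff; the comments are editorial and the real structs carry more fields.

    /* Fields that must survive the reset flow / PCI error live here. */
    struct mlx4_dev_persistent {
            struct pci_dev          *pdev;  /* stable PCI handle */
            struct mlx4_dev         *dev;   /* the re-creatable device state */
            int                      nvfs[MLX4_MAX_PORTS + 1];
            int                      num_vfs;
    };

    /* Everything else may be torn down and rebuilt across a reset. */
    struct mlx4_dev {
            struct mlx4_dev_persistent *persist;
            /* ... */
    };

Accesses such as dev->pdev therefore become dev->persist->pdev throughout the driver, which is the bulk of the diff below.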
@@ -154,7 +154,7 @@ void mlx4_ib_notify_slaves_on_guid_change(struct mlx4_ib_dev *dev,
                         continue;
                 slave_id = (block_num * NUM_ALIAS_GUID_IN_REC) + i ;
-                if (slave_id >= dev->dev->num_vfs + 1)
+                if (slave_id >= dev->dev->persist->num_vfs + 1)
                         return;
                 tmp_cur_ag = *(__be64 *)&p_data[i * GUID_REC_SIZE];
                 form_cache_ag = get_cached_alias_guid(dev, port_num,
...
@@ -1951,7 +1951,8 @@ static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
         ctx->ib_dev = &dev->ib_dev;
 
         for (i = 0;
-             i < min(dev->dev->caps.sqp_demux, (u16)(dev->dev->num_vfs + 1));
+             i < min(dev->dev->caps.sqp_demux,
+                     (u16)(dev->dev->persist->num_vfs + 1));
              i++) {
                 struct mlx4_active_ports actv_ports =
                         mlx4_get_active_ports(dev->dev, i);
...
@@ -198,7 +198,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
         props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                 0xffffff;
-        props->vendor_part_id = dev->dev->pdev->device;
+        props->vendor_part_id = dev->dev->persist->pdev->device;
         props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
         memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
@@ -1375,7 +1375,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
 {
         struct mlx4_ib_dev *dev =
                 container_of(device, struct mlx4_ib_dev, ib_dev.dev);
-        return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
+        return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
 }
 
 static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
@@ -1937,7 +1937,8 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
         int i;
 
         if (mlx4_is_master(ibdev->dev)) {
-                for (slave = 0; slave <= ibdev->dev->num_vfs; ++slave) {
+                for (slave = 0; slave <= ibdev->dev->persist->num_vfs;
+                     ++slave) {
                         for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) {
                                 for (i = 0;
                                      i < ibdev->dev->phys_caps.pkey_phys_table_len[port];
@@ -1994,7 +1995,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
                 for (j = 0; j < eq_per_port; j++) {
                         snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
-                                 i, j, dev->pdev->bus->name);
+                                 i, j, dev->persist->pdev->bus->name);
                         /* Set IRQ for specific name (per ring) */
                         if (mlx4_assign_eq(dev, name, NULL,
                                            &ibdev->eq_table[eq])) {
@@ -2058,7 +2059,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
         ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
         if (!ibdev) {
-                dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
+                dev_err(&dev->persist->pdev->dev,
+                        "Device struct alloc failed\n");
                 return NULL;
         }
@@ -2085,7 +2087,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
         ibdev->num_ports = num_ports;
         ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
         ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
-        ibdev->ib_dev.dma_device = &dev->pdev->dev;
+        ibdev->ib_dev.dma_device = &dev->persist->pdev->dev;
 
         if (dev->caps.userspace_caps)
                 ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
@@ -2236,7 +2238,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                                             sizeof(long),
                                             GFP_KERNEL);
                 if (!ibdev->ib_uc_qpns_bitmap) {
-                        dev_err(&dev->pdev->dev, "bit map alloc failed\n");
+                        dev_err(&dev->persist->pdev->dev,
+                                "bit map alloc failed\n");
                         goto err_steer_qp_release;
                 }
...
@@ -401,7 +401,8 @@ struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device
         if (!mfrpl->ibfrpl.page_list)
                 goto err_free;
 
-        mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->pdev->dev,
+        mfrpl->mapped_page_list = dma_alloc_coherent(&dev->dev->persist->
+                                                     pdev->dev,
                                                      size, &mfrpl->map,
                                                      GFP_KERNEL);
         if (!mfrpl->mapped_page_list)
@@ -423,7 +424,8 @@ void mlx4_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
         struct mlx4_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
         int size = page_list->max_page_list_len * sizeof (u64);
 
-        dma_free_coherent(&dev->dev->pdev->dev, size, mfrpl->mapped_page_list,
+        dma_free_coherent(&dev->dev->persist->pdev->dev, size,
+                          mfrpl->mapped_page_list,
                           mfrpl->map);
         kfree(mfrpl->ibfrpl.page_list);
         kfree(mfrpl);
...
@@ -375,7 +375,7 @@ static void get_name(struct mlx4_ib_dev *dev, char *name, int i, int max)
         char base_name[9];
 
         /* pci_name format is: bus:dev:func -> xxxx:yy:zz.n */
-        strlcpy(name, pci_name(dev->dev->pdev), max);
+        strlcpy(name, pci_name(dev->dev->persist->pdev), max);
         strncpy(base_name, name, 8); /*till xxxx:yy:*/
         base_name[8] = '\0';
         /* with no ARI only 3 last bits are used so when the fn is higher than 8
@@ -792,7 +792,7 @@ static int register_pkey_tree(struct mlx4_ib_dev *device)
         if (!mlx4_is_master(device->dev))
                 return 0;
 
-        for (i = 0; i <= device->dev->num_vfs; ++i)
+        for (i = 0; i <= device->dev->persist->num_vfs; ++i)
                 register_one_pkey_tree(device, i);
 
         return 0;
@@ -807,7 +807,7 @@ static void unregister_pkey_tree(struct mlx4_ib_dev *device)
         if (!mlx4_is_master(device->dev))
                 return;
 
-        for (slave = device->dev->num_vfs; slave >= 0; --slave) {
+        for (slave = device->dev->persist->num_vfs; slave >= 0; --slave) {
                 list_for_each_entry_safe(p, t,
                                          &device->pkeys.pkey_port_list[slave],
                                          entry) {
...
@@ -592,7 +592,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                 buf->nbufs = 1;
                 buf->npages = 1;
                 buf->page_shift = get_order(size) + PAGE_SHIFT;
-                buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+                buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev,
                                                      size, &t, gfp);
                 if (!buf->direct.buf)
                         return -ENOMEM;
@@ -619,7 +619,8 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                 for (i = 0; i < buf->nbufs; ++i) {
                         buf->page_list[i].buf =
-                                dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+                                dma_alloc_coherent(&dev->persist->pdev->dev,
+                                                   PAGE_SIZE,
                                                    &t, gfp);
                         if (!buf->page_list[i].buf)
                                 goto err_free;
@@ -657,7 +658,8 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
         int i;
 
         if (buf->nbufs == 1)
-                dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+                dma_free_coherent(&dev->persist->pdev->dev, size,
+                                  buf->direct.buf,
                                   buf->direct.map);
         else {
                 if (BITS_PER_LONG == 64 && buf->direct.buf)
@@ -665,7 +667,8 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
                 for (i = 0; i < buf->nbufs; ++i)
                         if (buf->page_list[i].buf)
-                                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+                                dma_free_coherent(&dev->persist->pdev->dev,
+                                                  PAGE_SIZE,
                                                   buf->page_list[i].buf,
                                                   buf->page_list[i].map);
                 kfree(buf->page_list);
@@ -738,7 +741,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp
         if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
                 goto out;
 
-        pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev), gfp);
+        pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp);
         if (!pgdir) {
                 ret = -ENOMEM;
                 goto out;
@@ -775,7 +778,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
         set_bit(i, db->u.pgdir->bits[o]);
 
         if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
-                dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
                                   db->u.pgdir->db_page, db->u.pgdir->db_dma);
                 list_del(&db->u.pgdir->list);
                 kfree(db->u.pgdir);
...
@@ -70,7 +70,7 @@ static void poll_catas(unsigned long dev_ptr)
         if (readl(priv->catas_err.map)) {
                 /* If the device is off-line, we cannot try to recover it */
-                if (pci_channel_offline(dev->pdev))
+                if (pci_channel_offline(dev->persist->pdev))
                         mod_timer(&priv->catas_err.timer,
                                   round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
                 else {
@@ -94,6 +94,7 @@ static void catas_reset(struct work_struct *work)
 {
         struct mlx4_priv *priv, *tmppriv;
         struct mlx4_dev *dev;
+        struct mlx4_dev_persistent *persist;
         LIST_HEAD(tlist);
         int ret;
@@ -103,20 +104,20 @@ static void catas_reset(struct work_struct *work)
         spin_unlock_irq(&catas_lock);
 
         list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
-                struct pci_dev *pdev = priv->dev.pdev;
+                struct pci_dev *pdev = priv->dev.persist->pdev;
 
                 /* If the device is off-line, we cannot reset it */
                 if (pci_channel_offline(pdev))
                         continue;
 
-                ret = mlx4_restart_one(priv->dev.pdev);
+                ret = mlx4_restart_one(priv->dev.persist->pdev);
                 /* 'priv' now is not valid */
                 if (ret)
                         pr_err("mlx4 %s: Reset failed (%d)\n",
                                pci_name(pdev), ret);
                 else {
-                        dev = pci_get_drvdata(pdev);
-                        mlx4_dbg(dev, "Reset succeeded\n");
+                        persist = pci_get_drvdata(pdev);
+                        mlx4_dbg(persist->dev, "Reset succeeded\n");
                 }
         }
 }
@@ -134,7 +135,7 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
         init_timer(&priv->catas_err.timer);
         priv->catas_err.map = NULL;
 
-        addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
+        addr = pci_resource_start(dev->persist->pdev, priv->fw.catas_bar) +
                 priv->fw.catas_offset;
 
         priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
...
@@ -307,7 +307,7 @@ static int cmd_pending(struct mlx4_dev *dev)
 {
         u32 status;
 
-        if (pci_channel_offline(dev->pdev))
+        if (pci_channel_offline(dev->persist->pdev))
                 return -EIO;
 
         status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -328,7 +328,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
         mutex_lock(&cmd->hcr_mutex);
 
-        if (pci_channel_offline(dev->pdev)) {
+        if (pci_channel_offline(dev->persist->pdev)) {
                 /*
                  * Device is going through error recovery
                  * and cannot accept commands.
@@ -342,7 +342,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                 end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
 
         while (cmd_pending(dev)) {
-                if (pci_channel_offline(dev->pdev)) {
+                if (pci_channel_offline(dev->persist->pdev)) {
                         /*
                          * Device is going through error recovery
                          * and cannot accept commands.
@@ -464,7 +464,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
         down(&priv->cmd.poll_sem);
 
-        if (pci_channel_offline(dev->pdev)) {
+        if (pci_channel_offline(dev->persist->pdev)) {
                 /*
                  * Device is going through error recovery
                  * and cannot accept commands.
@@ -487,7 +487,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
         end = msecs_to_jiffies(timeout) + jiffies;
         while (cmd_pending(dev) && time_before(jiffies, end)) {
-                if (pci_channel_offline(dev->pdev)) {
+                if (pci_channel_offline(dev->persist->pdev)) {
                         /*
                          * Device is going through error recovery
                          * and cannot accept commands.
@@ -612,7 +612,7 @@ int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                int out_is_imm, u32 in_modifier, u8 op_modifier,
                u16 op, unsigned long timeout, int native)
 {
-        if (pci_channel_offline(dev->pdev))
+        if (pci_channel_offline(dev->persist->pdev))
                 return -EIO;
 
         if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
@@ -1997,11 +1997,12 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
         if (mlx4_is_master(dev))
                 priv->mfunc.comm =
-                        ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
+                        ioremap(pci_resource_start(dev->persist->pdev,
+                                                   priv->fw.comm_bar) +
                                 priv->fw.comm_base, MLX4_COMM_PAGESIZE);
         else
                 priv->mfunc.comm =
-                        ioremap(pci_resource_start(dev->pdev, 2) +
+                        ioremap(pci_resource_start(dev->persist->pdev, 2) +
                                 MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
         if (!priv->mfunc.comm) {
                 mlx4_err(dev, "Couldn't map communication vector\n");
@@ -2107,7 +2108,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
 err_comm:
         iounmap(priv->mfunc.comm);
 err_vhcr:
-        dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+        dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
                           priv->mfunc.vhcr,
                           priv->mfunc.vhcr_dma);
         priv->mfunc.vhcr = NULL;
@@ -2130,8 +2131,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
         }
 
         if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
-                priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
-                                        MLX4_HCR_BASE, MLX4_HCR_SIZE);
+                priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
+                                        0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
                 if (!priv->cmd.hcr) {
                         mlx4_err(dev, "Couldn't map command register\n");
                         goto err;
@@ -2140,7 +2141,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
         }
 
         if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
-                priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
+                                                      PAGE_SIZE,
                                                       &priv->mfunc.vhcr_dma,
                                                       GFP_KERNEL);
                 if (!priv->mfunc.vhcr)
@@ -2150,7 +2152,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
         }
 
         if (!priv->cmd.pool) {
-                priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
+                priv->cmd.pool = pci_pool_create("mlx4_cmd",
+                                                 dev->persist->pdev,
                                                  MLX4_MAILBOX_SIZE,
                                                  MLX4_MAILBOX_SIZE, 0);
                 if (!priv->cmd.pool)
@@ -2202,7 +2205,7 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
         }
 
         if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
             (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
-                dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
                                   priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
                 priv->mfunc.vhcr = NULL;
         }
@@ -2306,8 +2309,9 @@ u32 mlx4_comm_get_version(void)
 
 static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
 {
-        if ((vf < 0) || (vf >= dev->num_vfs)) {
-                mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n", vf, dev->num_vfs);
+        if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
+                mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
+                         vf, dev->persist->num_vfs);
                 return -EINVAL;
         }
@@ -2316,7 +2320,7 @@ static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
 
 int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
 {
-        if (slave < 1 || slave > dev->num_vfs) {
+        if (slave < 1 || slave > dev->persist->num_vfs) {
                 mlx4_err(dev,
                          "Bad slave number:%d (number of activated slaves: %lu)\n",
                          slave, dev->num_slaves);
@@ -2388,7 +2392,7 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
         if (port <= 0 || port > dev->caps.num_ports)
                 return slaves_pport;
 
-        for (i = 0; i < dev->num_vfs + 1; i++) {
+        for (i = 0; i < dev->persist->num_vfs + 1; i++) {
                 struct mlx4_active_ports actv_ports =
                         mlx4_get_active_ports(dev, i);
                 if (test_bit(port - 1, actv_ports.ports))
@@ -2408,7 +2412,7 @@ struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
         bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
 
-        for (i = 0; i < dev->num_vfs + 1; i++) {
+        for (i = 0; i < dev->persist->num_vfs + 1; i++) {
                 struct mlx4_active_ports actv_ports =
                         mlx4_get_active_ports(dev, i);
                 if (bitmap_equal(crit_ports->ports, actv_ports.ports,
...
@@ -70,10 +70,10 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
         /* Allocate HW buffers on provided NUMA node.
          * dev->numa_node is used in mtt range allocation flow.
          */
-        set_dev_node(&mdev->dev->pdev->dev, node);
+        set_dev_node(&mdev->dev->persist->pdev->dev, node);
         err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
                                 cq->buf_size, 2 * PAGE_SIZE);
-        set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
+        set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
         if (err)
                 goto err_cq;
...
@@ -92,7 +92,7 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
                 (u16) (mdev->dev->caps.fw_ver >> 32),
                 (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
                 (u16) (mdev->dev->caps.fw_ver & 0xffff));
-        strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
+        strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
                 sizeof(drvinfo->bus_info));
         drvinfo->n_stats = 0;
         drvinfo->regdump_len = 0;
...
@@ -241,8 +241,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
         spin_lock_init(&mdev->uar_lock);
 
         mdev->dev = dev;
-        mdev->dma_device = &(dev->pdev->dev);
-        mdev->pdev = dev->pdev;
+        mdev->dma_device = &dev->persist->pdev->dev;
+        mdev->pdev = dev->persist->pdev;
         mdev->device_up = false;
 
         mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
...
@@ -2457,7 +2457,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
         netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
         netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
 
-        SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
+        SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
         dev->dev_port = port - 1;
 
         /*
...
@@ -387,10 +387,10 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
                  ring->rx_info, tmp);
 
         /* Allocate HW buffers on provided NUMA node */
-        set_dev_node(&mdev->dev->pdev->dev, node);
+        set_dev_node(&mdev->dev->persist->pdev->dev, node);
         err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
                                  ring->buf_size, 2 * PAGE_SIZE);
-        set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
+        set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
         if (err)
                 goto err_info;
...
@@ -91,10 +91,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
         ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
 
         /* Allocate HW buffers on provided NUMA node */
-        set_dev_node(&mdev->dev->pdev->dev, node);
+        set_dev_node(&mdev->dev->persist->pdev->dev, node);
         err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
                                  2 * PAGE_SIZE);
-        set_dev_node(&mdev->dev->pdev->dev, mdev->dev->numa_node);
+        set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
         if (err) {
                 en_err(priv, "Failed allocating hwq resources\n");
                 goto err_bounce;
...
@@ -237,7 +237,7 @@ int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
         struct mlx4_eqe eqe;
 
         /*don't send if we don't have the that slave */
-        if (dev->num_vfs < slave)
+        if (dev->persist->num_vfs < slave)
                 return 0;
         memset(&eqe, 0, sizeof eqe);
@@ -255,7 +255,7 @@ int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
         struct mlx4_eqe eqe;
 
         /*don't send if we don't have the that slave */
-        if (dev->num_vfs < slave)
+        if (dev->persist->num_vfs < slave)
                 return 0;
         memset(&eqe, 0, sizeof eqe);
@@ -310,7 +310,7 @@ static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
         struct mlx4_slaves_pport slaves_pport = mlx4_phys_to_slaves_pport(dev,
                                                                           port);
 
-        for (i = 0; i < dev->num_vfs + 1; i++)
+        for (i = 0; i < dev->persist->num_vfs + 1; i++)
                 if (test_bit(i, slaves_pport.slaves))
                         set_and_calc_slave_port_state(dev, i, port,
                                                       event, &gen_event);
@@ -560,7 +560,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                         mlx4_priv(dev)->sense.do_sense_port[port] = 1;
                         if (!mlx4_is_master(dev))
                                 break;
-                        for (i = 0; i < dev->num_vfs + 1; i++) {
+                        for (i = 0; i < dev->persist->num_vfs + 1;
+                             i++) {
                                 if (!test_bit(i, slaves_port.slaves))
                                         continue;
                                 if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
@@ -596,7 +597,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
                         if (!mlx4_is_master(dev))
                                 break;
                         if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
-                                for (i = 0; i < dev->num_vfs + 1; i++) {
+                                for (i = 0;
+                                     i < dev->persist->num_vfs + 1;
+                                     i++) {
                                         if (!test_bit(i, slaves_port.slaves))
                                                 continue;
                                         if (i == mlx4_master_func_num(dev))
@@ -865,7 +868,7 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
         if (!priv->eq_table.uar_map[index]) {
                 priv->eq_table.uar_map[index] =
-                        ioremap(pci_resource_start(dev->pdev, 2) +
+                        ioremap(pci_resource_start(dev->persist->pdev, 2) +
                                 ((eq->eqn / 4) << PAGE_SHIFT),
                                 PAGE_SIZE);
                 if (!priv->eq_table.uar_map[index]) {
@@ -928,8 +931,10 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
         eq_context = mailbox->buf;
 
         for (i = 0; i < npages; ++i) {
-                eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
-                                                          PAGE_SIZE, &t, GFP_KERNEL);
+                eq->page_list[i].buf = dma_alloc_coherent(&dev->persist->
+                                                          pdev->dev,
+                                                          PAGE_SIZE, &t,
+                                                          GFP_KERNEL);
                 if (!eq->page_list[i].buf)
                         goto err_out_free_pages;
@@ -995,7 +1000,7 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
 err_out_free_pages:
         for (i = 0; i < npages; ++i)
                 if (eq->page_list[i].buf)
-                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+                        dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
                                           eq->page_list[i].buf,
                                           eq->page_list[i].map);
@@ -1044,7 +1049,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
         mlx4_mtt_cleanup(dev, &eq->mtt);
         for (i = 0; i < npages; ++i)
-                dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+                dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
                                   eq->page_list[i].buf,
                                   eq->page_list[i].map);
@@ -1060,7 +1065,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
         int i, vec;
 
         if (eq_table->have_irq)
-                free_irq(dev->pdev->irq, dev);
+                free_irq(dev->persist->pdev->irq, dev);
 
         for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                 if (eq_table->eq[i].have_irq) {
@@ -1089,7 +1094,8 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
 
-        priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
+        priv->clr_base = ioremap(pci_resource_start(dev->persist->pdev,
+                                 priv->fw.clr_int_bar) +
                                  priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
         if (!priv->clr_base) {
                 mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
@@ -1212,13 +1218,13 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
                                  i * MLX4_IRQNAME_SIZE,
                                  MLX4_IRQNAME_SIZE,
                                  "mlx4-comp-%d@pci:%s", i,
-                                 pci_name(dev->pdev));
+                                 pci_name(dev->persist->pdev));
                 } else {
                         snprintf(priv->eq_table.irq_names +
                                  i * MLX4_IRQNAME_SIZE,
                                  MLX4_IRQNAME_SIZE,
                                  "mlx4-async@pci:%s",
-                                 pci_name(dev->pdev));
+                                 pci_name(dev->persist->pdev));
                 }
 
                 eq_name = priv->eq_table.irq_names +
@@ -1235,8 +1241,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
                 snprintf(priv->eq_table.irq_names,
                          MLX4_IRQNAME_SIZE,
                          DRV_NAME "@pci:%s",
-                         pci_name(dev->pdev));
-                err = request_irq(dev->pdev->irq, mlx4_interrupt,
+                         pci_name(dev->persist->pdev));
+                err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
                                   IRQF_SHARED, priv->eq_table.irq_names, dev);
                 if (err)
                         goto err_out_async;
...
@@ -56,7 +56,7 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
         int i;
 
         if (chunk->nsg > 0)
-                pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
+                pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
                              PCI_DMA_BIDIRECTIONAL);
 
         for (i = 0; i < chunk->npages; ++i)
@@ -69,7 +69,8 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
         int i;
 
         for (i = 0; i < chunk->npages; ++i)
-                dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
+                dma_free_coherent(&dev->persist->pdev->dev,
+                                  chunk->mem[i].length,
                                   lowmem_page_address(sg_page(&chunk->mem[i])),
                                   sg_dma_address(&chunk->mem[i]));
 }
@@ -173,7 +174,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
                         --cur_order;
 
                 if (coherent)
-                        ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
+                        ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
                                                       &chunk->mem[chunk->npages],
                                                       cur_order, gfp_mask);
                 else
@@ -193,7 +194,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
                         if (coherent)
                                 ++chunk->nsg;
                         else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
-                                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+                                chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
                                                         chunk->npages,
                                                         PCI_DMA_BIDIRECTIONAL);
@@ -208,7 +209,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
         }
 
         if (!coherent && chunk) {
-                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
+                chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
                                         chunk->npages,
                                         PCI_DMA_BIDIRECTIONAL);
...
This diff is collapsed.
@@ -221,16 +221,17 @@ extern int mlx4_debug_level;
 #define mlx4_dbg(mdev, format, ...)                                     \
 do {                                                                    \
         if (mlx4_debug_level)                                           \
-                dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format,      \
+                dev_printk(KERN_DEBUG,                                  \
+                           &(mdev)->persist->pdev->dev, format,         \
                            ##__VA_ARGS__);                              \
 } while (0)
 
 #define mlx4_err(mdev, format, ...)                                     \
-        dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+        dev_err(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
 #define mlx4_info(mdev, format, ...)                                    \
-        dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+        dev_info(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
 #define mlx4_warn(mdev, format, ...)                                    \
-        dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+        dev_warn(&(mdev)->persist->pdev->dev, format, ##__VA_ARGS__)
 
 extern int mlx4_log_num_mgm_entry_size;
 extern int log_mtts_per_seg;
...
@@ -708,13 +708,13 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
         if (!mtts)
                 return -ENOMEM;
 
-        dma_sync_single_for_cpu(&dev->pdev->dev, dma_handle,
+        dma_sync_single_for_cpu(&dev->persist->pdev->dev, dma_handle,
                                 npages * sizeof (u64), DMA_TO_DEVICE);
 
         for (i = 0; i < npages; ++i)
                 mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-        dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+        dma_sync_single_for_device(&dev->persist->pdev->dev, dma_handle,
                                    npages * sizeof (u64), DMA_TO_DEVICE);
 
         return 0;
@@ -1020,13 +1020,13 @@ int mlx4_map_phys_fmr(struct mlx4_dev *dev, struct mlx4_fmr *fmr, u64 *page_list
         /* Make sure MPT status is visible before writing MTT entries */
         wmb();
 
-        dma_sync_single_for_cpu(&dev->pdev->dev, fmr->dma_handle,
+        dma_sync_single_for_cpu(&dev->persist->pdev->dev, fmr->dma_handle,
                                 npages * sizeof(u64), DMA_TO_DEVICE);
 
         for (i = 0; i < npages; ++i)
                 fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
 
-        dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
+        dma_sync_single_for_device(&dev->persist->pdev->dev, fmr->dma_handle,
                                    npages * sizeof(u64), DMA_TO_DEVICE);
 
         fmr->mpt->key = cpu_to_be32(key);
...
@@ -151,11 +151,13 @@ int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
                 return -ENOMEM;
 
         if (mlx4_is_slave(dev))
-                offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
+                offset = uar->index % ((int)pci_resource_len(dev->persist->pdev,
+                                                             2) /
                                        dev->caps.uar_page_size);
         else
                 offset = uar->index;
-        uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
+        uar->pfn = (pci_resource_start(dev->persist->pdev, 2) >> PAGE_SHIFT)
+                    + offset;
         uar->map = NULL;
         return 0;
 }
...
@@ -553,9 +553,9 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port)
                 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
                                         dev, &exclusive_ports);
                 slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
-                                           dev->num_vfs + 1);
+                                           dev->persist->num_vfs + 1);
         }
-        vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+        vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
         if (slave_gid <= ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) % vfs))
                 return ((MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs) + 1;
         return (MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS) / vfs;
@@ -590,10 +590,10 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port)
                 slaves_pport_actv = mlx4_phys_to_slaves_pport_actv(
                                         dev, &exclusive_ports);
                 slave_gid -= bitmap_weight(slaves_pport_actv.slaves,
-                                           dev->num_vfs + 1);
+                                           dev->persist->num_vfs + 1);
         }
         gids = MLX4_ROCE_MAX_GIDS - MLX4_ROCE_PF_GIDS;
-        vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+        vfs = bitmap_weight(slaves_pport.slaves, dev->persist->num_vfs + 1) - 1;
         if (slave_gid <= gids % vfs)
                 return MLX4_ROCE_PF_GIDS + ((gids / vfs) + 1) * (slave_gid - 1);
@@ -644,7 +644,7 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
         int num_eth_ports, err;
         int i;
 
-        if (slave < 0 || slave > dev->num_vfs)
+        if (slave < 0 || slave > dev->persist->num_vfs)
                 return;
 
         actv_ports = mlx4_get_active_ports(dev, slave);
@@ -1214,7 +1214,8 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
                 return -EINVAL;
 
         slaves_pport = mlx4_phys_to_slaves_pport(dev, port);
-        num_vfs = bitmap_weight(slaves_pport.slaves, dev->num_vfs + 1) - 1;
+        num_vfs = bitmap_weight(slaves_pport.slaves,
+                                dev->persist->num_vfs + 1) - 1;
 
         for (i = 0; i < MLX4_ROCE_MAX_GIDS; i++) {
                 if (!memcmp(priv->port[port].gid_table.roce_gids[i].raw, gid,
@@ -1258,7 +1259,7 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
                                         dev, &exclusive_ports);
                         num_vfs_before += bitmap_weight(
                                                 slaves_pport_actv.slaves,
-                                                dev->num_vfs + 1);
+                                                dev->persist->num_vfs + 1);
                 }
 
                 /* candidate_slave_gid isn't necessarily the correct slave, but
@@ -1288,7 +1289,7 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
                                         dev, &exclusive_ports);
                         slave_gid += bitmap_weight(
                                                 slaves_pport_actv.slaves,
-                                                dev->num_vfs + 1);
+                                                dev->persist->num_vfs + 1);
                 }
         }
         *slave_id = slave_gid;
...
@@ -76,19 +76,21 @@ int mlx4_reset(struct mlx4_dev *dev)
                 goto out;
         }
 
-        pcie_cap = pci_pcie_cap(dev->pdev);
+        pcie_cap = pci_pcie_cap(dev->persist->pdev);
 
         for (i = 0; i < 64; ++i) {
                 if (i == 22 || i == 23)
                         continue;
-                if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
+                if (pci_read_config_dword(dev->persist->pdev, i * 4,
+                                          hca_header + i)) {
                         err = -ENODEV;
                         mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
                         goto out;
                 }
         }
 
-        reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE,
+        reset = ioremap(pci_resource_start(dev->persist->pdev, 0) +
+                        MLX4_RESET_BASE,
                         MLX4_RESET_SIZE);
         if (!reset) {
                 err = -ENOMEM;
@@ -122,8 +124,8 @@ int mlx4_reset(struct mlx4_dev *dev)
         end = jiffies + MLX4_RESET_TIMEOUT_JIFFIES;
         do {
-                if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) &&
-                    vendor != 0xffff)
+                if (!pci_read_config_word(dev->persist->pdev, PCI_VENDOR_ID,
+                                          &vendor) && vendor != 0xffff)
                         break;
 
                 msleep(1);
@@ -138,14 +140,16 @@ int mlx4_reset(struct mlx4_dev *dev)
         /* Now restore the PCI headers */
         if (pcie_cap) {
                 devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4];
-                if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
+                if (pcie_capability_write_word(dev->persist->pdev,
+                                               PCI_EXP_DEVCTL,
                                                devctl)) {
                         err = -ENODEV;
                         mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
                         goto out;
                 }
                 linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
-                if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
+                if (pcie_capability_write_word(dev->persist->pdev,
+                                               PCI_EXP_LNKCTL,
                                                linkctl)) {
                         err = -ENODEV;
                         mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
@@ -157,7 +161,8 @@ int mlx4_reset(struct mlx4_dev *dev)
                 if (i * 4 == PCI_COMMAND)
                         continue;
 
-                if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
+                if (pci_write_config_dword(dev->persist->pdev, i * 4,
+                                           hca_header[i])) {
                         err = -ENODEV;
                         mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
                                  i);
@@ -165,7 +170,7 @@ int mlx4_reset(struct mlx4_dev *dev)
                 }
         }
 
-        if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
+        if (pci_write_config_dword(dev->persist->pdev, PCI_COMMAND,
                                    hca_header[PCI_COMMAND / 4])) {
                 err = -ENODEV;
                 mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
...
@@ -309,12 +309,13 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
         int allocated, free, reserved, guaranteed, from_free;
         int from_rsvd;
 
-        if (slave > dev->num_vfs)
+        if (slave > dev->persist->num_vfs)
                 return -EINVAL;
 
         spin_lock(&res_alloc->alloc_lock);
         allocated = (port > 0) ?
-                res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
+                res_alloc->allocated[(port - 1) *
+                (dev->persist->num_vfs + 1) + slave] :
                 res_alloc->allocated[slave];
         free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                 res_alloc->res_free;
@@ -352,7 +353,8 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
         if (!err) {
                 /* grant the request */
                 if (port > 0) {
-                        res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
+                        res_alloc->allocated[(port - 1) *
+                        (dev->persist->num_vfs + 1) + slave] += count;
                         res_alloc->res_port_free[port - 1] -= count;
                         res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
                 } else {
@@ -376,13 +378,14 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                 &priv->mfunc.master.res_tracker.res_alloc[res_type];
         int allocated, guaranteed, from_rsvd;
 
-        if (slave > dev->num_vfs)
+        if (slave > dev->persist->num_vfs)
                 return;
 
         spin_lock(&res_alloc->alloc_lock);
 
         allocated = (port > 0) ?
-                res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
+                res_alloc->allocated[(port - 1) *
+                (dev->persist->num_vfs + 1) + slave] :
                 res_alloc->allocated[slave];
         guaranteed = res_alloc->guaranteed[slave];
@@ -397,7 +400,8 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
         }
 
         if (port > 0) {
-                res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
+                res_alloc->allocated[(port - 1) *
+                (dev->persist->num_vfs + 1) + slave] -= count;
                 res_alloc->res_port_free[port - 1] += count;
                 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
         } else {
@@ -415,7 +419,8 @@ static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                          enum mlx4_resource res_type,
                                          int vf, int num_instances)
 {
-        res_alloc->guaranteed[vf] = num_instances / (2 * (dev->num_vfs + 1));
+        res_alloc->guaranteed[vf] = num_instances /
+                                    (2 * (dev->persist->num_vfs + 1));
         res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
         if (vf == mlx4_master_func_num(dev)) {
                 res_alloc->res_free = num_instances;
@@ -486,21 +491,26 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                 struct resource_allocator *res_alloc =
                         &priv->mfunc.master.res_tracker.res_alloc[i];
-                res_alloc->quota = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
-                res_alloc->guaranteed = kmalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+                res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
+                                           sizeof(int), GFP_KERNEL);
+                res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
+                                                sizeof(int), GFP_KERNEL);
                 if (i == RES_MAC || i == RES_VLAN)
                         res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
-                                                       (dev->num_vfs + 1) * sizeof(int),
-                                                       GFP_KERNEL);
+                                                       (dev->persist->num_vfs
+                                                       + 1) *
+                                                       sizeof(int), GFP_KERNEL);
                 else
-                        res_alloc->allocated = kzalloc((dev->num_vfs + 1) * sizeof(int), GFP_KERNEL);
+                        res_alloc->allocated = kzalloc((dev->persist->
+                                                       num_vfs + 1) *
+                                                       sizeof(int), GFP_KERNEL);
 
                 if (!res_alloc->quota || !res_alloc->guaranteed ||
                     !res_alloc->allocated)
                         goto no_mem_err;
 
                 spin_lock_init(&res_alloc->alloc_lock);
-                for (t = 0; t < dev->num_vfs + 1; t++) {
+                for (t = 0; t < dev->persist->num_vfs + 1; t++) {
                         struct mlx4_active_ports actv_ports =
                                 mlx4_get_active_ports(dev, t);
                         switch (i) {
...
@@ -744,8 +744,15 @@ struct mlx4_vf_dev {
         u8                      n_ports;
 };
 
-struct mlx4_dev {
+struct mlx4_dev_persistent {
         struct pci_dev         *pdev;
+        struct mlx4_dev        *dev;
+        int                     nvfs[MLX4_MAX_PORTS + 1];
+        int                     num_vfs;
+};
+
+struct mlx4_dev {
+        struct mlx4_dev_persistent *persist;
         unsigned long           flags;
         unsigned long           num_slaves;
         struct mlx4_caps        caps;
@@ -754,13 +761,11 @@ struct mlx4_dev {
         struct radix_tree_root  qp_table_tree;
         u8                      rev_id;
         char                    board_id[MLX4_BOARD_ID_LEN];
-        int                     num_vfs;
         int                     numa_node;
         int                     oper_log_mgm_entry_size;
         u64                     regid_promisc_array[MLX4_MAX_PORTS + 1];
         u64                     regid_allmulti_array[MLX4_MAX_PORTS + 1];
         struct mlx4_vf_dev     *dev_vfs;
-        int                     nvfs[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_eqe {
...
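
A consequence visible in the reset-work hunk above: pci_get_drvdata() on the mlx4 PCI device now yields the persistent structure rather than the mlx4_dev, so recovery code reaches the (possibly re-created) device through persist->dev. Below is a hedged sketch of that access pattern; it assumes the matching pci_set_drvdata() change lives in the collapsed portion of this diff, and example_after_reset() is a hypothetical caller, not a function from the patch.

    static void example_after_reset(struct pci_dev *pdev)
    {
            /* drvdata now holds the persistent struct, which outlives the reset */
            struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);

            /* the mlx4_dev itself may have been re-created by mlx4_restart_one() */
            mlx4_dbg(persist->dev, "Reset succeeded\n");
    }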