Commit d3243aef authored by David S. Miller

Merge branch 'bnxt_en-RDMA'

Michael Chan says:

====================
bnxt_en: Add interface to support RDMA driver.

This series adds an interface to support a brand-new RDMA driver, bnxt_re.
The first step is to re-arrange some code so that pci_enable_msix() can
be called during pci probe.  The purpose is to allow the RDMA driver to
initialize and stay initialized whether the netdev is up or down.

Then we make some changes to VF resource allocation so that there are
enough resources to support RDMA.

Finally the last patch adds a simple interface to allow the RDMA driver to
probe and register itself with any bnxt_en devices that support RDMA.
Once registered, the RDMA driver can request MSIX, send fw messages, and
receive some notifications.

v2: Fixed kbuild test robot warnings.

David, please consider this series for net-next.  Thanks.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5b8e2f61 a588e458
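
Before the diff, a brief sketch may help orient the reader on how an upper-layer
driver (ULP) would consume the new interface. It uses only what this series adds
(the ulp_probe hook, struct bnxt_en_dev, struct bnxt_ulp_ops and the bnxt_en_ops
table defined in the new bnxt_ulp.h below); the bnxt_re_* names and callback
bodies are hypothetical placeholders, not code from the series, and the caller
is assumed to hold the RTNL lock, which bnxt_register_dev() asserts.

/* Hedged sketch of a ULP (e.g. the future bnxt_re) attaching to bnxt_en.
 * Only the bnxt_en_dev/bnxt_ulp_ops/bnxt_en_ops usage mirrors this series;
 * everything named bnxt_re_* is a placeholder.
 */
static void bnxt_re_stop(void *handle)
{
	/* hypothetical: quiesce RoCE activity before an L2 reset */
}

static void bnxt_re_start(void *handle)
{
	/* hypothetical: resume once the L2 driver has recovered */
}

static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
	.ulp_stop = bnxt_re_stop,
	.ulp_start = bnxt_re_start,
};

/* Caller must hold rtnl_lock(); bnxt_register_dev() asserts it. */
static int bnxt_re_attach(struct net_device *netdev, void *handle)
{
	struct bnxt_msix_entry msix[BNXT_MIN_ROCE_CP_RINGS];
	struct bnxt *bp = netdev_priv(netdev);
	struct bnxt_en_dev *edev;
	int rc;

	if (!bp->ulp_probe)	/* set to bnxt_ulp_probe() in bnxt_init_one() */
		return -ENODEV;

	edev = bp->ulp_probe(netdev);
	if (IS_ERR(edev))
		return PTR_ERR(edev);
	if (!(edev->flags & BNXT_EN_FLAG_ROCE_CAP))
		return -ENODEV;

	rc = edev->en_ops->bnxt_register_device(edev, BNXT_ROCE_ULP,
						&bnxt_re_ulp_ops, handle);
	if (rc)
		return rc;

	/* Returns the number of vectors granted, or a negative error. */
	rc = edev->en_ops->bnxt_request_msix(edev, BNXT_ROCE_ULP, msix,
					     BNXT_MIN_ROCE_CP_RINGS);
	if (rc < 0) {
		edev->en_ops->bnxt_unregister_device(edev, BNXT_ROCE_ULP);
		return rc;
	}
	return 0;
}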
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
 obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o
+bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -52,6 +52,7 @@
 #include "bnxt_hsi.h"
 #include "bnxt.h"
+#include "bnxt_ulp.h"
 #include "bnxt_sriov.h"
 #include "bnxt_ethtool.h"
 #include "bnxt_dcb.h"
@@ -1528,12 +1529,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
 		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
 		break;
 	default:
-		netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
-			   event_id);
 		goto async_event_process_exit;
 	}
 	schedule_work(&bp->sp_task);
 async_event_process_exit:
+	bnxt_ulp_async_events(bp, cmpl);
 	return 0;
 }
@@ -3117,27 +3117,46 @@ int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
 	return rc;
 }
 
-static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
+				     int bmap_size)
 {
 	struct hwrm_func_drv_rgtr_input req = {0};
-	int i;
 	DECLARE_BITMAP(async_events_bmap, 256);
 	u32 *events = (u32 *)async_events_bmap;
+	int i;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
 
 	req.enables =
-		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
-			    FUNC_DRV_RGTR_REQ_ENABLES_VER |
-			    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
 
 	memset(async_events_bmap, 0, sizeof(async_events_bmap));
 	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
 		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
 
+	if (bmap && bmap_size) {
+		for (i = 0; i < bmap_size; i++) {
+			if (test_bit(i, bmap))
+				__set_bit(i, async_events_bmap);
+		}
+	}
+
 	for (i = 0; i < 8; i++)
 		req.async_event_fwd[i] |= cpu_to_le32(events[i]);
 
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
+{
+	struct hwrm_func_drv_rgtr_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
+
+	req.enables =
+		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
+			    FUNC_DRV_RGTR_REQ_ENABLES_VER);
+
 	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
 	req.ver_maj = DRV_VER_MAJ;
 	req.ver_min = DRV_VER_MIN;
@@ -3146,6 +3165,7 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
 	if (BNXT_PF(bp)) {
 		DECLARE_BITMAP(vf_req_snif_bmap, 256);
 		u32 *data = (u32 *)vf_req_snif_bmap;
+		int i;
 
 		memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
 		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
@@ -3527,7 +3547,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
 	return rc;
 }
 
-static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 {
 	unsigned int ring = 0, grp_idx;
 	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
@@ -3575,6 +3595,9 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 #endif
 	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
 		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
+	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
+		req.flags |=
+			cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
 
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
@@ -4152,7 +4175,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 	return rc;
 }
 
-int bnxt_hwrm_func_qcaps(struct bnxt *bp)
+static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 {
 	int rc = 0;
 	struct hwrm_func_qcaps_input req = {0};
@@ -4166,6 +4189,11 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	if (rc)
 		goto hwrm_func_qcaps_exit;
 
+	if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED))
+		bp->flags |= BNXT_FLAG_ROCEV1_CAP;
+	if (resp->flags & cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED))
+		bp->flags |= BNXT_FLAG_ROCEV2_CAP;
+
 	bp->tx_push_thresh = 0;
 	if (resp->flags &
 	    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
@@ -4743,45 +4771,12 @@ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
 	return 0;
 }
 
-static int bnxt_setup_msix(struct bnxt *bp)
+static void bnxt_setup_msix(struct bnxt *bp)
 {
-	struct msix_entry *msix_ent;
-	struct net_device *dev = bp->dev;
-	int i, total_vecs, rc = 0, min = 1;
 	const int len = sizeof(bp->irq_tbl[0].name);
+	struct net_device *dev = bp->dev;
+	int tcs, i;
 
-	bp->flags &= ~BNXT_FLAG_USING_MSIX;
-	total_vecs = bp->cp_nr_rings;
-
-	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
-	if (!msix_ent)
-		return -ENOMEM;
-
-	for (i = 0; i < total_vecs; i++) {
-		msix_ent[i].entry = i;
-		msix_ent[i].vector = 0;
-	}
-
-	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
-		min = 2;
-
-	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
-	if (total_vecs < 0) {
-		rc = -ENODEV;
-		goto msix_setup_exit;
-	}
-
-	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
-	if (bp->irq_tbl) {
-		int tcs;
-
-		/* Trim rings based upon num of vectors allocated */
-		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
-				     total_vecs, min == 1);
-		if (rc)
-			goto msix_setup_exit;
-
-		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
 	tcs = netdev_get_num_tc(dev);
 	if (tcs > 1) {
 		bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
@@ -4799,12 +4794,10 @@ static int bnxt_setup_msix(struct bnxt *bp)
 			}
 		}
 	}
-	bp->cp_nr_rings = total_vecs;
-
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		char *attr;
 
-		bp->irq_tbl[i].vector = msix_ent[i].vector;
 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
 			attr = "TxRx";
 		else if (i < bp->rx_nr_rings)
@@ -4812,13 +4805,135 @@ static int bnxt_setup_msix(struct bnxt *bp)
 		else
 			attr = "tx";
 
-		snprintf(bp->irq_tbl[i].name, len,
-			 "%s-%s-%d", dev->name, attr, i);
+		snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
+			 i);
 		bp->irq_tbl[i].handler = bnxt_msix;
 	}
+}
+
+static void bnxt_setup_inta(struct bnxt *bp)
+{
+	const int len = sizeof(bp->irq_tbl[0].name);
+
+	if (netdev_get_num_tc(bp->dev))
+		netdev_reset_tc(bp->dev);
+
+	snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
+		 0);
+	bp->irq_tbl[0].handler = bnxt_inta;
+}
+
+static int bnxt_setup_int_mode(struct bnxt *bp)
+{
+	int rc;
+
+	if (bp->flags & BNXT_FLAG_USING_MSIX)
+		bnxt_setup_msix(bp);
+	else
+		bnxt_setup_inta(bp);
+
 	rc = bnxt_set_real_num_queues(bp);
+	return rc;
+}
+
+unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	if (BNXT_VF(bp))
+		return bp->vf.max_stat_ctxs;
+#endif
+	return bp->pf.max_stat_ctxs;
+}
+
+void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	if (BNXT_VF(bp))
+		bp->vf.max_stat_ctxs = max;
+	else
+#endif
+		bp->pf.max_stat_ctxs = max;
+}
+
+unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	if (BNXT_VF(bp))
+		return bp->vf.max_cp_rings;
+#endif
+	return bp->pf.max_cp_rings;
+}
+
+void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	if (BNXT_VF(bp))
+		bp->vf.max_cp_rings = max;
+	else
+#endif
+		bp->pf.max_cp_rings = max;
+}
+
+static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	if (BNXT_VF(bp))
+		return bp->vf.max_irqs;
+#endif
+	return bp->pf.max_irqs;
+}
+
+void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
+{
+#if defined(CONFIG_BNXT_SRIOV)
+	if (BNXT_VF(bp))
+		bp->vf.max_irqs = max_irqs;
+	else
+#endif
+		bp->pf.max_irqs = max_irqs;
+}
+
+static int bnxt_init_msix(struct bnxt *bp)
+{
+	int i, total_vecs, rc = 0, min = 1;
+	struct msix_entry *msix_ent;
+
+	total_vecs = bnxt_get_max_func_irqs(bp);
+	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
+	if (!msix_ent)
+		return -ENOMEM;
+
+	for (i = 0; i < total_vecs; i++) {
+		msix_ent[i].entry = i;
+		msix_ent[i].vector = 0;
+	}
+
+	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
+		min = 2;
+
+	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
+	if (total_vecs < 0) {
+		rc = -ENODEV;
+		goto msix_setup_exit;
+	}
+
+	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
+	if (bp->irq_tbl) {
+		for (i = 0; i < total_vecs; i++)
+			bp->irq_tbl[i].vector = msix_ent[i].vector;
+
+		bp->total_irqs = total_vecs;
+		/* Trim rings based upon num of vectors allocated */
+		rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
+				     total_vecs, min == 1);
 		if (rc)
 			goto msix_setup_exit;
+
+		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+		bp->cp_nr_rings = (min == 1) ?
+				  max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
+				  bp->tx_nr_rings + bp->rx_nr_rings;
+
 	} else {
 		rc = -ENOMEM;
 		goto msix_setup_exit;
@@ -4828,52 +4943,54 @@ static int bnxt_setup_msix(struct bnxt *bp)
 	return 0;
 
 msix_setup_exit:
-	netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
+	netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
+	kfree(bp->irq_tbl);
+	bp->irq_tbl = NULL;
 	pci_disable_msix(bp->pdev);
 	kfree(msix_ent);
 	return rc;
 }
 
-static int bnxt_setup_inta(struct bnxt *bp)
+static int bnxt_init_inta(struct bnxt *bp)
 {
-	int rc;
-	const int len = sizeof(bp->irq_tbl[0].name);
-
-	if (netdev_get_num_tc(bp->dev))
-		netdev_reset_tc(bp->dev);
-
 	bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
-	if (!bp->irq_tbl) {
-		rc = -ENOMEM;
-		return rc;
-	}
+	if (!bp->irq_tbl)
+		return -ENOMEM;
+
+	bp->total_irqs = 1;
 	bp->rx_nr_rings = 1;
 	bp->tx_nr_rings = 1;
 	bp->cp_nr_rings = 1;
 	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
 	bp->flags |= BNXT_FLAG_SHARED_RINGS;
 	bp->irq_tbl[0].vector = bp->pdev->irq;
-	snprintf(bp->irq_tbl[0].name, len,
-		 "%s-%s-%d", bp->dev->name, "TxRx", 0);
-	bp->irq_tbl[0].handler = bnxt_inta;
-	rc = bnxt_set_real_num_queues(bp);
-	return rc;
+	return 0;
 }
 
-static int bnxt_setup_int_mode(struct bnxt *bp)
+static int bnxt_init_int_mode(struct bnxt *bp)
 {
 	int rc = 0;
 
 	if (bp->flags & BNXT_FLAG_MSIX_CAP)
-		rc = bnxt_setup_msix(bp);
+		rc = bnxt_init_msix(bp);
 
 	if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
 		/* fallback to INTA */
-		rc = bnxt_setup_inta(bp);
+		rc = bnxt_init_inta(bp);
 	}
 	return rc;
 }
 
+static void bnxt_clear_int_mode(struct bnxt *bp)
+{
+	if (bp->flags & BNXT_FLAG_USING_MSIX)
+		pci_disable_msix(bp->pdev);
+
+	kfree(bp->irq_tbl);
+	bp->irq_tbl = NULL;
+	bp->flags &= ~BNXT_FLAG_USING_MSIX;
+}
+
 static void bnxt_free_irq(struct bnxt *bp)
 {
 	struct bnxt_irq *irq;
@@ -4892,10 +5009,6 @@ static void bnxt_free_irq(struct bnxt *bp)
 		free_irq(irq->vector, bp->bnapi[i]);
 		irq->requested = 0;
 	}
-	if (bp->flags & BNXT_FLAG_USING_MSIX)
-		pci_disable_msix(bp->pdev);
-	kfree(bp->irq_tbl);
-	bp->irq_tbl = NULL;
 }
 
 static int bnxt_request_irq(struct bnxt *bp)
@@ -5566,22 +5679,7 @@ int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 static int bnxt_open(struct net_device *dev)
 {
 	struct bnxt *bp = netdev_priv(dev);
-	int rc = 0;
 
-	if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) {
-		rc = bnxt_hwrm_func_reset(bp);
-		if (rc) {
-			netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
-				   rc);
-			rc = -EBUSY;
-			return rc;
-		}
-		/* Do func_reset during the 1st PF open only to prevent killing
-		 * the VFs when the PF is brought down and up.
-		 */
-		if (BNXT_PF(bp))
-			set_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
-	}
 	return __bnxt_open_nic(bp, true, true);
 }
@@ -6685,12 +6783,15 @@ static void bnxt_remove_one(struct pci_dev *pdev)
 	cancel_work_sync(&bp->sp_task);
 	bp->sp_event = 0;
 
+	bnxt_clear_int_mode(bp);
 	bnxt_hwrm_func_drv_unrgtr(bp);
 	bnxt_free_hwrm_resources(bp);
 	bnxt_dcb_free(bp);
 	pci_iounmap(pdev, bp->bar2);
 	pci_iounmap(pdev, bp->bar1);
 	pci_iounmap(pdev, bp->bar0);
+	kfree(bp->edev);
+	bp->edev = NULL;
 	free_netdev(dev);
 
 	pci_release_regions(pdev);
@@ -6799,6 +6900,39 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
 	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
 }
 
+static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+			       bool shared)
+{
+	int rc;
+
+	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
+	if (rc)
+		return rc;
+
+	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
+		int max_cp, max_stat, max_irq;
+
+		/* Reserve minimum resources for RoCE */
+		max_cp = bnxt_get_max_func_cp_rings(bp);
+		max_stat = bnxt_get_max_func_stat_ctxs(bp);
+		max_irq = bnxt_get_max_func_irqs(bp);
+		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
+		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
+		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
+			return 0;
+
+		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
+		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
+		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
+		max_cp = min_t(int, max_cp, max_irq);
+		max_cp = min_t(int, max_cp, max_stat);
+		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
+		if (rc)
+			rc = 0;
+	}
+	return rc;
+}
+
 static int bnxt_set_dflt_rings(struct bnxt *bp)
 {
 	int dflt_rings, max_rx_rings, max_tx_rings, rc;
@@ -6807,7 +6941,7 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
 	if (sh)
 		bp->flags |= BNXT_FLAG_SHARED_RINGS;
 	dflt_rings = netif_get_num_default_rss_queues();
-	rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
+	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
 	if (rc)
 		return rc;
 	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
@@ -6823,6 +6957,13 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
 	return rc;
 }
 
+void bnxt_restore_pf_fw_resources(struct bnxt *bp)
+{
+	ASSERT_RTNL();
+	bnxt_hwrm_func_qcaps(bp);
+	bnxt_subtract_ulp_resources(bp, BNXT_ROCE_ULP);
+}
+
 static void bnxt_parse_log_pcie_link(struct bnxt *bp)
 {
 	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -6928,6 +7069,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto init_err;
 
+	rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+	if (rc)
+		goto init_err;
+
+	bp->ulp_probe = bnxt_ulp_probe;
+
 	/* Get the MAX capabilities for this function */
 	rc = bnxt_hwrm_func_qcaps(bp);
 	if (rc) {
@@ -6949,12 +7096,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	bnxt_set_tpa_flags(bp);
 	bnxt_set_ring_params(bp);
-	if (BNXT_PF(bp))
-		bp->pf.max_irqs = max_irqs;
-#if defined(CONFIG_BNXT_SRIOV)
-	else
-		bp->vf.max_irqs = max_irqs;
-#endif
+	bnxt_set_max_func_irqs(bp, max_irqs);
 	bnxt_set_dflt_rings(bp);
 
 	/* Default RSS hash cfg. */
@@ -6985,10 +7127,18 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (rc)
 		goto init_err;
 
-	rc = register_netdev(dev);
+	rc = bnxt_hwrm_func_reset(bp);
+	if (rc)
+		goto init_err;
+
+	rc = bnxt_init_int_mode(bp);
 	if (rc)
 		goto init_err;
 
+	rc = register_netdev(dev);
+	if (rc)
+		goto init_err_clr_int;
+
 	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
 		    board_info[ent->driver_data].name,
 		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
@@ -6997,6 +7147,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 	return 0;
 
+init_err_clr_int:
+	bnxt_clear_int_mode(bp);
+
 init_err:
 	pci_iounmap(pdev, bp->bar0);
 	pci_release_regions(pdev);
@@ -7026,6 +7179,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
 	rtnl_lock();
 	netif_device_detach(netdev);
 
+	bnxt_ulp_stop(bp);
+
 	if (state == pci_channel_io_perm_failure) {
 		rtnl_unlock();
 		return PCI_ERS_RESULT_DISCONNECT;
@@ -7034,8 +7189,6 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
 	if (netif_running(netdev))
 		bnxt_close(netdev);
 
-	/* So that func_reset will be done during slot_reset */
-	clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
 	pci_disable_device(pdev);
 	rtnl_unlock();
@@ -7069,11 +7222,14 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
 	} else {
 		pci_set_master(pdev);
 
-		if (netif_running(netdev))
+		err = bnxt_hwrm_func_reset(bp);
+		if (!err && netif_running(netdev))
 			err = bnxt_open(netdev);
 
-		if (!err)
+		if (!err) {
 			result = PCI_ERS_RESULT_RECOVERED;
+			bnxt_ulp_start(bp);
+		}
 	}
 
 	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -387,6 +387,9 @@ struct rx_tpa_end_cmp_ext {
 #define DB_KEY_TX_PUSH		(0x4 << 28)
 #define DB_LONG_TX_PUSH		(0x2 << 24)
 
+#define BNXT_MIN_ROCE_CP_RINGS	2
+#define BNXT_MIN_ROCE_STAT_CTXS	1
+
 #define INVALID_HW_RING_ID	((u16)-1)
 
 /* The hardware supports certain page sizes.  Use the supported page sizes
@@ -953,6 +956,10 @@ struct bnxt {
 	#define BNXT_FLAG_PORT_STATS	0x400
 	#define BNXT_FLAG_UDP_RSS_CAP	0x800
 	#define BNXT_FLAG_EEE_CAP	0x1000
+	#define BNXT_FLAG_ROCEV1_CAP	0x8000
+	#define BNXT_FLAG_ROCEV2_CAP	0x10000
+	#define BNXT_FLAG_ROCE_CAP	(BNXT_FLAG_ROCEV1_CAP |	\
+					 BNXT_FLAG_ROCEV2_CAP)
 	#define BNXT_FLAG_CHIP_NITRO_A0	0x1000000
 
 	#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |	\
@@ -965,6 +972,9 @@ struct bnxt {
 #define BNXT_SINGLE_PF(bp)	(BNXT_PF(bp) && !BNXT_NPAR(bp))
 #define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
 
+	struct bnxt_en_dev	*edev;
+	struct bnxt_en_dev *	(*ulp_probe)(struct net_device *);
+
 	struct bnxt_napi	**bnapi;
 
 	struct bnxt_rx_ring_info	*rx_ring;
@@ -1021,9 +1031,9 @@ struct bnxt {
 	unsigned long		state;
 #define BNXT_STATE_OPEN		0
 #define BNXT_STATE_IN_SP_TASK	1
-#define BNXT_STATE_FN_RST_DONE	2
 
 	struct bnxt_irq	*irq_tbl;
+	int			total_irqs;
 	u8			mac_addr[ETH_ALEN];
 
 #ifdef CONFIG_BNXT_DCB
@@ -1233,8 +1243,15 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
 int _hwrm_send_message(struct bnxt *, void *, u32, int);
 int hwrm_send_message(struct bnxt *, void *, u32, int);
 int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
+int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
+				     int bmap_size);
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
 int bnxt_hwrm_set_coal(struct bnxt *);
-int bnxt_hwrm_func_qcaps(struct bnxt *);
+unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
+void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
+unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
+void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
+void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
 void bnxt_tx_disable(struct bnxt *bp);
 void bnxt_tx_enable(struct bnxt *bp);
 int bnxt_hwrm_set_pause(struct bnxt *);
@@ -1244,4 +1261,5 @@ int bnxt_open_nic(struct bnxt *, bool, bool);
 int bnxt_close_nic(struct bnxt *, bool, bool);
 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
 int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
+void bnxt_restore_pf_fw_resources(struct bnxt *bp);
 #endif
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -420,15 +420,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 
 	/* Remaining rings are distributed equally amongst VF's for now */
-	/* TODO: the following workaroud is needed to restrict total number
-	 * of vf_cp_rings not exceed number of HW ring groups. This WA should
-	 * be removed once new HWRM provides HW ring groups capability in
-	 * hwrm_func_qcap.
-	 */
-	vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs);
-	vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs;
-	/* TODO: restore this logic below once the WA above is removed */
-	/* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */
+	vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs;
 	vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
@@ -590,7 +582,9 @@ void bnxt_sriov_disable(struct bnxt *bp)
 	bp->pf.active_vfs = 0;
 
 	/* Reclaim all resources for the PF. */
-	bnxt_hwrm_func_qcaps(bp);
+	rtnl_lock();
+	bnxt_restore_pf_fw_resources(bp);
+	rtnl_unlock();
 }
 
 int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)

New file: drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c (shown in full):
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2016 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <asm/byteorder.h>
#include <linux/bitmap.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
struct bnxt_ulp_ops *ulp_ops, void *handle)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
ASSERT_RTNL();
if (ulp_id >= BNXT_MAX_ULP)
return -EINVAL;
ulp = &edev->ulp_tbl[ulp_id];
if (rcu_access_pointer(ulp->ulp_ops)) {
netdev_err(bp->dev, "ulp id %d already registered\n", ulp_id);
return -EBUSY;
}
if (ulp_id == BNXT_ROCE_ULP) {
unsigned int max_stat_ctxs;
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
bp->num_stat_ctxs == max_stat_ctxs)
return -ENOMEM;
bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs -
BNXT_MIN_ROCE_STAT_CTXS);
}
atomic_set(&ulp->ref_count, 0);
ulp->handle = handle;
rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
if (ulp_id == BNXT_ROCE_ULP) {
if (test_bit(BNXT_STATE_OPEN, &bp->state))
bnxt_hwrm_vnic_cfg(bp, 0);
}
return 0;
}
static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
int i = 0;
ASSERT_RTNL();
if (ulp_id >= BNXT_MAX_ULP)
return -EINVAL;
ulp = &edev->ulp_tbl[ulp_id];
if (!rcu_access_pointer(ulp->ulp_ops)) {
netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id);
return -EINVAL;
}
if (ulp_id == BNXT_ROCE_ULP) {
unsigned int max_stat_ctxs;
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1);
}
if (ulp->max_async_event_id)
bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
RCU_INIT_POINTER(ulp->ulp_ops, NULL);
synchronize_rcu();
ulp->max_async_event_id = 0;
ulp->async_events_bmap = NULL;
while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
msleep(100);
i++;
}
return 0;
}
static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
struct bnxt_msix_entry *ent, int num_msix)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
int max_idx, max_cp_rings;
int avail_msix, i, idx;
ASSERT_RTNL();
if (ulp_id != BNXT_ROCE_ULP)
return -EINVAL;
if (!(bp->flags & BNXT_FLAG_USING_MSIX))
return -ENODEV;
max_cp_rings = bnxt_get_max_func_cp_rings(bp);
max_idx = min_t(int, bp->total_irqs, max_cp_rings);
avail_msix = max_idx - bp->cp_nr_rings;
if (!avail_msix)
return -ENOMEM;
if (avail_msix > num_msix)
avail_msix = num_msix;
idx = max_idx - avail_msix;
for (i = 0; i < avail_msix; i++) {
ent[i].vector = bp->irq_tbl[idx + i].vector;
ent[i].ring_idx = idx + i;
ent[i].db_offset = (idx + i) * 0x80;
}
bnxt_set_max_func_irqs(bp, max_idx - avail_msix);
bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
return avail_msix;
}
static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
int max_cp_rings, msix_requested;
ASSERT_RTNL();
if (ulp_id != BNXT_ROCE_ULP)
return -EINVAL;
max_cp_rings = bnxt_get_max_func_cp_rings(bp);
msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
edev->ulp_tbl[ulp_id].msix_requested = 0;
bnxt_set_max_func_irqs(bp, bp->total_irqs);
return 0;
}
void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id)
{
ASSERT_RTNL();
if (bnxt_ulp_registered(bp->edev, ulp_id)) {
struct bnxt_en_dev *edev = bp->edev;
unsigned int msix_req, max;
msix_req = edev->ulp_tbl[ulp_id].msix_requested;
max = bnxt_get_max_func_cp_rings(bp);
bnxt_set_max_func_cp_rings(bp, max - msix_req);
max = bnxt_get_max_func_stat_ctxs(bp);
bnxt_set_max_func_stat_ctxs(bp, max - 1);
}
}
static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
struct bnxt_fw_msg *fw_msg)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct input *req;
int rc;
mutex_lock(&bp->hwrm_cmd_lock);
req = fw_msg->msg;
req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len,
fw_msg->timeout);
if (!rc) {
struct output *resp = bp->hwrm_cmd_resp_addr;
u32 len = le16_to_cpu(resp->resp_len);
if (fw_msg->resp_max_len < len)
len = fw_msg->resp_max_len;
memcpy(fw_msg->resp, resp, len);
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
static void bnxt_ulp_get(struct bnxt_ulp *ulp)
{
atomic_inc(&ulp->ref_count);
}
static void bnxt_ulp_put(struct bnxt_ulp *ulp)
{
atomic_dec(&ulp->ref_count);
}
void bnxt_ulp_stop(struct bnxt *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
int i;
if (!edev)
return;
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
ops = rtnl_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_stop)
continue;
ops->ulp_stop(ulp->handle);
}
}
void bnxt_ulp_start(struct bnxt *bp)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
int i;
if (!edev)
return;
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
ops = rtnl_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_start)
continue;
ops->ulp_start(ulp->handle);
}
}
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs)
{
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
int i;
if (!edev)
return;
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
rcu_read_lock();
ops = rcu_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_sriov_config) {
rcu_read_unlock();
continue;
}
bnxt_ulp_get(ulp);
rcu_read_unlock();
ops->ulp_sriov_config(ulp->handle, num_vfs);
bnxt_ulp_put(ulp);
}
}
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl)
{
u16 event_id = le16_to_cpu(cmpl->event_id);
struct bnxt_en_dev *edev = bp->edev;
struct bnxt_ulp_ops *ops;
int i;
if (!edev)
return;
rcu_read_lock();
for (i = 0; i < BNXT_MAX_ULP; i++) {
struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
ops = rcu_dereference(ulp->ulp_ops);
if (!ops || !ops->ulp_async_notifier)
continue;
if (!ulp->async_events_bmap ||
event_id > ulp->max_async_event_id)
continue;
/* Read max_async_event_id first before testing the bitmap. */
smp_rmb();
if (test_bit(event_id, ulp->async_events_bmap))
ops->ulp_async_notifier(ulp->handle, cmpl);
}
rcu_read_unlock();
}
static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
unsigned long *events_bmap, u16 max_id)
{
struct net_device *dev = edev->net;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ulp *ulp;
if (ulp_id >= BNXT_MAX_ULP)
return -EINVAL;
ulp = &edev->ulp_tbl[ulp_id];
ulp->async_events_bmap = events_bmap;
/* Make sure bnxt_ulp_async_events() sees this order */
smp_wmb();
ulp->max_async_event_id = max_id;
bnxt_hwrm_func_rgtr_async_events(bp, events_bmap, max_id + 1);
return 0;
}
static const struct bnxt_en_ops bnxt_en_ops_tbl = {
.bnxt_register_device = bnxt_register_dev,
.bnxt_unregister_device = bnxt_unregister_dev,
.bnxt_request_msix = bnxt_req_msix_vecs,
.bnxt_free_msix = bnxt_free_msix_vecs,
.bnxt_send_fw_msg = bnxt_send_msg,
.bnxt_register_fw_async_events = bnxt_register_async_events,
};
struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_en_dev *edev;
edev = bp->edev;
if (!edev) {
edev = kzalloc(sizeof(*edev), GFP_KERNEL);
if (!edev)
return ERR_PTR(-ENOMEM);
edev->en_ops = &bnxt_en_ops_tbl;
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
edev->net = dev;
edev->pdev = bp->pdev;
bp->edev = edev;
}
return bp->edev;
}
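
To illustrate the bnxt_send_fw_msg() path just defined, here is a hedged
sketch of a ULP issuing a HWRM_VER_GET firmware command. The request and
response layouts and HWRM_VER_GET come from bnxt_hsi.h; the wrapper function
itself is hypothetical, and the 500 ms timeout mirrors the L2 driver's default
rather than a value exported by this series.

/* Hedged sketch: a ULP sending a firmware message through the ops table.
 * bnxt_send_msg() above fills in resp_addr and serializes the call on
 * bp->hwrm_cmd_lock; the caller only supplies the request body.
 */
static int bnxt_re_query_fw_ver(struct bnxt_en_dev *edev)
{
	struct hwrm_ver_get_output resp = {0};
	struct hwrm_ver_get_input req = {0};
	struct bnxt_fw_msg fw_msg = {0};

	req.req_type = cpu_to_le16(HWRM_VER_GET);
	req.cmpl_ring = cpu_to_le16(-1);	/* no completion ring */
	req.target_id = cpu_to_le16(-1);	/* this function */

	fw_msg.msg = &req;
	fw_msg.msg_len = sizeof(req);
	fw_msg.resp = &resp;
	fw_msg.resp_max_len = sizeof(resp);
	fw_msg.timeout = 500;	/* ms; assumed default HWRM timeout */

	return edev->en_ops->bnxt_send_fw_msg(edev, BNXT_ROCE_ULP, &fw_msg);
}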
New file: drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h (shown in full):

/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2016 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_ULP_H
#define BNXT_ULP_H
#define BNXT_ROCE_ULP 0
#define BNXT_OTHER_ULP 1
#define BNXT_MAX_ULP 2
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
struct hwrm_async_event_cmpl;
struct bnxt;
struct bnxt_ulp_ops {
/* async_notifier() cannot sleep (in BH context) */
void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
void (*ulp_stop)(void *);
void (*ulp_start)(void *);
void (*ulp_sriov_config)(void *, int);
};
struct bnxt_msix_entry {
u32 vector;
u32 ring_idx;
u32 db_offset;
};
struct bnxt_fw_msg {
void *msg;
int msg_len;
void *resp;
int resp_max_len;
int timeout;
};
struct bnxt_ulp {
void *handle;
struct bnxt_ulp_ops __rcu *ulp_ops;
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
atomic_t ref_count;
};
struct bnxt_en_dev {
struct net_device *net;
struct pci_dev *pdev;
u32 flags;
#define BNXT_EN_FLAG_ROCEV1_CAP 0x1
#define BNXT_EN_FLAG_ROCEV2_CAP 0x2
#define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
BNXT_EN_FLAG_ROCEV2_CAP)
const struct bnxt_en_ops *en_ops;
struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
};
struct bnxt_en_ops {
int (*bnxt_register_device)(struct bnxt_en_dev *, int,
struct bnxt_ulp_ops *, void *);
int (*bnxt_unregister_device)(struct bnxt_en_dev *, int);
int (*bnxt_request_msix)(struct bnxt_en_dev *, int,
struct bnxt_msix_entry *, int);
int (*bnxt_free_msix)(struct bnxt_en_dev *, int);
int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, int,
struct bnxt_fw_msg *);
int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, int,
unsigned long *, u16);
};
static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
{
if (edev && rcu_access_pointer(edev->ulp_tbl[ulp_id].ulp_ops))
return true;
return false;
}
void bnxt_subtract_ulp_resources(struct bnxt *bp, int ulp_id);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
#endif
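
A usage note on the async event plumbing: a registered ULP hands
bnxt_register_fw_async_events() a bitmap of firmware event IDs it wants
forwarded to its ulp_async_notifier() callback. The following is a hedged
sketch; the event choice and the wrapper are illustrative, while the event ID
constant comes from bnxt_hsi.h. The bitmap must stay valid for as long as the
ULP remains registered, because bnxt_ulp_async_events() dereferences it under
the RCU read lock.

/* Hedged sketch: opting in to firmware async event forwarding. */
static DECLARE_BITMAP(bnxt_re_async_bmap, 256);

static int bnxt_re_request_events(struct bnxt_en_dev *edev)
{
	__set_bit(ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
		  bnxt_re_async_bmap);

	/* max_id bounds the bits bnxt_hwrm_func_rgtr_async_events() copies */
	return edev->en_ops->bnxt_register_fw_async_events(edev,
			BNXT_ROCE_ULP, bnxt_re_async_bmap,
			ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
}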