Commit 670726a8 authored by Jakub Kicinski

Merge branch 'bnxt_en-update-for-net-next'

Michael Chan says:

====================
bnxt_en: Update for net-next

This series starts with 2 patches to support the firmware crash dump.
The driver allocates the required DMA memory ahead of time for the
firmware to store the crash dump in, if and when it crashes.  Patch 3
adds priority and TPID support for the .ndo_set_vf_vlan() callback.
Note that this was rejected and reverted last year; it is being
re-submitted after recent changes in the guidelines.  The remaining
patches are MSIX related.  Legacy interrupts are no longer supported
by the firmware, so we remove that support from the driver.  We then
convert to the newer kernel APIs for allocating and enabling MSIX
vectors.  The last patch adds support for dynamic MSIX.

v3: https://lore.kernel.org/20240823195657.31588-1-michael.chan@broadcom.com
v2: https://lore.kernel.org/20240816212832.185379-1-michael.chan@broadcom.com
v1: https://lore.kernel.org/20240713234339.70293-1-michael.chan@broadcom.com
====================

Link: https://patch.msgid.link/20240828183235.128948-1-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 791f9b68 e68256c8
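
As background for the MSIX part of this series: dropping legacy interrupt
support and converting to the generic PCI IRQ vector helpers is what makes
dynamic MSIX possible. The sketch below is illustrative only and is not
bnxt_en code; the handler name, device name, and vector counts are
placeholders, and error unwinding is omitted for brevity.

/* Illustrative sketch (not bnxt_en code): allocate a block of MSI-X
 * vectors with the generic PCI IRQ helpers, then add one vector
 * dynamically when the device and interrupt core support it.
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/msi.h>

static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_setup_msix(struct pci_dev *pdev)
{
	int nvecs, i, rc;

	/* Request 1..8 MSI-X vectors; no MSI or INTx fallback. */
	nvecs = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_MSIX);
	if (nvecs < 0)
		return nvecs;

	for (i = 0; i < nvecs; i++) {
		rc = request_irq(pci_irq_vector(pdev, i), example_irq_handler,
				 0, "example", pdev);
		if (rc)
			return rc;
	}

	/* Dynamic MSI-X: allocate one more vector after the initial block. */
	if (pci_msix_can_alloc_dyn(pdev)) {
		struct msi_map map = pci_msix_alloc_irq_at(pdev, MSI_ANY_INDEX,
							   NULL);

		if (map.index >= 0)
			return request_irq(map.virq, example_irq_handler, 0,
					   "example-dyn", pdev);
	}

	return 0;
}

The driver's real implementation differs in the details (per-ring vector
accounting, NAPI wiring, affinity), so treat this purely as a map of the
kernel APIs the cover letter refers to.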
@@ -1356,7 +1356,6 @@ struct bnxt_vf_info {
 	u16	vlan;
 	u16	func_qcfg_flags;
 	u32	flags;
-#define BNXT_VF_QOS		0x1
 #define BNXT_VF_SPOOFCHK	0x2
 #define BNXT_VF_LINK_FORCED	0x4
 #define BNXT_VF_LINK_UP		0x8
@@ -1756,8 +1755,6 @@ struct bnxt_test_info {
 #define BNXT_GRCPF_REG_CHIMP_COMM		0x0
 #define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER	0x100
 #define BNXT_GRCPF_REG_WINDOW_BASE_OUT		0x400
-#define BNXT_CAG_REG_LEGACY_INT_STATUS		0x4014
-#define BNXT_CAG_REG_BASE			0x300000
 #define BNXT_GRC_REG_STATUS_P5			0x520
@@ -2200,8 +2197,6 @@ struct bnxt {
 #define BNXT_FLAG_STRIP_VLAN	0x20
 #define BNXT_FLAG_AGG_RINGS	(BNXT_FLAG_JUMBO | BNXT_FLAG_GRO |	\
 				 BNXT_FLAG_LRO)
-#define BNXT_FLAG_USING_MSIX	0x40
-#define BNXT_FLAG_MSIX_CAP	0x80
 #define BNXT_FLAG_RFS		0x100
 #define BNXT_FLAG_SHARED_RINGS	0x200
 #define BNXT_FLAG_PORT_STATS	0x400
@@ -2649,6 +2644,9 @@ struct bnxt {
 #endif
 	u32			thermal_threshold_type;
 	enum board_idx		board_idx;
+	struct bnxt_ctx_pg_info	*fw_crash_mem;
+	u32			fw_crash_len;
 };
 
 #define BNXT_NUM_RX_RING_STATS	8
@@ -372,20 +372,81 @@ static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
 	return rc;
 }
 
+static u32 bnxt_copy_crash_data(struct bnxt_ring_mem_info *rmem, void *buf,
+				u32 dump_len)
+{
+	u32 data_copied = 0;
+	u32 data_len;
+	int i;
+
+	for (i = 0; i < rmem->nr_pages; i++) {
+		data_len = rmem->page_size;
+		if (data_copied + data_len > dump_len)
+			data_len = dump_len - data_copied;
+		memcpy(buf + data_copied, rmem->pg_arr[i], data_len);
+		data_copied += data_len;
+		if (data_copied >= dump_len)
+			break;
+	}
+	return data_copied;
+}
+
+static int bnxt_copy_crash_dump(struct bnxt *bp, void *buf, u32 dump_len)
+{
+	struct bnxt_ring_mem_info *rmem;
+	u32 offset = 0;
+
+	if (!bp->fw_crash_mem)
+		return -ENOENT;
+
+	rmem = &bp->fw_crash_mem->ring_mem;
+
+	if (rmem->depth > 1) {
+		int i;
+
+		for (i = 0; i < rmem->nr_pages; i++) {
+			struct bnxt_ctx_pg_info *pg_tbl;
+
+			pg_tbl = bp->fw_crash_mem->ctx_pg_tbl[i];
+			offset += bnxt_copy_crash_data(&pg_tbl->ring_mem,
						       buf + offset,
						       dump_len - offset);
			if (offset >= dump_len)
				break;
		}
	} else {
		bnxt_copy_crash_data(rmem, buf, dump_len);
	}
+
+	return 0;
+}
+
+static bool bnxt_crash_dump_avail(struct bnxt *bp)
+{
+	u32 sig = 0;
+
+	/* First 4 bytes(signature) of crash dump is always non-zero */
+	bnxt_copy_crash_dump(bp, &sig, sizeof(sig));
+	return !!sig;
+}
+
 int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
 {
 	if (dump_type == BNXT_DUMP_CRASH) {
+		if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)
+			return bnxt_copy_crash_dump(bp, buf, *dump_len);
 #ifdef CONFIG_TEE_BNXT_FW
-		return tee_bnxt_copy_coredump(buf, 0, *dump_len);
-#else
-		return -EOPNOTSUPP;
+		else if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+			return tee_bnxt_copy_coredump(buf, 0, *dump_len);
 #endif
+		else
+			return -EOPNOTSUPP;
 	} else {
 		return __bnxt_get_coredump(bp, buf, dump_len);
 	}
 }
 
-static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
+int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
 {
 	struct hwrm_dbg_qcfg_output *resp;
 	struct hwrm_dbg_qcfg_input *req;
@@ -395,7 +456,8 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
 		return -EOPNOTSUPP;
 
 	if (dump_type == BNXT_DUMP_CRASH &&
-	    !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR))
+	    !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR ||
+	      (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)))
 		return -EOPNOTSUPP;
 
 	rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
@@ -403,8 +465,12 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
 		return rc;
 
 	req->fid = cpu_to_le16(0xffff);
-	if (dump_type == BNXT_DUMP_CRASH)
-		req->flags = cpu_to_le16(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR);
+	if (dump_type == BNXT_DUMP_CRASH) {
+		if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+			req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_SOC);
+		else
+			req->flags = cpu_to_le16(BNXT_DBG_FL_CR_DUMP_SIZE_HOST);
+	}
 
 	resp = hwrm_req_hold(bp, req);
 	rc = hwrm_req_send(bp, req);
@@ -412,7 +478,10 @@ static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
 		goto get_dump_len_exit;
 
 	if (dump_type == BNXT_DUMP_CRASH) {
-		*dump_len = le32_to_cpu(resp->crashdump_size);
+		if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR)
+			*dump_len = BNXT_CRASH_DUMP_LEN;
+		else
+			*dump_len = le32_to_cpu(resp->crashdump_size);
 	} else {
 		/* Driver adds coredump header and "HWRM_VER_GET response"
 		 * segment additionally to coredump.
@@ -434,10 +503,17 @@ u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
 {
 	u32 len = 0;
 
+	if (dump_type == BNXT_DUMP_CRASH &&
+	    bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR &&
+	    bp->fw_crash_mem) {
+		if (!bnxt_crash_dump_avail(bp))
+			return 0;
+
+		return bp->fw_crash_len;
+	}
+
 	if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) {
-		if (dump_type == BNXT_DUMP_CRASH)
-			len = BNXT_CRASH_DUMP_LEN;
-		else
+		if (dump_type != BNXT_DUMP_CRASH)
 			__bnxt_get_coredump(bp, NULL, &len);
 	}
 	return len;
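A note on the crash dump copy path above: when the crash area needs more
than one page array, the ring memory is built as a page directory
(depth > 1) whose entries each point to another bnxt_ctx_pg_info holding
the actual data pages, and bnxt_copy_crash_dump() walks that extra level
before copying. The standalone sketch below models the same two-level walk
with a hypothetical structure; it is a simplified illustration, not the
driver's types.

/* Simplified, self-contained model of copying a paged buffer into a flat
 * destination.  "struct page_ring" is hypothetical; it only mirrors the
 * single level of indirection used by bnxt_copy_crash_dump() above.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct page_ring {
	int nr_pages;
	size_t page_size;
	void **pages;            /* leaf level: the data pages */
	struct page_ring **dir;  /* non-NULL when this level is a directory */
};

static size_t copy_ring(const struct page_ring *r, uint8_t *dst, size_t len)
{
	size_t copied = 0;
	int i;

	for (i = 0; i < r->nr_pages && copied < len; i++) {
		if (r->dir) {
			/* Directory entry: descend into the next level. */
			copied += copy_ring(r->dir[i], dst + copied,
					    len - copied);
		} else {
			size_t chunk = r->page_size;

			if (chunk > len - copied)
				chunk = len - copied;
			memcpy(dst + copied, r->pages[i], chunk);
			copied += chunk;
		}
	}
	return copied;
}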
@@ -111,7 +111,15 @@ struct hwrm_dbg_cmn_output {
 #define HWRM_DBG_CMN_FLAGS_MORE	1
 };
 
+#define BNXT_DBG_FL_CR_DUMP_SIZE_SOC	\
+	DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+#define BNXT_DBG_FL_CR_DUMP_SIZE_HOST	\
+	DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR
+#define BNXT_DBG_CR_DUMP_MDM_CFG_DDR	\
+	DBG_CRASHDUMP_MEDIUM_CFG_REQ_TYPE_DDR
+
 int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
+int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len);
 u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
 
 #endif
@@ -4989,9 +4989,16 @@ static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
 		return -EINVAL;
 	}
 
-	if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
-		netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
-		return -EOPNOTSUPP;
+	if (dump->flag == BNXT_DUMP_CRASH) {
+		if (bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR &&
+		    (!IS_ENABLED(CONFIG_TEE_BNXT_FW))) {
+			netdev_info(dev,
+				    "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
+			return -EOPNOTSUPP;
+		} else if (!(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR)) {
+			netdev_info(dev, "Crash dump collection from host memory is not supported on this interface.\n");
+			return -EOPNOTSUPP;
+		}
 	}
 
 	bp->dump_flag = dump->flag;
@@ -15,6 +15,7 @@
 #include <linux/if_vlan.h>
 #include <linux/interrupt.h>
 #include <linux/etherdevice.h>
+#include <net/dcbnl.h>
 #include "bnxt_hsi.h"
 #include "bnxt.h"
 #include "bnxt_hwrm.h"
@@ -196,11 +197,8 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id,
 	memcpy(&ivi->mac, vf->vf_mac_addr, ETH_ALEN);
 	ivi->max_tx_rate = vf->max_tx_rate;
 	ivi->min_tx_rate = vf->min_tx_rate;
-	ivi->vlan = vf->vlan;
-	if (vf->flags & BNXT_VF_QOS)
-		ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
-	else
-		ivi->qos = 0;
+	ivi->vlan = vf->vlan & VLAN_VID_MASK;
+	ivi->qos = vf->vlan >> VLAN_PRIO_SHIFT;
 	ivi->spoofchk = !!(vf->flags & BNXT_VF_SPOOFCHK);
 	ivi->trusted = bnxt_is_trusted_vf(bp, vf);
 	if (!(vf->flags & BNXT_VF_LINK_FORCED))
@@ -256,21 +254,21 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 	if (bp->hwrm_spec_code < 0x10201)
 		return -ENOTSUPP;
 
-	if (vlan_proto != htons(ETH_P_8021Q))
+	if (vlan_proto != htons(ETH_P_8021Q) &&
+	    (vlan_proto != htons(ETH_P_8021AD) ||
+	     !(bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP)))
 		return -EPROTONOSUPPORT;
 
 	rc = bnxt_vf_ndo_prep(bp, vf_id);
 	if (rc)
 		return rc;
 
-	/* TODO: needed to implement proper handling of user priority,
-	 * currently fail the command if there is valid priority
-	 */
-	if (vlan_id > 4095 || qos)
+	if (vlan_id >= VLAN_N_VID || qos >= IEEE_8021Q_MAX_PRIORITIES ||
+	    (!vlan_id && qos))
 		return -EINVAL;
 
 	vf = &bp->pf.vf[vf_id];
-	vlan_tag = vlan_id;
+	vlan_tag = vlan_id | (u16)qos << VLAN_PRIO_SHIFT;
 	if (vlan_tag == vf->vlan)
 		return 0;
@@ -279,6 +277,10 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos,
 	req->fid = cpu_to_le16(vf->fw_fid);
 	req->dflt_vlan = cpu_to_le16(vlan_tag);
 	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
+	if (bp->fw_cap & BNXT_FW_CAP_DFLT_VLAN_TPID_PCP) {
+		req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_TPID);
+		req->tpid = vlan_proto;
+	}
 	rc = hwrm_req_send(bp, req);
 	if (!rc)
 		vf->vlan = vlan_tag;
@@ -900,11 +902,6 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct bnxt *bp = netdev_priv(dev);
 
-	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
-		netdev_warn(dev, "Not allow SRIOV if the irq mode is not MSIX\n");
-		return 0;
-	}
-
 	rtnl_lock();
 	if (!netif_running(dev)) {
 		netdev_warn(dev, "Reject SRIOV config request since if is down!\n");
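On the VF VLAN change above: the VLAN ID and 802.1p priority are now packed
into the single 16-bit default-VLAN value programmed through HWRM, and
.ndo_get_vf_config splits them back apart. The small standalone sketch below
shows the same TCI packing; the constants are restated locally (matching the
kernel's VLAN_VID_MASK and VLAN_PRIO_SHIFT values) so the example is
self-contained and is not the driver's code.

/* Standalone illustration of packing/unpacking a VLAN TCI the way the
 * VF default-VLAN value is handled above.  EX_* constants mirror the
 * kernel's VLAN_VID_MASK / VLAN_PRIO_SHIFT values.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_VLAN_VID_MASK	0x0fff	/* bits 0..11: VLAN ID   */
#define EX_VLAN_PRIO_SHIFT	13	/* bits 13..15: priority */

static uint16_t pack_tci(uint16_t vid, uint8_t prio)
{
	return (uint16_t)((vid & EX_VLAN_VID_MASK) |
			  ((uint16_t)prio << EX_VLAN_PRIO_SHIFT));
}

int main(void)
{
	uint16_t tci = pack_tci(100, 5);	/* VLAN 100, priority 5 */

	printf("tci=0x%04x vid=%u prio=%u\n", tci,
	       tci & EX_VLAN_VID_MASK, tci >> EX_VLAN_PRIO_SHIFT);
	return 0;
}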