Commit 2a7f38c5 authored by David S. Miller

Merge branch 'qed-next'

Yuval Mintz says:

====================
qed/qede: Mostly-cleanup series

This series contains some cleanup of the qed and qede code:
 - #1 contains mostly static/endian changes in order to allow qede to
   pass sparse compilation cleanly.
 - #2, #5 and #6 are either semantic changes or remove dead code from the driver.
 - #9, #10 and #11 relate to printing and slightly change some APIs
   between qed and the protocol drivers to that end [sharing the
   interface name and device information].

The rest of the patches are minor changes/fixes to various flows
in qed.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 417ccf6b 712c3cbf
@@ -598,16 +598,11 @@ struct qed_dev {
 	enum qed_dev_type type;
 /* Translate type/revision combo into the proper conditions */
 #define QED_IS_BB(dev)	((dev)->type == QED_DEV_TYPE_BB)
-#define QED_IS_BB_A0(dev)	(QED_IS_BB(dev) && \
-				 CHIP_REV_IS_A0(dev))
 #define QED_IS_BB_B0(dev)	(QED_IS_BB(dev) && \
 				 CHIP_REV_IS_B0(dev))
 #define QED_IS_AH(dev)	((dev)->type == QED_DEV_TYPE_AH)
 #define QED_IS_K2(dev)	QED_IS_AH(dev)
-#define QED_GET_TYPE(dev)	(QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \
-				 QED_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2)
 	u16 vendor_id;
 	u16 device_id;
 #define QED_DEV_ID_MASK		0xff00
@@ -621,7 +616,6 @@ struct qed_dev {
 	u16 chip_rev;
 #define CHIP_REV_MASK		0xf
 #define CHIP_REV_SHIFT		12
-#define CHIP_REV_IS_A0(_cdev)	(!(_cdev)->chip_rev)
 #define CHIP_REV_IS_B0(_cdev)	((_cdev)->chip_rev == 1)
 	u16 chip_metal;
@@ -633,7 +627,7 @@ struct qed_dev {
 #define CHIP_BOND_ID_SHIFT	0
 	u8 num_engines;
-	u8 num_ports_in_engines;
+	u8 num_ports_in_engine;
 	u8 num_funcs_in_port;
 	u8 path_id;
@@ -644,7 +638,6 @@ struct qed_dev {
 	int pcie_width;
 	int pcie_speed;
-	u8 ver_str[VER_SIZE];
 	/* Add MF related configuration */
 	u8 mcp_rev;
...
@@ -300,7 +300,7 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
 	qm_info->vport_wfq_en = 1;
 	/* TC config is different for AH 4 port */
-	four_port = p_hwfn->cdev->num_ports_in_engines == MAX_NUM_PORTS_K2;
+	four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2;
 	/* in AH 4 port we have fewer TCs per port */
 	qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 :
@@ -329,7 +329,7 @@ static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn)
 static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn)
 {
 	/* Initialize qm port parameters */
-	u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engines;
+	u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine;
 	/* indicate how ooo and high pri traffic is dealt with */
 	active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ?
@@ -693,7 +693,7 @@ static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn)
 		   qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn));
 	/* port table */
-	for (i = 0; i < p_hwfn->cdev->num_ports_in_engines; i++) {
+	for (i = 0; i < p_hwfn->cdev->num_ports_in_engine; i++) {
 		port = &(qm_info->qm_port_params[i]);
 		DP_VERBOSE(p_hwfn,
 			   NETIF_MSG_HW,
@@ -823,7 +823,7 @@ static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn)
 		goto alloc_err;
 	qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
-					  p_hwfn->cdev->num_ports_in_engines,
+					  p_hwfn->cdev->num_ports_in_engine,
 					  GFP_KERNEL);
 	if (!qm_info->qm_port_params)
 		goto alloc_err;
@@ -1108,7 +1108,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
 		return -EINVAL;
 	}
-	switch (p_hwfn->cdev->num_ports_in_engines) {
+	switch (p_hwfn->cdev->num_ports_in_engine) {
 	case 1:
 		hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
 		break;
@@ -1120,7 +1120,7 @@ static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
 		break;
 	default:
 		DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n",
-			  p_hwfn->cdev->num_ports_in_engines);
+			  p_hwfn->cdev->num_ports_in_engine);
 		return -EINVAL;
 	}
@@ -1253,7 +1253,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
 	}
 	memset(&params, 0, sizeof(params));
-	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engines;
+	params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
 	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
 	params.pf_rl_en = qm_info->pf_rl_en;
 	params.pf_wfq_en = qm_info->pf_wfq_en;
@@ -1513,7 +1513,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
 	qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
 	/* send function start command */
-	rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
+	rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn,
+			     p_hwfn->cdev->mf_mode,
 			     allow_npar_tx_switch);
 	if (rc) {
 		DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
@@ -1697,6 +1698,11 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 			return mfw_rc;
 		}
+		/* Check if there is a DID mismatch between nvm-cfg/efuse */
+		if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
+			DP_NOTICE(p_hwfn,
+				  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");
+
 		/* send DCBX attention request command */
 		DP_VERBOSE(p_hwfn,
 			   QED_MSG_DCB,
@@ -1942,6 +1948,13 @@ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
 	if (!p_ptt)
 		return -EAGAIN;
+	/* If roce info is allocated it means roce is initialized and should
+	 * be enabled in searcher.
+	 */
+	if (p_hwfn->p_rdma_info &&
+	    p_hwfn->b_rdma_enabled_in_prs)
+		qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1);
+
 	/* Re-open incoming traffic */
 	qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
 	qed_ptt_release(p_hwfn, p_ptt);
@@ -2239,7 +2252,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
 	case QED_BDQ:
 		if (!*p_resc_num)
 			*p_resc_start = 0;
-		else if (p_hwfn->cdev->num_ports_in_engines == 4)
+		else if (p_hwfn->cdev->num_ports_in_engine == 4)
 			*p_resc_start = p_hwfn->port_id;
 		else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI)
 			*p_resc_start = p_hwfn->port_id;
@@ -2656,15 +2669,15 @@ static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
 	port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
 	if (port_mode < 3) {
-		p_hwfn->cdev->num_ports_in_engines = 1;
+		p_hwfn->cdev->num_ports_in_engine = 1;
 	} else if (port_mode <= 5) {
-		p_hwfn->cdev->num_ports_in_engines = 2;
+		p_hwfn->cdev->num_ports_in_engine = 2;
 	} else {
 		DP_NOTICE(p_hwfn, "PORT MODE: %d not supported\n",
-			  p_hwfn->cdev->num_ports_in_engines);
+			  p_hwfn->cdev->num_ports_in_engine);
-		/* Default num_ports_in_engines to something */
-		p_hwfn->cdev->num_ports_in_engines = 1;
+		/* Default num_ports_in_engine to something */
+		p_hwfn->cdev->num_ports_in_engine = 1;
 	}
 }
@@ -2674,20 +2687,20 @@ static void qed_hw_info_port_num_ah(struct qed_hwfn *p_hwfn,
 	u32 port;
 	int i;
-	p_hwfn->cdev->num_ports_in_engines = 0;
+	p_hwfn->cdev->num_ports_in_engine = 0;
 	for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
 		port = qed_rd(p_hwfn, p_ptt,
 			      CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
 		if (port & 1)
-			p_hwfn->cdev->num_ports_in_engines++;
+			p_hwfn->cdev->num_ports_in_engine++;
 	}
-	if (!p_hwfn->cdev->num_ports_in_engines) {
+	if (!p_hwfn->cdev->num_ports_in_engine) {
 		DP_NOTICE(p_hwfn, "All NIG ports are inactive\n");
 		/* Default num_ports_in_engine to something */
-		p_hwfn->cdev->num_ports_in_engines = 1;
+		p_hwfn->cdev->num_ports_in_engine = 1;
 	}
 }
@@ -2806,12 +2819,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 		cdev->chip_num, cdev->chip_rev,
 		cdev->chip_bond_id, cdev->chip_metal);
-	if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) {
-		DP_NOTICE(cdev->hwfns,
-			  "The chip type/rev (BB A0) is not supported!\n");
-		return -EINVAL;
-	}
-
 	return 0;
 }
@@ -4061,7 +4068,7 @@ static int qed_device_num_ports(struct qed_dev *cdev)
 	if (cdev->num_hwfns > 1)
 		return 1;
-	return cdev->num_ports_in_engines * qed_device_num_engines(cdev);
+	return cdev->num_ports_in_engine * qed_device_num_engines(cdev);
 }
 int qed_device_get_port_id(struct qed_dev *cdev)
...
@@ -11655,6 +11655,8 @@ struct public_drv_mb {
 #define FW_MB_PARAM_GET_PF_RDMA_IWARP	0x2
 #define FW_MB_PARAM_GET_PF_RDMA_BOTH	0x3
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR	(1 << 0)
+
 	u32 drv_pulse_mb;
 #define DRV_PULSE_SEQ_MASK	0x00007fff
 #define DRV_PULSE_SYSTEM_TIME_MASK	0xffff0000
@@ -11780,6 +11782,12 @@ struct nvm_cfg1_glob {
 	u32 led_global_settings;
 	u32 generic_cont1;
 	u32 mbi_version;
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK	0x000000FF
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET	0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK	0x0000FF00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET	8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK	0x00FF0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET	16
 	u32 mbi_date;
 	u32 misc_sig;
 	u32 device_capabilities;
...
@@ -2300,14 +2300,25 @@ static int qed_tunn_configure(struct qed_dev *cdev,
 	for_each_hwfn(cdev, i) {
 		struct qed_hwfn *hwfn = &cdev->hwfns[i];
+		struct qed_ptt *p_ptt;
 		struct qed_tunnel_info *tun;
 		tun = &hwfn->cdev->tunnel;
+		if (IS_PF(cdev)) {
+			p_ptt = qed_ptt_acquire(hwfn);
+			if (!p_ptt)
+				return -EAGAIN;
+		} else {
+			p_ptt = NULL;
+		}
-		rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
+		rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info,
					       QED_SPQ_MODE_EBLOCK, NULL);
-		if (rc)
+		if (rc) {
+			if (IS_PF(cdev))
+				qed_ptt_release(hwfn, p_ptt);
 			return rc;
+		}
 		if (IS_PF_SRIOV(hwfn)) {
 			u16 vxlan_port, geneve_port;
@@ -2324,6 +2335,8 @@ static int qed_tunn_configure(struct qed_dev *cdev,
 			qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
 		}
+		if (IS_PF(cdev))
+			qed_ptt_release(hwfn, p_ptt);
 	}
 	return 0;
...
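The PF/VF split above follows qed's usual PTT discipline: a PF must hold a PTT window for register access and must release it on every exit path, while a VF passes NULL and the update travels over the VF channel instead. A minimal sketch of that pattern, with a hypothetical do_update() standing in for the ramrod call:

	/* Sketch only; do_update() is a placeholder, not a qed API. */
	static int ptt_scoped_update(struct qed_hwfn *hwfn)
	{
		struct qed_ptt *p_ptt = NULL;
		int rc;

		if (IS_PF(hwfn->cdev)) {
			p_ptt = qed_ptt_acquire(hwfn);	/* can fail under contention */
			if (!p_ptt)
				return -EAGAIN;
		}

		rc = do_update(hwfn, p_ptt);		/* p_ptt stays NULL for VFs */

		if (p_ptt)
			qed_ptt_release(hwfn, p_ptt);
		return rc;
	}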
@@ -281,6 +281,9 @@ int qed_fill_dev_info(struct qed_dev *cdev,
 		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
 				    &dev_info->mfw_rev, NULL);
+		qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
+				    &dev_info->mbi_version);
+
 		qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
 				       &dev_info->flash_size);
@@ -335,6 +338,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
 	if (!cdev)
 		goto err0;
+	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
 	cdev->protocol = params->protocol;
 	if (params->is_vf)
@@ -606,6 +610,18 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
 	return rc;
 }
+static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
+{
+	/* Calling the disable function will make sure that any
+	 * currently-running function is completed. The following call to the
+	 * enable function makes this sequence a flush-like operation.
+	 */
+	if (p_hwfn->b_sp_dpc_enabled) {
+		tasklet_disable(p_hwfn->sp_dpc);
+		tasklet_enable(p_hwfn->sp_dpc);
+	}
+}
+
 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
 {
 	struct qed_dev *cdev = p_hwfn->cdev;
@@ -617,6 +633,8 @@ void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
 		synchronize_irq(cdev->int_params.msix_table[id].vector);
 	else
 		synchronize_irq(cdev->pdev->irq);
+
+	qed_slowpath_tasklet_flush(p_hwfn);
 }
 static void qed_slowpath_irq_free(struct qed_dev *cdev)
@@ -1111,17 +1129,13 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
 	return 0;
 }
-static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
-		       char ver_str[VER_SIZE])
+static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
 {
 	int i;
 	memcpy(cdev->name, name, NAME_SIZE);
 	for_each_hwfn(cdev, i)
 		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
-
-	memcpy(cdev->ver_str, ver_str, VER_SIZE);
-	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
 }
@@ -1675,7 +1689,7 @@ const struct qed_common_ops qed_common_ops_pass = {
 	.probe = &qed_probe,
 	.remove = &qed_remove,
 	.set_power_state = &qed_set_power_state,
-	.set_id = &qed_set_id,
+	.set_name = &qed_set_name,
 	.update_pf_params = &qed_update_pf_params,
 	.slowpath_start = &qed_slowpath_start,
 	.slowpath_stop = &qed_slowpath_stop,
...
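The new qed_slowpath_tasklet_flush() above leans on a general tasklet property: tasklet_disable() does not return until a currently-running instance of the handler has finished, so a disable/enable pair behaves like a flush. A standalone sketch of the idiom:

	#include <linux/interrupt.h>

	/* Wait for any in-flight run of @t to complete; tasklet_disable()
	 * spins until the handler returns, so disable+enable flushes it.
	 */
	static void tasklet_flush_sketch(struct tasklet_struct *t)
	{
		tasklet_disable(t);
		tasklet_enable(t);
	}

Combined with synchronize_irq() in qed_slowpath_irq_sync(), this ensures neither the hard IRQ nor the deferred slowpath work is still running when the caller proceeds.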
@@ -1523,6 +1523,36 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
 	return 0;
 }
+int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
+{
+	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;
+
+	if (IS_VF(p_hwfn->cdev))
+		return -EINVAL;
+
+	/* Read the address of the nvm_cfg */
+	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
+	if (!nvm_cfg_addr) {
+		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
+		return -EINVAL;
+	}
+
+	/* Read the offset of nvm_cfg1 */
+	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
+
+	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+		       offsetof(struct nvm_cfg1, glob) +
+		       offsetof(struct nvm_cfg1_glob, mbi_version);
+	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
+			    mbi_ver_addr) &
+		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
+		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
+		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);
+
+	return 0;
+}
+
 int qed_mcp_get_media_type(struct qed_dev *cdev, u32 *p_media_type)
 {
 	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
...
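Note that qed_mcp_get_mbi_ver() returns the three MBI version bytes still in their native bit positions; consumers unpack them with the matching mask/offset pairs, and qede later prints them most-significant byte first. A hedged usage sketch:

	u32 mbi_ver;

	if (!qed_mcp_get_mbi_ver(p_hwfn, p_ptt, &mbi_ver))
		DP_INFO(p_hwfn, "MBI %d.%d.%d\n",
			(mbi_ver & NVM_CFG1_GLOB_MBI_VERSION_2_MASK) >>
			NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET,
			(mbi_ver & NVM_CFG1_GLOB_MBI_VERSION_1_MASK) >>
			NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET,
			(mbi_ver & NVM_CFG1_GLOB_MBI_VERSION_0_MASK) >>
			NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET);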
@@ -255,6 +255,18 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
 			struct qed_ptt *p_ptt,
 			u32 *p_mfw_ver, u32 *p_running_bundle_id);
+/**
+ * @brief Get the MBI version value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version.
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
+			struct qed_ptt *p_ptt, u32 *p_mbi_ver);
+
 /**
  * @brief Get media type value of the port.
  *
@@ -482,7 +494,7 @@ int qed_mcp_bist_nvm_test_get_image_att(struct qed_hwfn *p_hwfn,
 #define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id)
 #define MFW_PORT(_p_hwfn)	((_p_hwfn)->abs_pf_id %			  \
-				 ((_p_hwfn)->cdev->num_ports_in_engines * \
+				 ((_p_hwfn)->cdev->num_ports_in_engine * \
 				  qed_device_num_engines((_p_hwfn)->cdev)))
 struct qed_mcp_info {
...
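MFW_PORT() folds the absolute PF id onto the device's physical port count. A worked example under assumed numbers (2 engines, 2 ports per engine, 8 PFs):

	/* num_ports_in_engine * qed_device_num_engines() == 4, so:
	 *   abs_pf_id: 0 1 2 3 4 5 6 7
	 *   MFW_PORT:  0 1 2 3 0 1 2 3
	 */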
@@ -80,7 +80,7 @@ static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 		/* MFW doesn't support resource locking, first PF on the port
 		 * has lock ownership.
 		 */
-		if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines)
+		if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine)
 			return 0;
 		DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
@@ -108,7 +108,7 @@ static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &params);
 	if (rc == -EINVAL) {
 		/* MFW doesn't support locking, first PF has lock ownership */
-		if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engines) {
+		if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) {
 			rc = 0;
 		} else {
 			DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
...
@@ -391,6 +391,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
  * to the internal RAM of the UStorm by the Function Start Ramrod.
  *
  * @param p_hwfn
+ * @param p_ptt
  * @param p_tunn
  * @param mode
  * @param allow_npar_tx_switch
@@ -399,6 +400,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
  */
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt,
 		    struct qed_tunnel_info *p_tunn,
 		    enum qed_mf_mode mode, bool allow_npar_tx_switch);
@@ -432,6 +434,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
 int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
 int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt,
 			      struct qed_tunnel_info *p_tunn,
 			      enum spq_mode comp_mode,
 			      struct qed_spq_comp_cb *p_comp_data);
...
@@ -253,17 +253,18 @@ static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
 }
 static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn,
+				      struct qed_ptt *p_ptt,
 				      struct qed_tunnel_info *p_tunn)
 {
 	if (p_tunn->vxlan_port.b_update_port)
-		qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+		qed_set_vxlan_dest_port(p_hwfn, p_ptt,
 					p_tunn->vxlan_port.port);
 	if (p_tunn->geneve_port.b_update_port)
-		qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+		qed_set_geneve_dest_port(p_hwfn, p_ptt,
 					 p_tunn->geneve_port.port);
-	qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
+	qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
 }
 static void
@@ -303,6 +304,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
 }
 int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
+		    struct qed_ptt *p_ptt,
 		    struct qed_tunnel_info *p_tunn,
 		    enum qed_mf_mode mode, bool allow_npar_tx_switch)
 {
@@ -399,7 +401,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
 	rc = qed_spq_post(p_hwfn, p_ent, NULL);
 	if (p_tunn)
-		qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
+		qed_set_hw_tunn_mode_port(p_hwfn, p_ptt,
+					  &p_hwfn->cdev->tunnel);
 	return rc;
 }
@@ -430,6 +433,7 @@ int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
 /* Set pf update ramrod command params */
 int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt,
 			      struct qed_tunnel_info *p_tunn,
 			      enum spq_mode comp_mode,
 			      struct qed_spq_comp_cb *p_comp_data)
@@ -464,7 +468,7 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
 	if (rc)
 		return rc;
-	qed_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->cdev->tunnel);
+	qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel);
 	return rc;
 }
...
@@ -2209,7 +2209,7 @@ static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn,
 	if (b_update_required) {
 		u16 geneve_port;
-		rc = qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+		rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
 					       QED_SPQ_MODE_EBLOCK, NULL);
 		if (rc)
 			status = PFVF_STATUS_FAILURE;
...
@@ -313,7 +313,6 @@ static const struct dcbnl_rtnl_ops qede_dcbnl_ops = {
 	.ieee_setets = qede_dcbnl_ieee_setets,
 	.ieee_getapp = qede_dcbnl_ieee_getapp,
 	.ieee_setapp = qede_dcbnl_ieee_setapp,
-	.getdcbx = qede_dcbnl_getdcbx,
 	.ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc,
 	.ieee_peer_getets = qede_dcbnl_ieee_peer_getets,
 	.getstate = qede_dcbnl_getstate,
...
@@ -1290,7 +1290,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	struct qede_tx_queue *txq = NULL;
 	struct eth_tx_1st_bd *first_bd;
 	dma_addr_t mapping;
-	int i, idx, val;
+	int i, idx;
+	u16 val;
 	for_each_queue(i) {
 		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
@@ -1312,7 +1313,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
 	first_bd->data.bd_flags.bitfields = val;
 	val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
-	first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
+	val = val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+	first_bd->data.bitfields |= cpu_to_le16(val);
 	/* Map skb linear data for DMA and set in the first BD */
 	mapping = dma_map_single(&edev->pdev->dev, skb->data,
@@ -1327,8 +1329,8 @@ static int qede_selftest_transmit_traffic(struct qede_dev *edev,
 	first_bd->data.nbds = 1;
 	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
 	/* 'next page' entries are counted in the producer value */
-	val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
-	txq->tx_db.data.bd_prod = val;
+	val = qed_chain_get_prod_idx(&txq->tx_pbl);
+	txq->tx_db.data.bd_prod = cpu_to_le16(val);
 	/* wmb makes sure that the BDs data is updated before updating the
 	 * producer, otherwise FW may read old data from the BDs.
...
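first_bd->data.bitfields and tx_db.data.bd_prod are little-endian fields in structures shared with the firmware, so sparse's endian checker flags any arithmetic that mixes them with plain integers (and on big-endian hosts such mixing is a real bug). The pattern adopted above, reduced to a sketch with an illustrative descriptor layout:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	struct hw_desc {		/* illustrative, not the real BD layout */
		__le16 bitfields;
	};

	static void set_field(struct hw_desc *d, u16 value, u16 mask, u16 shift)
	{
		u16 val = (value & mask) << shift;	/* build in CPU order... */

		d->bitfields |= cpu_to_le16(val);	/* ...convert once, at the HW boundary */
	}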
@@ -335,6 +335,7 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
 	struct qede_tx_queue *txq = fp->xdp_tx;
 	struct eth_tx_1st_bd *first_bd;
 	u16 idx = txq->sw_tx_prod;
+	u16 val;
 	if (!qed_chain_get_elem_left(&txq->tx_pbl)) {
 		txq->stopped_cnt++;
@@ -346,9 +347,11 @@ static int qede_xdp_xmit(struct qede_dev *edev, struct qede_fastpath *fp,
 	memset(first_bd, 0, sizeof(*first_bd));
 	first_bd->data.bd_flags.bitfields =
 	    BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);
-	first_bd->data.bitfields |=
-	    (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
-	    ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+
+	val = (length & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+	       ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+
+	first_bd->data.bitfields |= cpu_to_le16(val);
 	first_bd->data.nbds = 1;
 	/* We can safely ignore the offset, as it's 0 for XDP */
@@ -1424,7 +1427,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct eth_tx_2nd_bd *second_bd = NULL;
 	struct eth_tx_3rd_bd *third_bd = NULL;
 	struct eth_tx_bd *tx_data_bd = NULL;
-	u16 txq_index;
+	u16 txq_index, val = 0;
 	u8 nbd = 0;
 	dma_addr_t mapping;
 	int rc, frag_idx = 0, ipv6_ext = 0;
@@ -1513,8 +1516,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		if (xmit_type & XMIT_ENC) {
 			first_bd->data.bd_flags.bitfields |=
 				1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
-			first_bd->data.bitfields |=
-			    1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+
+			val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
 		}
 		/* Legacy FW had flipped behavior in regard to this bit -
@@ -1522,8 +1525,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		 * packets when it didn't need to.
 		 */
 		if (unlikely(txq->is_legacy))
-			first_bd->data.bitfields ^=
-			    1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+			val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
 		/* If the packet is IPv6 with extension header, indicate that
 		 * to FW and pass few params, since the device cracker doesn't
@@ -1587,11 +1589,12 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			data_split = true;
 		}
 	} else {
-		first_bd->data.bitfields |=
-		    (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
-		    ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
+		val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+			ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
 	}
+	first_bd->data.bitfields = cpu_to_le16(val);
+
 	/* Handle fragmented skb */
 	/* special handle for frags inside 2nd and 3rd bds.. */
 	while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
...
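In qede_start_xmit() the same idea is taken a step further: all the CPU-order flag manipulation (tunnel flag, legacy-FW XOR, packet length) accumulates in the local val, and a single cpu_to_le16() store hits the descriptor. This is exactly what sparse checks for: __le16 is a restricted __bitwise type, so mixing it with plain integers is reported when building with sparse (e.g. make C=2). A minimal reproduction of the warning the series eliminates:

	#include <linux/types.h>
	#include <asm/byteorder.h>

	static void endian_demo(__le16 *field, u16 flags)
	{
		*field |= flags;		/* sparse: restricted __le16 vs. u16 */
		*field |= cpu_to_le16(flags);	/* clean */
	}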
@@ -259,7 +259,7 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
 		/* Notify qed of the name change */
 		if (!edev->ops || !edev->ops->common)
 			goto done;
-		edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
+		edev->ops->common->set_name(edev->cdev, edev->ndev->name);
 		break;
 	case NETDEV_CHANGEADDR:
 		edev = netdev_priv(ndev);
@@ -852,6 +852,43 @@ static void qede_update_pf_params(struct qed_dev *cdev)
 	qed_ops->common->update_pf_params(cdev, &pf_params);
 }
+#define QEDE_FW_VER_STR_SIZE	80
+
+static void qede_log_probe(struct qede_dev *edev)
+{
+	struct qed_dev_info *p_dev_info = &edev->dev_info.common;
+	u8 buf[QEDE_FW_VER_STR_SIZE];
+	size_t left_size;
+
+	snprintf(buf, QEDE_FW_VER_STR_SIZE,
+		 "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
+		 p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
+		 p_dev_info->fw_eng,
+		 (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
+		 QED_MFW_VERSION_3_OFFSET,
+		 (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
+		 QED_MFW_VERSION_2_OFFSET,
+		 (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
+		 QED_MFW_VERSION_1_OFFSET,
+		 (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
+		 QED_MFW_VERSION_0_OFFSET);
+
+	left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
+	if (p_dev_info->mbi_version && left_size)
+		snprintf(buf + strlen(buf), left_size,
+			 " [MBI %d.%d.%d]",
+			 (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
+			 QED_MBI_VERSION_2_OFFSET,
+			 (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
+			 QED_MBI_VERSION_1_OFFSET,
+			 (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
+			 QED_MBI_VERSION_0_OFFSET);
+
+	pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
+		PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
+		buf, edev->ndev->name);
+}
+
 enum qede_probe_mode {
 	QEDE_PROBE_NORMAL,
 };
@@ -930,7 +967,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 		goto err4;
 	}
-	edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
+	edev->ops->common->set_name(cdev, edev->ndev->name);
 	/* PTP not supported on VFs */
 	if (!is_vf)
@@ -945,8 +982,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
 	edev->rx_copybreak = QEDE_RX_HDR_SIZE;
-	DP_INFO(edev, "Ending successfully qede probe\n");
-
+	qede_log_probe(edev);
 	return 0;
 err4:
...
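qede_log_probe() condenses the probe output into one line per PCI function, decoding the packed version words with the new mask/offset pairs from qed_if.h. With purely illustrative values, the result looks roughly like:

	qede 02:00.00: Storm FW 8.15.3.0, Management FW 8.15.10.0 [MBI 1.1.7] [eth0]

The MBI part is appended only when the nvm-cfg actually carries a non-zero MBI version.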
@@ -221,8 +221,8 @@ static void qede_roce_changeaddr(struct qede_dev *edev)
 		qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
 }
-struct qede_roce_event_work *qede_roce_get_free_event_node(struct qede_dev
-							   *edev)
+static struct qede_roce_event_work *
+qede_roce_get_free_event_node(struct qede_dev *edev)
 {
 	struct qede_roce_event_work *event_node = NULL;
 	struct list_head *list_node = NULL;
...
@@ -2954,7 +2954,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 		  "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
 	sprintf(host_buf, "host_%d", host->host_no);
-	qed_ops->common->set_id(qedf->cdev, host_buf, QEDF_VERSION);
+	qed_ops->common->set_name(qedf->cdev, host_buf);
 	/* Set xid max values */
...
@@ -1843,7 +1843,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
 		  qedi->mac);
 	sprintf(host_buf, "host_%d", qedi->shost->host_no);
-	qedi_ops->common->set_id(qedi->cdev, host_buf, QEDI_MODULE_VERSION);
+	qedi_ops->common->set_name(qedi->cdev, host_buf);
 	qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
...
@@ -328,6 +328,14 @@ struct qed_dev_info {
 	/* MFW version */
 	u32 mfw_rev;
+#define QED_MFW_VERSION_0_MASK		0x000000FF
+#define QED_MFW_VERSION_0_OFFSET	0
+#define QED_MFW_VERSION_1_MASK		0x0000FF00
+#define QED_MFW_VERSION_1_OFFSET	8
+#define QED_MFW_VERSION_2_MASK		0x00FF0000
+#define QED_MFW_VERSION_2_OFFSET	16
+#define QED_MFW_VERSION_3_MASK		0xFF000000
+#define QED_MFW_VERSION_3_OFFSET	24
 	u32 flash_size;
 	u8 mf_mode;
@@ -337,6 +345,15 @@ struct qed_dev_info {
 	bool wol_support;
+	/* MBI version */
+	u32 mbi_version;
+#define QED_MBI_VERSION_0_MASK		0x000000FF
+#define QED_MBI_VERSION_0_OFFSET	0
+#define QED_MBI_VERSION_1_MASK		0x0000FF00
+#define QED_MBI_VERSION_1_OFFSET	8
+#define QED_MBI_VERSION_2_MASK		0x00FF0000
+#define QED_MBI_VERSION_2_OFFSET	16
+
 	enum qed_dev_type dev_type;
 	/* Output parameters for qede */
@@ -503,9 +520,7 @@ struct qed_common_ops {
 	int (*set_power_state)(struct qed_dev *cdev,
 			       pci_power_t state);
-	void (*set_id)(struct qed_dev *cdev,
-		       char name[],
-		       char ver_str[]);
+	void (*set_name) (struct qed_dev *cdev, char name[]);
 	/* Client drivers need to make this call before slowpath_start.
 	 * PF params required for the call before slowpath_start is
@@ -700,11 +715,13 @@ struct qed_common_ops {
 	(((value) >> (name ## _SHIFT)) & name ## _MASK)
 /* Debug print definitions */
 #define DP_ERR(cdev, fmt, ...)						\
-	pr_err("[%s:%d(%s)]" fmt,					\
-	       __func__, __LINE__,					\
-	       DP_NAME(cdev) ? DP_NAME(cdev) : "",			\
-	       ## __VA_ARGS__)						\
+	do {								\
+		pr_err("[%s:%d(%s)]" fmt,				\
+		       __func__, __LINE__,				\
+		       DP_NAME(cdev) ? DP_NAME(cdev) : "",		\
+		       ## __VA_ARGS__);					\
+	} while (0)
 #define DP_NOTICE(cdev, fmt, ...)					\
 	do {								\
...
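The DP_ERR() change brings it in line with DP_NOTICE() and friends: wrapping the body in do { ... } while (0) makes the macro expand to exactly one statement that demands a trailing semicolon, which keeps it safe in un-braced conditionals even if the body ever grows beyond a single call. The idiom, shown with a hypothetical two-statement macro:

	#define LOG_AND_COUNT(stats, fmt, ...)			\
		do {						\
			pr_err(fmt, ##__VA_ARGS__);		\
			(stats)->errors++;			\
		} while (0)

	if (rc)
		LOG_AND_COUNT(stats, "rc=%d\n", rc);	/* expands to one statement... */
	else
		fallback();				/* ...so the else still binds correctly */

It also drops the stray trailing backslash the old DP_ERR() definition carried after its final line.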