Commit 1408cc1f authored by Yuval Mintz's avatar Yuval Mintz Committed by David S. Miller

qed: Introduce VFs

This adds the qed VFs for the first time -
The vfs are limited functions, with a very different PCI bar structure
[when compared with PFs] to better impose the related security demands
associated with them.

This patch includes the logic necessary to allow VFs to successfully probe
[without actually adding the ability to enable iov].
This includes diverging all the flows that would occur as part of the pci
probe of the driver, preventing VF from accessing registers/memories it
can't and instead utilize the VF->PF channel to query the PF for needed
information.
Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 37bff2b9
...@@ -3,4 +3,4 @@ obj-$(CONFIG_QED) := qed.o ...@@ -3,4 +3,4 @@ obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \ qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
qed_selftest.o qed_selftest.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
...@@ -311,6 +311,8 @@ struct qed_hwfn { ...@@ -311,6 +311,8 @@ struct qed_hwfn {
bool first_on_engine; bool first_on_engine;
bool hw_init_done; bool hw_init_done;
u8 num_funcs_on_engine;
/* BAR access */ /* BAR access */
void __iomem *regview; void __iomem *regview;
void __iomem *doorbells; void __iomem *doorbells;
...@@ -361,6 +363,7 @@ struct qed_hwfn { ...@@ -361,6 +363,7 @@ struct qed_hwfn {
/* True if the driver requests for the link */ /* True if the driver requests for the link */
bool b_drv_link_init; bool b_drv_link_init;
struct qed_vf_iov *vf_iov_info;
struct qed_pf_iov *pf_iov_info; struct qed_pf_iov *pf_iov_info;
struct qed_mcp_info *mcp_info; struct qed_mcp_info *mcp_info;
...@@ -497,6 +500,8 @@ struct qed_dev { ...@@ -497,6 +500,8 @@ struct qed_dev {
#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info) #define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
unsigned long tunn_mode; unsigned long tunn_mode;
bool b_is_vf;
u32 drv_type; u32 drv_type;
struct qed_eth_stats *reset_stats; struct qed_eth_stats *reset_stats;
......
This diff is collapsed.
...@@ -51,6 +51,9 @@ enum qed_cxt_elem_type { ...@@ -51,6 +51,9 @@ enum qed_cxt_elem_type {
QED_ELEM_TASK QED_ELEM_TASK
}; };
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *vf_cid);
/** /**
* @brief qed_cxt_set_pf_params - Set the PF params for cxt init * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
* *
......
...@@ -41,10 +41,14 @@ enum BAR_ID { ...@@ -41,10 +41,14 @@ enum BAR_ID {
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
enum BAR_ID bar_id) enum BAR_ID bar_id)
{ {
u32 bar_reg = (bar_id == BAR_ID_0 ? u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); u32 val;
if (IS_VF(p_hwfn->cdev))
return 1 << 17;
val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
if (val) if (val)
return 1 << (val + 15); return 1 << (val + 15);
...@@ -114,6 +118,9 @@ void qed_resc_free(struct qed_dev *cdev) ...@@ -114,6 +118,9 @@ void qed_resc_free(struct qed_dev *cdev)
{ {
int i; int i;
if (IS_VF(cdev))
return;
kfree(cdev->fw_data); kfree(cdev->fw_data);
cdev->fw_data = NULL; cdev->fw_data = NULL;
...@@ -144,14 +151,19 @@ void qed_resc_free(struct qed_dev *cdev) ...@@ -144,14 +151,19 @@ void qed_resc_free(struct qed_dev *cdev)
static int qed_init_qm_info(struct qed_hwfn *p_hwfn) static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
{ {
u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port; struct init_qm_port_params *p_qm_port;
u8 num_vports, i, vport_id, num_ports;
u16 num_pqs, multi_cos_tcs = 1; u16 num_pqs, multi_cos_tcs = 1;
u16 num_vfs = 0;
#ifdef CONFIG_QED_SRIOV
if (p_hwfn->cdev->p_iov_info)
num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
#endif
memset(qm_info, 0, sizeof(*qm_info)); memset(qm_info, 0, sizeof(*qm_info));
num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */ num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT); num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
/* Sanity checking that setup requires legal number of resources */ /* Sanity checking that setup requires legal number of resources */
...@@ -187,8 +199,9 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) ...@@ -187,8 +199,9 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
/* First init per-TC PQs */ /* First init per-TC PQs */
for (i = 0; i < multi_cos_tcs; i++) { for (i = 0; i < multi_cos_tcs; i++, curr_queue++) {
struct init_qm_pq_params *params = &qm_info->qm_pq_params[i]; struct init_qm_pq_params *params =
&qm_info->qm_pq_params[curr_queue];
params->vport_id = vport_id; params->vport_id = vport_id;
params->tc_id = p_hwfn->hw_info.non_offload_tc; params->tc_id = p_hwfn->hw_info.non_offload_tc;
...@@ -196,13 +209,26 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) ...@@ -196,13 +209,26 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
} }
/* Then init pure-LB PQ */ /* Then init pure-LB PQ */
qm_info->pure_lb_pq = i; qm_info->pure_lb_pq = curr_queue;
qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT); qm_info->qm_pq_params[curr_queue].vport_id =
qm_info->qm_pq_params[i].tc_id = PURE_LB_TC; (u8) RESC_START(p_hwfn, QED_VPORT);
qm_info->qm_pq_params[i].wrr_group = 1; qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
i++; qm_info->qm_pq_params[curr_queue].wrr_group = 1;
curr_queue++;
qm_info->offload_pq = 0; qm_info->offload_pq = 0;
/* Then init per-VF PQs */
vf_offset = curr_queue;
for (i = 0; i < num_vfs; i++) {
/* First vport is used by the PF */
qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
qm_info->qm_pq_params[curr_queue].tc_id =
p_hwfn->hw_info.non_offload_tc;
qm_info->qm_pq_params[curr_queue].wrr_group = 1;
curr_queue++;
}
qm_info->vf_queues_offset = vf_offset;
qm_info->num_pqs = num_pqs; qm_info->num_pqs = num_pqs;
qm_info->num_vports = num_vports; qm_info->num_vports = num_vports;
...@@ -220,7 +246,8 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) ...@@ -220,7 +246,8 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); qm_info->num_vf_pqs = num_vfs;
qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
for (i = 0; i < qm_info->num_vports; i++) for (i = 0; i < qm_info->num_vports; i++)
qm_info->qm_vport_params[i].vport_wfq = 1; qm_info->qm_vport_params[i].vport_wfq = 1;
...@@ -244,6 +271,9 @@ int qed_resc_alloc(struct qed_dev *cdev) ...@@ -244,6 +271,9 @@ int qed_resc_alloc(struct qed_dev *cdev)
struct qed_eq *p_eq; struct qed_eq *p_eq;
int i, rc = 0; int i, rc = 0;
if (IS_VF(cdev))
return rc;
cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL); cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
if (!cdev->fw_data) if (!cdev->fw_data)
return -ENOMEM; return -ENOMEM;
...@@ -364,6 +394,9 @@ void qed_resc_setup(struct qed_dev *cdev) ...@@ -364,6 +394,9 @@ void qed_resc_setup(struct qed_dev *cdev)
{ {
int i; int i;
if (IS_VF(cdev))
return;
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
...@@ -508,7 +541,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, ...@@ -508,7 +541,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info = &p_hwfn->qm_info; struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params; struct qed_qm_common_rt_init_params params;
struct qed_dev *cdev = p_hwfn->cdev; struct qed_dev *cdev = p_hwfn->cdev;
u32 concrete_fid;
int rc = 0; int rc = 0;
u8 vf_id;
qed_init_cau_rt_data(cdev); qed_init_cau_rt_data(cdev);
...@@ -558,6 +593,14 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn, ...@@ -558,6 +593,14 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, 0x20b4, qed_wr(p_hwfn, p_ptt, 0x20b4,
qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10); qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
}
/* pretend to original PF */
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
return rc; return rc;
} }
...@@ -698,13 +741,20 @@ int qed_hw_init(struct qed_dev *cdev, ...@@ -698,13 +741,20 @@ int qed_hw_init(struct qed_dev *cdev,
u32 load_code, param; u32 load_code, param;
int rc, mfw_rc, i; int rc, mfw_rc, i;
rc = qed_init_fw_data(cdev, bin_fw_data); if (IS_PF(cdev)) {
if (rc != 0) rc = qed_init_fw_data(cdev, bin_fw_data);
return rc; if (rc != 0)
return rc;
}
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (IS_VF(cdev)) {
p_hwfn->b_int_enabled = 1;
continue;
}
/* Enable DMAE in PXP */ /* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
...@@ -829,6 +879,11 @@ int qed_hw_stop(struct qed_dev *cdev) ...@@ -829,6 +879,11 @@ int qed_hw_stop(struct qed_dev *cdev)
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
if (IS_VF(cdev)) {
/* To be implemented in a later patch */
continue;
}
/* mark the hw as uninitialized... */ /* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false; p_hwfn->hw_init_done = false;
...@@ -860,15 +915,16 @@ int qed_hw_stop(struct qed_dev *cdev) ...@@ -860,15 +915,16 @@ int qed_hw_stop(struct qed_dev *cdev)
usleep_range(1000, 2000); usleep_range(1000, 2000);
} }
/* Disable DMAE in PXP - in CMT, this should only be done for if (IS_PF(cdev)) {
* first hw-function, and only after all transactions have /* Disable DMAE in PXP - in CMT, this should only be done for
* stopped for all active hw-functions. * first hw-function, and only after all transactions have
*/ * stopped for all active hw-functions.
t_rc = qed_change_pci_hwfn(&cdev->hwfns[0], */
cdev->hwfns[0].p_main_ptt, t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
false); cdev->hwfns[0].p_main_ptt, false);
if (t_rc != 0) if (t_rc != 0)
rc = t_rc; rc = t_rc;
}
return rc; return rc;
} }
...@@ -932,6 +988,11 @@ int qed_hw_reset(struct qed_dev *cdev) ...@@ -932,6 +988,11 @@ int qed_hw_reset(struct qed_dev *cdev)
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (IS_VF(cdev)) {
/* Will be implemented in a later patch */
continue;
}
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n"); DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
/* Check for incorrect states */ /* Check for incorrect states */
...@@ -1027,11 +1088,10 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) ...@@ -1027,11 +1088,10 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{ {
u32 *resc_start = p_hwfn->hw_info.resc_start; u32 *resc_start = p_hwfn->hw_info.resc_start;
u8 num_funcs = p_hwfn->num_funcs_on_engine;
u32 *resc_num = p_hwfn->hw_info.resc_num; u32 *resc_num = p_hwfn->hw_info.resc_num;
struct qed_sb_cnt_info sb_cnt_info; struct qed_sb_cnt_info sb_cnt_info;
int num_funcs, i; int i;
num_funcs = MAX_NUM_PFS_BB;
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
...@@ -1238,6 +1298,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, ...@@ -1238,6 +1298,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
} }
/* Learn how many PFs are enabled on this engine and cache the result in
 * p_hwfn->num_funcs_on_engine, which later resource-division code reads.
 * Falls back to MAX_NUM_PFS_BB when the HW bypass bit is not set.
 */
static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 reg_function_hide, tmp, eng_mask;
	u8 num_funcs;

	/* Default: assume every PF the chip supports is present */
	num_funcs = MAX_NUM_PFS_BB;

	/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
	 * in the other bits are selected.
	 * Bits 1-15 are for functions 1-15, respectively, and their value is
	 * '0' only for enabled functions (function 0 always exists and
	 * enabled).
	 * In case of CMT, only the "even" functions are enabled, and thus the
	 * number of functions for both hwfns is learnt from the same bits.
	 */
	reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);

	if (reg_function_hide & 0x1) {
		/* NOTE(review): 0xaaaa selects the odd-numbered function bits
		 * and 0x5554 the even-numbered ones (excluding function 0,
		 * which is counted via the num_funcs seed below) — presumably
		 * matching function placement per engine; confirm against the
		 * HW spec.
		 */
		if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
			num_funcs = 0;
			eng_mask = 0xaaaa;
		} else {
			num_funcs = 1;
			eng_mask = 0x5554;
		}

		/* Get the number of the enabled functions on the engine */
		tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
		while (tmp) {
			if (tmp & 0x1)
				num_funcs++;
			tmp >>= 0x1;
		}
	}

	p_hwfn->num_funcs_on_engine = num_funcs;

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_PROBE,
		   "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
		   p_hwfn->rel_pf_id,
		   p_hwfn->abs_pf_id,
		   p_hwfn->num_funcs_on_engine);
}
static int static int
qed_get_hw_info(struct qed_hwfn *p_hwfn, qed_get_hw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
...@@ -1296,6 +1401,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, ...@@ -1296,6 +1401,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.personality = protocol; p_hwfn->hw_info.personality = protocol;
} }
qed_get_num_funcs(p_hwfn, p_ptt);
qed_hw_get_resc(p_hwfn); qed_hw_get_resc(p_hwfn);
return rc; return rc;
...@@ -1361,6 +1468,9 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, ...@@ -1361,6 +1468,9 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
p_hwfn->regview = p_regview; p_hwfn->regview = p_regview;
p_hwfn->doorbells = p_doorbells; p_hwfn->doorbells = p_doorbells;
if (IS_VF(p_hwfn->cdev))
return qed_vf_hw_prepare(p_hwfn);
/* Validate that chip access is feasible */ /* Validate that chip access is feasible */
if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
DP_ERR(p_hwfn, DP_ERR(p_hwfn,
...@@ -1428,7 +1538,8 @@ int qed_hw_prepare(struct qed_dev *cdev, ...@@ -1428,7 +1538,8 @@ int qed_hw_prepare(struct qed_dev *cdev,
int rc; int rc;
/* Store the precompiled init data ptrs */ /* Store the precompiled init data ptrs */
qed_init_iro_array(cdev); if (IS_PF(cdev))
qed_init_iro_array(cdev);
/* Initialize the first hwfn - will learn number of hwfns */ /* Initialize the first hwfn - will learn number of hwfns */
rc = qed_hw_prepare_single(p_hwfn, rc = qed_hw_prepare_single(p_hwfn,
...@@ -1460,9 +1571,11 @@ int qed_hw_prepare(struct qed_dev *cdev, ...@@ -1460,9 +1571,11 @@ int qed_hw_prepare(struct qed_dev *cdev,
* initialized hwfn 0. * initialized hwfn 0.
*/ */
if (rc) { if (rc) {
qed_init_free(p_hwfn); if (IS_PF(cdev)) {
qed_mcp_free(p_hwfn); qed_init_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn); qed_mcp_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn);
}
} }
} }
...@@ -1476,6 +1589,11 @@ void qed_hw_remove(struct qed_dev *cdev) ...@@ -1476,6 +1589,11 @@ void qed_hw_remove(struct qed_dev *cdev)
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (IS_VF(cdev)) {
/* Will be implemented in a later patch */
continue;
}
qed_init_free(p_hwfn); qed_init_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn); qed_hw_hwfn_free(p_hwfn);
qed_mcp_free(p_hwfn); qed_mcp_free(p_hwfn);
......
...@@ -29,7 +29,7 @@ struct qed_ptt; ...@@ -29,7 +29,7 @@ struct qed_ptt;
enum common_event_opcode { enum common_event_opcode {
COMMON_EVENT_PF_START, COMMON_EVENT_PF_START,
COMMON_EVENT_PF_STOP, COMMON_EVENT_PF_STOP,
COMMON_EVENT_RESERVED, COMMON_EVENT_VF_START,
COMMON_EVENT_RESERVED2, COMMON_EVENT_RESERVED2,
COMMON_EVENT_VF_PF_CHANNEL, COMMON_EVENT_VF_PF_CHANNEL,
COMMON_EVENT_RESERVED4, COMMON_EVENT_RESERVED4,
...@@ -44,7 +44,7 @@ enum common_ramrod_cmd_id { ...@@ -44,7 +44,7 @@ enum common_ramrod_cmd_id {
COMMON_RAMROD_UNUSED, COMMON_RAMROD_UNUSED,
COMMON_RAMROD_PF_START /* PF Function Start Ramrod */, COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */, COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
COMMON_RAMROD_RESERVED, COMMON_RAMROD_VF_START,
COMMON_RAMROD_RESERVED2, COMMON_RAMROD_RESERVED2,
COMMON_RAMROD_PF_UPDATE, COMMON_RAMROD_PF_UPDATE,
COMMON_RAMROD_EMPTY, COMMON_RAMROD_EMPTY,
...@@ -573,6 +573,14 @@ union event_ring_element { ...@@ -573,6 +573,14 @@ union event_ring_element {
struct event_ring_next_addr next_addr; struct event_ring_next_addr next_addr;
}; };
/* Per-VF Mstorm zone; the non-trigger part carries the Mstorm per-queue
 * Ethernet statistics for the VF.
 * NOTE(review): HSI (FW interface) structure — field order and sizes must
 * match the firmware layout; do not reorder or repack.
 */
struct mstorm_non_trigger_vf_zone {
	struct eth_mstorm_per_queue_stat eth_queue_stat;
};

struct mstorm_vf_zone {
	struct mstorm_non_trigger_vf_zone non_trigger;
};
enum personality_type { enum personality_type {
BAD_PERSONALITY_TYP, BAD_PERSONALITY_TYP,
PERSONALITY_RESERVED, PERSONALITY_RESERVED,
...@@ -671,6 +679,16 @@ enum ports_mode { ...@@ -671,6 +679,16 @@ enum ports_mode {
MAX_PORTS_MODE MAX_PORTS_MODE
}; };
/* Per-VF Pstorm zone; the non-trigger part carries the Pstorm per-queue
 * Ethernet statistics for the VF, padded with reserved regpairs.
 * NOTE(review): HSI (FW interface) structure — field order and sizes must
 * match the firmware layout; do not reorder or repack.
 */
struct pstorm_non_trigger_vf_zone {
	struct eth_pstorm_per_queue_stat eth_queue_stat;
	struct regpair reserved[2];
};

struct pstorm_vf_zone {
	struct pstorm_non_trigger_vf_zone non_trigger;
	struct regpair reserved[7];
};
/* Ramrod Header of SPQE */ /* Ramrod Header of SPQE */
struct ramrod_header { struct ramrod_header {
__le32 cid /* Slowpath Connection CID */; __le32 cid /* Slowpath Connection CID */;
...@@ -700,6 +718,29 @@ struct tstorm_per_port_stat { ...@@ -700,6 +718,29 @@ struct tstorm_per_port_stat {
struct regpair preroce_irregular_pkt; struct regpair preroce_irregular_pkt;
}; };
/* Per-VF Ustorm zone. The non-trigger part holds the Ustorm per-queue
 * statistics and the DMA address of the VF->PF message buffer
 * (vf_pf_msg_addr); the trigger part holds the flag the VF raises to
 * signal a pending VF->PF channel message.
 * NOTE(review): HSI (FW interface) structures — field order and sizes must
 * match the firmware layout; do not reorder or repack.
 */
struct ustorm_non_trigger_vf_zone {
	struct eth_ustorm_per_queue_stat eth_queue_stat;
	struct regpair vf_pf_msg_addr;
};

struct ustorm_trigger_vf_zone {
	u8 vf_pf_msg_valid;	/* non-zero when a VF->PF message is pending */
	u8 reserved[7];
};

struct ustorm_vf_zone {
	struct ustorm_non_trigger_vf_zone non_trigger;
	struct ustorm_trigger_vf_zone trigger;
};
/* Data for the COMMON_RAMROD_VF_START ramrod sent by the PF to start a VF.
 * NOTE(review): HSI (FW interface) structure — field order and sizes must
 * match the firmware layout; do not reorder or repack.
 */
struct vf_start_ramrod_data {
	u8 vf_id;		/* VF being started */
	u8 enable_flr_ack;	/* presumably enables FLR acknowledgement — confirm against FW docs */
	__le16 opaque_fid;
	u8 personality;		/* protocol personality (see enum personality_type) */
	u8 reserved[3];
};
struct atten_status_block { struct atten_status_block {
__le32 atten_bits; __le32 atten_bits;
__le32 atten_ack; __le32 atten_ack;
...@@ -1026,7 +1067,7 @@ enum init_phases { ...@@ -1026,7 +1067,7 @@ enum init_phases {
PHASE_ENGINE, PHASE_ENGINE,
PHASE_PORT, PHASE_PORT,
PHASE_PF, PHASE_PF,
PHASE_RESERVED, PHASE_VF,
PHASE_QM_PF, PHASE_QM_PF,
MAX_INIT_PHASES MAX_INIT_PHASES
}; };
......
...@@ -23,6 +23,7 @@ ...@@ -23,6 +23,7 @@
#include "qed_hsi.h" #include "qed_hsi.h"
#include "qed_hw.h" #include "qed_hw.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sriov.h"
#define QED_BAR_ACQUIRE_TIMEOUT 1000 #define QED_BAR_ACQUIRE_TIMEOUT 1000
...@@ -236,8 +237,12 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn, ...@@ -236,8 +237,12 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
quota = min_t(size_t, n - done, quota = min_t(size_t, n - done,
PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE); PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done); if (IS_PF(p_hwfn->cdev)) {
hw_offset = qed_ptt_get_bar_addr(p_ptt); qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
hw_offset = qed_ptt_get_bar_addr(p_ptt);
} else {
hw_offset = hw_addr + done;
}
dw_count = quota / 4; dw_count = quota / 4;
host_addr = (u32 *)((u8 *)addr + done); host_addr = (u32 *)((u8 *)addr + done);
...@@ -808,6 +813,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn, ...@@ -808,6 +813,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
break; break;
case PROTOCOLID_ETH: case PROTOCOLID_ETH:
pq_id = p_params->eth.tc; pq_id = p_params->eth.tc;
if (p_params->eth.is_vf)
pq_id += p_hwfn->qm_info.vf_queues_offset +
p_params->eth.vf_id;
break; break;
default: default:
pq_id = 0; pq_id = 0;
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include "qed_hw.h" #include "qed_hw.h"
#include "qed_init_ops.h" #include "qed_init_ops.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sriov.h"
#define QED_INIT_MAX_POLL_COUNT 100 #define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500 #define QED_INIT_POLL_PERIOD_US 500
...@@ -128,6 +129,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn) ...@@ -128,6 +129,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn)
{ {
struct qed_rt_data *rt_data = &p_hwfn->rt_data; struct qed_rt_data *rt_data = &p_hwfn->rt_data;
if (IS_VF(p_hwfn->cdev))
return 0;
rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE, rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
GFP_KERNEL); GFP_KERNEL);
if (!rt_data->b_valid) if (!rt_data->b_valid)
......
...@@ -26,6 +26,8 @@ ...@@ -26,6 +26,8 @@
#include "qed_mcp.h" #include "qed_mcp.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
struct qed_pi_info { struct qed_pi_info {
qed_int_comp_cb_t comp_cb; qed_int_comp_cb_t comp_cb;
...@@ -2513,6 +2515,9 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, ...@@ -2513,6 +2515,9 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
u32 sb_offset; u32 sb_offset;
u32 pi_offset; u32 pi_offset;
if (IS_VF(p_hwfn->cdev))
return;
sb_offset = igu_sb_id * PIS_PER_SB; sb_offset = igu_sb_id * PIS_PER_SB;
memset(&pi_entry, 0, sizeof(struct cau_pi_entry)); memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
...@@ -2542,8 +2547,9 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn, ...@@ -2542,8 +2547,9 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
sb_info->sb_ack = 0; sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, if (IS_PF(p_hwfn->cdev))
sb_info->igu_sb_id, 0, 0); qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
sb_info->igu_sb_id, 0, 0);
} }
/** /**
...@@ -2563,8 +2569,10 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, ...@@ -2563,8 +2569,10 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
/* Assuming continuous set of IGU SBs dedicated for given PF */ /* Assuming continuous set of IGU SBs dedicated for given PF */
if (sb_id == QED_SP_SB_ID) if (sb_id == QED_SP_SB_ID)
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
else else if (IS_PF(p_hwfn->cdev))
igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb; igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
else
igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n", DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
(sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id); (sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
...@@ -2594,9 +2602,16 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, ...@@ -2594,9 +2602,16 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
/* The igu address will hold the absolute address that needs to be /* The igu address will hold the absolute address that needs to be
* written to for a specific status block * written to for a specific status block
*/ */
sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + if (IS_PF(p_hwfn->cdev)) {
GTT_BAR0_MAP_REG_IGU_CMD + sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
(sb_info->igu_sb_id << 3); GTT_BAR0_MAP_REG_IGU_CMD +
(sb_info->igu_sb_id << 3);
} else {
sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
PXP_VF_BAR0_START_IGU +
((IGU_CMD_INT_ACK_BASE +
sb_info->igu_sb_id) << 3);
}
sb_info->flags |= QED_SB_INFO_INIT; sb_info->flags |= QED_SB_INFO_INIT;
...@@ -2783,6 +2798,9 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, ...@@ -2783,6 +2798,9 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
{ {
p_hwfn->b_int_enabled = 0; p_hwfn->b_int_enabled = 0;
if (IS_VF(p_hwfn->cdev))
return;
qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
} }
...@@ -2935,9 +2953,9 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, ...@@ -2935,9 +2953,9 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt) struct qed_ptt *p_ptt)
{ {
struct qed_igu_info *p_igu_info; struct qed_igu_info *p_igu_info;
u32 val, min_vf = 0, max_vf = 0;
u16 sb_id, last_iov_sb_id = 0;
struct qed_igu_block *blk; struct qed_igu_block *blk;
u32 val;
u16 sb_id;
u16 prev_sb_id = 0xFF; u16 prev_sb_id = 0xFF;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
...@@ -2947,12 +2965,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, ...@@ -2947,12 +2965,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
p_igu_info = p_hwfn->hw_info.p_igu_info; p_igu_info = p_hwfn->hw_info.p_igu_info;
/* Initialize base sb / sb cnt for PFs */ /* Initialize base sb / sb cnt for PFs and VFs */
p_igu_info->igu_base_sb = 0xffff; p_igu_info->igu_base_sb = 0xffff;
p_igu_info->igu_sb_cnt = 0; p_igu_info->igu_sb_cnt = 0;
p_igu_info->igu_dsb_id = 0xffff; p_igu_info->igu_dsb_id = 0xffff;
p_igu_info->igu_base_sb_iov = 0xffff; p_igu_info->igu_base_sb_iov = 0xffff;
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
min_vf = p_iov->first_vf_in_pf;
max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
}
for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
sb_id++) { sb_id++) {
blk = &p_igu_info->igu_map.igu_blocks[sb_id]; blk = &p_igu_info->igu_map.igu_blocks[sb_id];
...@@ -2986,14 +3011,43 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, ...@@ -2986,14 +3011,43 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
(p_igu_info->igu_sb_cnt)++; (p_igu_info->igu_sb_cnt)++;
} }
} }
} else {
if ((blk->function_id >= min_vf) &&
(blk->function_id < max_vf)) {
/* Available for VFs of this PF */
if (p_igu_info->igu_base_sb_iov == 0xffff) {
p_igu_info->igu_base_sb_iov = sb_id;
} else if (last_iov_sb_id != sb_id - 1) {
if (!val) {
DP_VERBOSE(p_hwfn->cdev,
NETIF_MSG_INTR,
"First uninitialized IGU CAM entry at index 0x%04x\n",
sb_id);
} else {
DP_NOTICE(p_hwfn->cdev,
"Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n",
p_hwfn->rel_pf_id,
last_iov_sb_id,
sb_id); }
break;
}
blk->status |= QED_IGU_STATUS_FREE;
p_hwfn->hw_info.p_igu_info->free_blks++;
last_iov_sb_id = sb_id;
}
} }
} }
p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n", DP_VERBOSE(
p_igu_info->igu_base_sb, p_hwfn,
p_igu_info->igu_sb_cnt, NETIF_MSG_INTR,
p_igu_info->igu_dsb_id); "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n",
p_igu_info->igu_base_sb,
p_igu_info->igu_base_sb_iov,
p_igu_info->igu_sb_cnt,
p_igu_info->igu_sb_cnt_iov,
p_igu_info->igu_dsb_id);
if (p_igu_info->igu_base_sb == 0xffff || if (p_igu_info->igu_base_sb == 0xffff ||
p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_dsb_id == 0xffff ||
...@@ -3116,6 +3170,23 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, ...@@ -3116,6 +3170,23 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
p_sb_cnt_info->sb_free_blk = info->free_blks; p_sb_cnt_info->sb_free_blk = info->free_blks;
} }
/* Map an absolute IGU SB index to a queue index for this function.
 * PF SBs map to [0, igu_sb_cnt); VF SBs continue right after the PF range.
 * An out-of-range SB id is reported and mapped to queue 0.
 */
u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
	struct qed_igu_info *igu = p_hwfn->hw_info.p_igu_info;
	u16 pf_start = igu->igu_base_sb;
	u16 vf_start = igu->igu_base_sb_iov;

	/* SB lies in the PF's own range */
	if (sb_id >= pf_start && sb_id < pf_start + igu->igu_sb_cnt)
		return sb_id - pf_start;

	/* SB lies in the VF range; queue ids follow the PF queues */
	if (sb_id >= vf_start && sb_id < vf_start + igu->igu_sb_cnt_iov)
		return sb_id - vf_start + igu->igu_sb_cnt;

	DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
	return 0;
}
void qed_int_disable_post_isr_release(struct qed_dev *cdev) void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{ {
int i; int i;
......
...@@ -20,6 +20,12 @@ ...@@ -20,6 +20,12 @@
#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */ #define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */ #define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */ #define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
/* Fields of IGU VF CONFIGURATION REGISTER */
#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */
#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */
/* Igu control commands /* Igu control commands
*/ */
...@@ -364,6 +370,16 @@ void qed_int_free(struct qed_hwfn *p_hwfn); ...@@ -364,6 +370,16 @@ void qed_int_free(struct qed_hwfn *p_hwfn);
void qed_int_setup(struct qed_hwfn *p_hwfn, void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt); struct qed_ptt *p_ptt);
/**
* @brief - Returns an Rx queue index appropriate for usage with given SB.
*
* @param p_hwfn
* @param sb_id - absolute index of SB
*
* @return index of Rx queue
*/
u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/** /**
* @brief - Enable Interrupt & Attention for hw function * @brief - Enable Interrupt & Attention for hw function
* *
......
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
#include "qed_mcp.h" #include "qed_mcp.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_sriov.h"
struct qed_rss_params { struct qed_rss_params {
u8 update_rss_config; u8 update_rss_config;
...@@ -1580,32 +1581,53 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, ...@@ -1580,32 +1581,53 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
info->num_tc = 1; info->num_tc = 1;
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { if (IS_PF(cdev)) {
for_each_hwfn(cdev, i) if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
info->num_queues += FEAT_NUM(&cdev->hwfns[i], for_each_hwfn(cdev, i)
QED_PF_L2_QUE); info->num_queues +=
if (cdev->int_params.fp_msix_cnt) FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
info->num_queues = min_t(u8, info->num_queues, if (cdev->int_params.fp_msix_cnt)
cdev->int_params.fp_msix_cnt); info->num_queues =
min_t(u8, info->num_queues,
cdev->int_params.fp_msix_cnt);
} else {
info->num_queues = cdev->num_hwfns;
}
info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
ether_addr_copy(info->port_mac,
cdev->hwfns[0].hw_info.hw_mac_addr);
} else { } else {
info->num_queues = cdev->num_hwfns; qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
} if (cdev->num_hwfns > 1) {
u8 queues = 0;
info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN); qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
ether_addr_copy(info->port_mac, info->num_queues += queues;
cdev->hwfns[0].hw_info.hw_mac_addr); }
qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
&info->num_vlan_filters);
qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
}
qed_fill_dev_info(cdev, &info->common); qed_fill_dev_info(cdev, &info->common);
if (IS_VF(cdev))
memset(info->common.hw_mac, 0, ETH_ALEN);
return 0; return 0;
} }
static void qed_register_eth_ops(struct qed_dev *cdev, static void qed_register_eth_ops(struct qed_dev *cdev,
struct qed_eth_cb_ops *ops, struct qed_eth_cb_ops *ops, void *cookie)
void *cookie)
{ {
cdev->protocol_ops.eth = ops; cdev->protocol_ops.eth = ops;
cdev->ops_cookie = cookie; cdev->ops_cookie = cookie;
/* For VF, we start bulletin reading */
if (IS_VF(cdev))
qed_vf_start_iov_wq(cdev);
} }
static int qed_start_vport(struct qed_dev *cdev, static int qed_start_vport(struct qed_dev *cdev,
...@@ -1890,6 +1912,9 @@ static int qed_tunn_configure(struct qed_dev *cdev, ...@@ -1890,6 +1912,9 @@ static int qed_tunn_configure(struct qed_dev *cdev,
struct qed_tunn_update_params tunn_info; struct qed_tunn_update_params tunn_info;
int i, rc; int i, rc;
if (IS_VF(cdev))
return 0;
memset(&tunn_info, 0, sizeof(tunn_info)); memset(&tunn_info, 0, sizeof(tunn_info));
if (tunn_params->update_vxlan_port == 1) { if (tunn_params->update_vxlan_port == 1) {
tunn_info.update_vxlan_udp_port = 1; tunn_info.update_vxlan_udp_port = 1;
......
...@@ -126,7 +126,7 @@ static int qed_init_pci(struct qed_dev *cdev, ...@@ -126,7 +126,7 @@ static int qed_init_pci(struct qed_dev *cdev,
goto err1; goto err1;
} }
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
DP_NOTICE(cdev, "No memory region found in bar #2\n"); DP_NOTICE(cdev, "No memory region found in bar #2\n");
rc = -EIO; rc = -EIO;
goto err1; goto err1;
...@@ -176,12 +176,14 @@ static int qed_init_pci(struct qed_dev *cdev, ...@@ -176,12 +176,14 @@ static int qed_init_pci(struct qed_dev *cdev,
goto err2; goto err2;
} }
cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); if (IS_PF(cdev)) {
cdev->db_size = pci_resource_len(cdev->pdev, 2); cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); cdev->db_size = pci_resource_len(cdev->pdev, 2);
if (!cdev->doorbells) { cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
DP_NOTICE(cdev, "Cannot map doorbell space\n"); if (!cdev->doorbells) {
return -ENOMEM; DP_NOTICE(cdev, "Cannot map doorbell space\n");
return -ENOMEM;
}
} }
return 0; return 0;
...@@ -208,20 +210,32 @@ int qed_fill_dev_info(struct qed_dev *cdev, ...@@ -208,20 +210,32 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
dev_info->fw_major = FW_MAJOR_VERSION; if (IS_PF(cdev)) {
dev_info->fw_minor = FW_MINOR_VERSION; dev_info->fw_major = FW_MAJOR_VERSION;
dev_info->fw_rev = FW_REVISION_VERSION; dev_info->fw_minor = FW_MINOR_VERSION;
dev_info->fw_eng = FW_ENGINEERING_VERSION; dev_info->fw_rev = FW_REVISION_VERSION;
dev_info->mf_mode = cdev->mf_mode; dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->mf_mode = cdev->mf_mode;
} else {
qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
&dev_info->fw_minor, &dev_info->fw_rev,
&dev_info->fw_eng);
}
qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev); if (IS_PF(cdev)) {
ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
if (ptt) {
qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
&dev_info->mfw_rev, NULL);
ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
if (ptt) { &dev_info->flash_size);
qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
&dev_info->flash_size);
qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
}
} else {
qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
&dev_info->mfw_rev, NULL);
} }
return 0; return 0;
...@@ -258,9 +272,7 @@ static int qed_set_power_state(struct qed_dev *cdev, ...@@ -258,9 +272,7 @@ static int qed_set_power_state(struct qed_dev *cdev,
/* probing */ /* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev, static struct qed_dev *qed_probe(struct pci_dev *pdev,
enum qed_protocol protocol, struct qed_probe_params *params)
u32 dp_module,
u8 dp_level)
{ {
struct qed_dev *cdev; struct qed_dev *cdev;
int rc; int rc;
...@@ -269,9 +281,12 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev, ...@@ -269,9 +281,12 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
if (!cdev) if (!cdev)
goto err0; goto err0;
cdev->protocol = protocol; cdev->protocol = params->protocol;
qed_init_dp(cdev, dp_module, dp_level); if (params->is_vf)
cdev->b_is_vf = true;
qed_init_dp(cdev, params->dp_module, params->dp_level);
rc = qed_init_pci(cdev, pdev); rc = qed_init_pci(cdev, pdev);
if (rc) { if (rc) {
...@@ -665,6 +680,35 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev, ...@@ -665,6 +680,35 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
return 0; return 0;
} }
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
int rc;
memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
&cdev->int_params.in.num_vectors);
if (cdev->num_hwfns > 1) {
u8 vectors = 0;
qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
cdev->int_params.in.num_vectors += vectors;
}
/* We want a minimum of one fastpath vector per vf hwfn */
cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
rc = qed_set_int_mode(cdev, true);
if (rc)
return rc;
cdev->int_params.fp_msix_base = 0;
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
return 0;
}
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf) u8 *input_buf, u32 max_size, u8 *unzip_buf)
{ {
...@@ -755,32 +799,38 @@ static int qed_slowpath_start(struct qed_dev *cdev, ...@@ -755,32 +799,38 @@ static int qed_slowpath_start(struct qed_dev *cdev,
if (qed_iov_wq_start(cdev)) if (qed_iov_wq_start(cdev))
goto err; goto err;
rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, if (IS_PF(cdev)) {
&cdev->pdev->dev); rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
if (rc) { &cdev->pdev->dev);
DP_NOTICE(cdev, if (rc) {
"Failed to find fw file - /lib/firmware/%s\n", DP_NOTICE(cdev,
QED_FW_FILE_NAME); "Failed to find fw file - /lib/firmware/%s\n",
goto err; QED_FW_FILE_NAME);
goto err;
}
} }
rc = qed_nic_setup(cdev); rc = qed_nic_setup(cdev);
if (rc) if (rc)
goto err; goto err;
rc = qed_slowpath_setup_int(cdev, params->int_mode); if (IS_PF(cdev))
rc = qed_slowpath_setup_int(cdev, params->int_mode);
else
rc = qed_slowpath_vf_setup_int(cdev);
if (rc) if (rc)
goto err1; goto err1;
/* Allocate stream for unzipping */ if (IS_PF(cdev)) {
rc = qed_alloc_stream_mem(cdev); /* Allocate stream for unzipping */
if (rc) { rc = qed_alloc_stream_mem(cdev);
DP_NOTICE(cdev, "Failed to allocate stream memory\n"); if (rc) {
goto err2; DP_NOTICE(cdev, "Failed to allocate stream memory\n");
} goto err2;
}
/* Start the slowpath */ data = cdev->firmware->data;
data = cdev->firmware->data; }
memset(&tunn_info, 0, sizeof(tunn_info)); memset(&tunn_info, 0, sizeof(tunn_info));
tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN | tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
...@@ -793,6 +843,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, ...@@ -793,6 +843,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN; tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN; tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
/* Start the slowpath */
rc = qed_hw_init(cdev, &tunn_info, true, rc = qed_hw_init(cdev, &tunn_info, true,
cdev->int_params.out.int_mode, cdev->int_params.out.int_mode,
true, data); true, data);
...@@ -802,18 +853,20 @@ static int qed_slowpath_start(struct qed_dev *cdev, ...@@ -802,18 +853,20 @@ static int qed_slowpath_start(struct qed_dev *cdev,
DP_INFO(cdev, DP_INFO(cdev,
"HW initialization and function start completed successfully\n"); "HW initialization and function start completed successfully\n");
hwfn = QED_LEADING_HWFN(cdev); if (IS_PF(cdev)) {
drv_version.version = (params->drv_major << 24) | hwfn = QED_LEADING_HWFN(cdev);
(params->drv_minor << 16) | drv_version.version = (params->drv_major << 24) |
(params->drv_rev << 8) | (params->drv_minor << 16) |
(params->drv_eng); (params->drv_rev << 8) |
strlcpy(drv_version.name, params->name, (params->drv_eng);
MCP_DRV_VER_STR_SIZE - 4); strlcpy(drv_version.name, params->name,
rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, MCP_DRV_VER_STR_SIZE - 4);
&drv_version); rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
if (rc) { &drv_version);
DP_NOTICE(cdev, "Failed sending drv version command\n"); if (rc) {
return rc; DP_NOTICE(cdev, "Failed sending drv version command\n");
return rc;
}
} }
qed_reset_vport_stats(cdev); qed_reset_vport_stats(cdev);
...@@ -822,13 +875,15 @@ static int qed_slowpath_start(struct qed_dev *cdev, ...@@ -822,13 +875,15 @@ static int qed_slowpath_start(struct qed_dev *cdev,
err2: err2:
qed_hw_timers_stop_all(cdev); qed_hw_timers_stop_all(cdev);
qed_slowpath_irq_free(cdev); if (IS_PF(cdev))
qed_slowpath_irq_free(cdev);
qed_free_stream_mem(cdev); qed_free_stream_mem(cdev);
qed_disable_msix(cdev); qed_disable_msix(cdev);
err1: err1:
qed_resc_free(cdev); qed_resc_free(cdev);
err: err:
release_firmware(cdev->firmware); if (IS_PF(cdev))
release_firmware(cdev->firmware);
qed_iov_wq_stop(cdev, false); qed_iov_wq_stop(cdev, false);
...@@ -840,17 +895,20 @@ static int qed_slowpath_stop(struct qed_dev *cdev) ...@@ -840,17 +895,20 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev) if (!cdev)
return -ENODEV; return -ENODEV;
qed_free_stream_mem(cdev); if (IS_PF(cdev)) {
qed_free_stream_mem(cdev);
qed_nic_stop(cdev); qed_nic_stop(cdev);
qed_slowpath_irq_free(cdev); qed_slowpath_irq_free(cdev);
}
qed_disable_msix(cdev); qed_disable_msix(cdev);
qed_nic_reset(cdev); qed_nic_reset(cdev);
qed_iov_wq_stop(cdev, true); qed_iov_wq_stop(cdev, true);
release_firmware(cdev->firmware); if (IS_PF(cdev))
release_firmware(cdev->firmware);
return 0; return 0;
} }
...@@ -940,6 +998,9 @@ static int qed_set_link(struct qed_dev *cdev, ...@@ -940,6 +998,9 @@ static int qed_set_link(struct qed_dev *cdev,
if (!cdev) if (!cdev)
return -ENODEV; return -ENODEV;
if (IS_VF(cdev))
return 0;
/* The link should be set only once per PF */ /* The link should be set only once per PF */
hwfn = &cdev->hwfns[0]; hwfn = &cdev->hwfns[0];
...@@ -1051,10 +1112,16 @@ static void qed_fill_link(struct qed_hwfn *hwfn, ...@@ -1051,10 +1112,16 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
memset(if_link, 0, sizeof(*if_link)); memset(if_link, 0, sizeof(*if_link));
/* Prepare source inputs */ /* Prepare source inputs */
memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params)); if (IS_PF(hwfn->cdev)) {
memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn), memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
sizeof(link_caps)); memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
sizeof(link_caps));
} else {
memset(&params, 0, sizeof(params));
memset(&link, 0, sizeof(link));
memset(&link_caps, 0, sizeof(link_caps));
}
/* Set the link parameters to pass to protocol driver */ /* Set the link parameters to pass to protocol driver */
if (link.link_up) if (link.link_up)
...@@ -1177,6 +1244,9 @@ static int qed_drain(struct qed_dev *cdev) ...@@ -1177,6 +1244,9 @@ static int qed_drain(struct qed_dev *cdev)
struct qed_ptt *ptt; struct qed_ptt *ptt;
int i, rc; int i, rc;
if (IS_VF(cdev))
return 0;
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i]; hwfn = &cdev->hwfns[i];
ptt = qed_ptt_acquire(hwfn); ptt = qed_ptt_acquire(hwfn);
......
...@@ -19,6 +19,8 @@ ...@@ -19,6 +19,8 @@
#include "qed_hw.h" #include "qed_hw.h"
#include "qed_mcp.h" #include "qed_mcp.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sriov.h"
#define CHIP_MCP_RESP_ITER_US 10 #define CHIP_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
...@@ -787,26 +789,42 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, ...@@ -787,26 +789,42 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
return rc; return rc;
} }
int qed_mcp_get_mfw_ver(struct qed_dev *cdev, int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
u32 *p_mfw_ver) struct qed_ptt *p_ptt,
u32 *p_mfw_ver, u32 *p_running_bundle_id)
{ {
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt;
u32 global_offsize; u32 global_offsize;
p_ptt = qed_ptt_acquire(p_hwfn); if (IS_VF(p_hwfn->cdev)) {
if (!p_ptt) if (p_hwfn->vf_iov_info) {
return -EBUSY; struct pfvf_acquire_resp_tlv *p_resp;
p_resp = &p_hwfn->vf_iov_info->acquire_resp;
*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
return 0;
} else {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF requested MFW version prior to ACQUIRE\n");
return -EINVAL;
}
}
global_offsize = qed_rd(p_hwfn, p_ptt, global_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info-> SECTION_OFFSIZE_ADDR(p_hwfn->
public_base, mcp_info->public_base,
PUBLIC_GLOBAL)); PUBLIC_GLOBAL));
*p_mfw_ver = qed_rd(p_hwfn, p_ptt, *p_mfw_ver =
SECTION_ADDR(global_offsize, 0) + qed_rd(p_hwfn, p_ptt,
offsetof(struct public_global, mfw_ver)); SECTION_ADDR(global_offsize,
0) + offsetof(struct public_global, mfw_ver));
qed_ptt_release(p_hwfn, p_ptt);
if (p_running_bundle_id != NULL) {
*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
SECTION_ADDR(global_offsize, 0) +
offsetof(struct public_global,
running_bundle_id));
}
return 0; return 0;
} }
...@@ -817,6 +835,9 @@ int qed_mcp_get_media_type(struct qed_dev *cdev, ...@@ -817,6 +835,9 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[0]; struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt; struct qed_ptt *p_ptt;
if (IS_VF(cdev))
return -EINVAL;
if (!qed_mcp_is_init(p_hwfn)) { if (!qed_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
return -EBUSY; return -EBUSY;
...@@ -951,6 +972,9 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, ...@@ -951,6 +972,9 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
{ {
u32 flash_size; u32 flash_size;
if (IS_VF(p_hwfn->cdev))
return -EINVAL;
flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
...@@ -961,6 +985,37 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, ...@@ -961,6 +985,37 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
return 0; return 0;
} }
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
u32 resp = 0, param = 0, rc_param = 0;
int rc;
/* Only Leader can configure MSIX, and need to take CMT into account */
if (!IS_LEAD_HWFN(p_hwfn))
return 0;
num *= p_hwfn->cdev->num_hwfns;
param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
&resp, &rc_param);
if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
rc = -EINVAL;
} else {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
num, vf_id);
}
return rc;
}
int int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct qed_ptt *p_ptt,
......
...@@ -149,13 +149,16 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, ...@@ -149,13 +149,16 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
/** /**
* @brief Get the management firmware version value * @brief Get the management firmware version value
* *
* @param cdev - qed dev pointer * @param p_hwfn
* @param mfw_ver - mfw version value * @param p_ptt
* @param p_mfw_ver - mfw version value
* @param p_running_bundle_id - image id in nvram; Optional.
* *
* @return int - 0 - operation was successul. * @return int - 0 - operation was successful.
*/ */
int qed_mcp_get_mfw_ver(struct qed_dev *cdev, int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
u32 *mfw_ver); struct qed_ptt *p_ptt,
u32 *p_mfw_ver, u32 *p_running_bundle_id);
/** /**
* @brief Get media type value of the port. * @brief Get media type value of the port.
...@@ -418,6 +421,20 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, ...@@ -418,6 +421,20 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
* @return true iff MFW is running and mcp_info is initialized * @return true iff MFW is running and mcp_info is initialized
*/ */
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn); bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
/**
* @brief request MFW to configure MSI-X for a VF
*
* @param p_hwfn
* @param p_ptt
* @param vf_id - absolute inside engine
* @param num_sbs - number of entries to request
*
* @return int
*/
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num);
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw); int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw); int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn, int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
......
...@@ -39,6 +39,8 @@ ...@@ -39,6 +39,8 @@
0x2aae04UL 0x2aae04UL
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \ #define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
0x2aa16cUL 0x2aa16cUL
#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \
0x2aa118UL
#define BAR0_MAP_REG_MSDM_RAM \ #define BAR0_MAP_REG_MSDM_RAM \
0x1d00000UL 0x1d00000UL
#define BAR0_MAP_REG_USDM_RAM \ #define BAR0_MAP_REG_USDM_RAM \
...@@ -111,6 +113,8 @@ ...@@ -111,6 +113,8 @@
0x009778UL 0x009778UL
#define MISCS_REG_CHIP_METAL \ #define MISCS_REG_CHIP_METAL \
0x009774UL 0x009774UL
#define MISCS_REG_FUNCTION_HIDE \
0x0096f0UL
#define BRB_REG_HEADER_SIZE \ #define BRB_REG_HEADER_SIZE \
0x340804UL 0x340804UL
#define BTB_REG_HEADER_SIZE \ #define BTB_REG_HEADER_SIZE \
...@@ -119,6 +123,8 @@ ...@@ -119,6 +123,8 @@
0x1c0708UL 0x1c0708UL
#define CCFC_REG_ACTIVITY_COUNTER \ #define CCFC_REG_ACTIVITY_COUNTER \
0x2e8800UL 0x2e8800UL
#define CCFC_REG_STRONG_ENABLE_VF \
0x2e070cUL
#define CDU_REG_CID_ADDR_PARAMS \ #define CDU_REG_CID_ADDR_PARAMS \
0x580900UL 0x580900UL
#define DBG_REG_CLIENT_ENABLE \ #define DBG_REG_CLIENT_ENABLE \
......
...@@ -62,6 +62,8 @@ union ramrod_data { ...@@ -62,6 +62,8 @@ union ramrod_data {
struct vport_stop_ramrod_data vport_stop; struct vport_stop_ramrod_data vport_stop;
struct vport_update_ramrod_data vport_update; struct vport_update_ramrod_data vport_update;
struct vport_filter_update_ramrod_data vport_filter_update; struct vport_filter_update_ramrod_data vport_filter_update;
struct vf_start_ramrod_data vf_start;
}; };
#define EQ_MAX_CREDIT 0xffffffff #define EQ_MAX_CREDIT 0xffffffff
......
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include "qed_int.h" #include "qed_int.h"
#include "qed_reg_addr.h" #include "qed_reg_addr.h"
#include "qed_sp.h" #include "qed_sp.h"
#include "qed_sriov.h"
int qed_sp_init_request(struct qed_hwfn *p_hwfn, int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent, struct qed_spq_entry **pp_ent,
...@@ -357,6 +358,13 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, ...@@ -357,6 +358,13 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
&p_ramrod->tunnel_config); &p_ramrod->tunnel_config);
p_hwfn->hw_info.personality = PERSONALITY_ETH; p_hwfn->hw_info.personality = PERSONALITY_ETH;
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
p_ramrod->num_vfs = (u8) p_iov->total_vfs;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ, DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
sb, sb_index, sb, sb_index,
......
...@@ -387,6 +387,9 @@ static int qed_cqe_completion( ...@@ -387,6 +387,9 @@ static int qed_cqe_completion(
struct eth_slow_path_rx_cqe *cqe, struct eth_slow_path_rx_cqe *cqe,
enum protocol_type protocol) enum protocol_type protocol)
{ {
if (IS_VF(p_hwfn->cdev))
return 0;
/* @@@tmp - it's possible we'll eventually want to handle some /* @@@tmp - it's possible we'll eventually want to handle some
* actual commands that can arrive here, but for now this is only * actual commands that can arrive here, but for now this is only
* used to complete the ramrod using the echo value on the cqe * used to complete the ramrod using the echo value on the cqe
......
This diff is collapsed.
...@@ -21,6 +21,9 @@ ...@@ -21,6 +21,9 @@
#endif #endif
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
#define QED_MAX_VF_CHAINS_PER_PF 16
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
/* This struct is part of qed_dev and contains data relevant to all hwfns; /* This struct is part of qed_dev and contains data relevant to all hwfns;
* Initialized only if SR-IOV cpabability is exposed in PCIe config space. * Initialized only if SR-IOV cpabability is exposed in PCIe config space.
*/ */
...@@ -60,7 +63,17 @@ struct qed_iov_vf_mbx { ...@@ -60,7 +63,17 @@ struct qed_iov_vf_mbx {
struct vfpf_first_tlv first_tlv; struct vfpf_first_tlv first_tlv;
}; };
struct qed_vf_q_info {
u16 fw_rx_qid;
u16 fw_tx_qid;
u8 fw_cid;
u8 rxq_active;
u8 txq_active;
};
enum vf_state { enum vf_state {
VF_FREE = 0, /* VF ready to be acquired holds no resc */
VF_ACQUIRED, /* VF, acquired, but not initalized */
VF_STOPPED /* VF, Stopped */ VF_STOPPED /* VF, Stopped */
}; };
...@@ -82,6 +95,17 @@ struct qed_vf_info { ...@@ -82,6 +95,17 @@ struct qed_vf_info {
#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \ #define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \
(p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \ (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
(p_vf)->abs_vf_id) (p_vf)->abs_vf_id)
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
}; };
/* This structure is part of qed_hwfn and used only for PFs that have sriov /* This structure is part of qed_hwfn and used only for PFs that have sriov
...@@ -133,6 +157,26 @@ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id); ...@@ -133,6 +157,26 @@ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
*/ */
int qed_iov_hw_info(struct qed_hwfn *p_hwfn); int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
/**
* @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
*
* @param p_hwfn
* @param p_iov
* @param type
* @param length
*
* @return pointer to the newly placed tlv
*/
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
/**
* @brief list the types and lengths of the tlvs on the buffer
*
* @param p_hwfn
* @param tlvs_list
*/
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
/** /**
* @brief qed_iov_alloc - allocate sriov related resources * @brief qed_iov_alloc - allocate sriov related resources
* *
...@@ -179,6 +223,7 @@ void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first); ...@@ -179,6 +223,7 @@ void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev); int qed_iov_wq_start(struct qed_dev *cdev);
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag); void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
#else #else
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
u16 rel_vf_id) u16 rel_vf_id)
...@@ -228,6 +273,10 @@ static inline void qed_schedule_iov(struct qed_hwfn *hwfn, ...@@ -228,6 +273,10 @@ static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
enum qed_iov_wq_flag flag) enum qed_iov_wq_flag flag)
{ {
} }
static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}
#endif #endif
#define qed_for_each_vf(_p_hwfn, _i) \ #define qed_for_each_vf(_p_hwfn, _i) \
......
This diff is collapsed.
...@@ -9,6 +9,22 @@ ...@@ -9,6 +9,22 @@
#ifndef _QED_VF_H #ifndef _QED_VF_H
#define _QED_VF_H #define _QED_VF_H
struct vf_pf_resc_request {
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
u16 padding;
};
struct hw_sb_info {
u16 hw_sb_id;
u8 sb_qid;
u8 padding[5];
};
enum { enum {
PFVF_STATUS_WAITING, PFVF_STATUS_WAITING,
PFVF_STATUS_SUCCESS, PFVF_STATUS_SUCCESS,
...@@ -52,6 +68,107 @@ struct channel_list_end_tlv { ...@@ -52,6 +68,107 @@ struct channel_list_end_tlv {
u8 padding[4]; u8 padding[4];
}; };
#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0)
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
u64 capabilities;
u8 fw_major;
u8 fw_minor;
u8 fw_revision;
u8 fw_engineering;
u32 driver_version;
u16 opaque_fid; /* ME register value */
u8 os_type; /* VFPF_ACQUIRE_OS_* value */
u8 padding[5];
} vfdev_info;
struct vf_pf_resc_request resc_request;
u64 bulletin_addr;
u32 bulletin_size;
u32 padding;
};
struct pfvf_storm_stats {
u32 address;
u32 len;
};
struct pfvf_stats_info {
struct pfvf_storm_stats mstats;
struct pfvf_storm_stats pstats;
struct pfvf_storm_stats tstats;
struct pfvf_storm_stats ustats;
};
struct pfvf_acquire_resp_tlv {
struct pfvf_tlv hdr;
struct pf_vf_pfdev_info {
u32 chip_num;
u32 mfw_ver;
u16 fw_major;
u16 fw_minor;
u16 fw_rev;
u16 fw_eng;
u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED BIT(0)
#define PFVF_ACQUIRE_CAP_100G BIT(1) /* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the sanity
* mechanism [version-based] and allow a VF that can't be supported to pass
* the acquisition phase.
* To overcome this, PFs now indicate that they're past that point and the new
* VFs would fail probe on the older PFs that fail to do so.
*/
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2)
u16 db_size;
u8 indices_per_sb;
u8 os_type;
/* These should match the PF's qed_dev values */
u16 chip_rev;
u8 dev_type;
u8 padding;
struct pfvf_stats_info stats_info;
u8 port_mac[ETH_ALEN];
u8 padding2[2];
} pfdev_info;
struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF 16
#define PFVF_MAX_SBS_PER_VF 16
struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
u8 cid[PFVF_MAX_QUEUES_PER_VF];
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
u8 padding[2];
} resc;
u32 bulletin_size;
u32 padding;
};
#define TLV_BUFFER_SIZE 1024 #define TLV_BUFFER_SIZE 1024
struct tlv_buffer_size { struct tlv_buffer_size {
u8 tlv_buffer[TLV_BUFFER_SIZE]; u8 tlv_buffer[TLV_BUFFER_SIZE];
...@@ -59,12 +176,14 @@ struct tlv_buffer_size { ...@@ -59,12 +176,14 @@ struct tlv_buffer_size {
union vfpf_tlvs { union vfpf_tlvs {
struct vfpf_first_tlv first_tlv; struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
struct channel_list_end_tlv list_end; struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size; struct tlv_buffer_size tlv_buf_size;
}; };
union pfvf_tlvs { union pfvf_tlvs {
struct pfvf_def_resp_tlv default_resp; struct pfvf_def_resp_tlv default_resp;
struct pfvf_acquire_resp_tlv acquire_resp;
struct tlv_buffer_size tlv_buf_size; struct tlv_buffer_size tlv_buf_size;
}; };
...@@ -86,8 +205,118 @@ struct qed_bulletin { ...@@ -86,8 +205,118 @@ struct qed_bulletin {
enum { enum {
CHANNEL_TLV_NONE, /* ends tlv sequence */ CHANNEL_TLV_NONE, /* ends tlv sequence */
CHANNEL_TLV_ACQUIRE,
CHANNEL_TLV_LIST_END, CHANNEL_TLV_LIST_END,
CHANNEL_TLV_MAX CHANNEL_TLV_MAX
}; };
/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
union vfpf_tlvs *vf2pf_request;
dma_addr_t vf2pf_request_phys;
union pfvf_tlvs *pf2vf_reply;
dma_addr_t pf2vf_reply_phys;
/* Should be taken whenever the mailbox buffers are accessed */
struct mutex mutex;
u8 *offset;
/* Bulletin Board */
struct qed_bulletin bulletin;
struct qed_bulletin_content bulletin_shadow;
/* we set aside a copy of the acquire response */
struct pfvf_acquire_resp_tlv acquire_resp;
};
#ifdef CONFIG_QED_SRIOV
/**
* @brief Get number of Rx queues allocated for VF by qed
*
* @param p_hwfn
* @param num_rxqs - allocated RX queues
*/
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
/**
* @brief Get port mac address for VF
*
* @param p_hwfn
* @param port_mac - destination location for port mac
*/
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
/**
* @brief Get number of VLAN filters allocated for VF by qed
*
* @param p_hwfn
* @param num_rxqs - allocated VLAN filters
*/
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
u8 *num_vlan_filters);
/**
* @brief Set firmware version information in dev_info from VFs acquire response tlv
*
* @param p_hwfn
* @param fw_major
* @param fw_minor
* @param fw_rev
* @param fw_eng
*/
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng);
/**
* @brief hw preparation for VF
* sends ACQUIRE message
*
* @param p_hwfn
*
* @return int
*/
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
/**
* @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
* sb_id. For VFs igu sbs don't have to be contiguous
*
* @param p_hwfn
* @param sb_id
*
* @return INLINE u16
*/
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
#else
static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}
static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}
static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
u8 *num_vlan_filters)
{
}
static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng)
{
}
static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
return -EINVAL;
}
static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
return 0;
}
#endif
#endif #endif
...@@ -2283,8 +2283,9 @@ enum qede_probe_mode { ...@@ -2283,8 +2283,9 @@ enum qede_probe_mode {
}; };
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
enum qede_probe_mode mode) bool is_vf, enum qede_probe_mode mode)
{ {
struct qed_probe_params probe_params;
struct qed_slowpath_params params; struct qed_slowpath_params params;
struct qed_dev_eth_info dev_info; struct qed_dev_eth_info dev_info;
struct qede_dev *edev; struct qede_dev *edev;
...@@ -2294,8 +2295,12 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, ...@@ -2294,8 +2295,12 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
if (unlikely(dp_level & QED_LEVEL_INFO)) if (unlikely(dp_level & QED_LEVEL_INFO))
pr_notice("Starting qede probe\n"); pr_notice("Starting qede probe\n");
cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH, memset(&probe_params, 0, sizeof(probe_params));
dp_module, dp_level); probe_params.protocol = QED_PROTOCOL_ETH;
probe_params.dp_module = dp_module;
probe_params.dp_level = dp_level;
probe_params.is_vf = is_vf;
cdev = qed_ops->common->probe(pdev, &probe_params);
if (!cdev) { if (!cdev) {
rc = -ENODEV; rc = -ENODEV;
goto err0; goto err0;
...@@ -2365,7 +2370,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id) ...@@ -2365,7 +2370,7 @@ static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
qede_config_debug(debug, &dp_module, &dp_level); qede_config_debug(debug, &dp_module, &dp_level);
return __qede_probe(pdev, dp_module, dp_level, return __qede_probe(pdev, dp_module, dp_level, false,
QEDE_PROBE_NORMAL); QEDE_PROBE_NORMAL);
} }
......
...@@ -285,6 +285,63 @@ ...@@ -285,6 +285,63 @@
#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 #define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 #define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
#define PXP_VF_BAR0_START_IGU 0
#define PXP_VF_BAR0_IGU_LENGTH 0x3000
#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \
PXP_VF_BAR0_IGU_LENGTH - 1)
#define PXP_VF_BAR0_START_DQ 0x3000
#define PXP_VF_BAR0_DQ_LENGTH 0x200
#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \
PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+ 4)
#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \
PXP_VF_BAR0_DQ_LENGTH - 1)
#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
/* ILT Records */ /* ILT Records */
#define PXP_NUM_ILT_RECORDS_BB 7600 #define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000 #define PXP_NUM_ILT_RECORDS_K2 11000
......
...@@ -140,6 +140,13 @@ struct qed_link_output { ...@@ -140,6 +140,13 @@ struct qed_link_output {
u32 pause_config; u32 pause_config;
}; };
struct qed_probe_params {
enum qed_protocol protocol;
u32 dp_module;
u8 dp_level;
bool is_vf;
};
#define QED_DRV_VER_STR_SIZE 12 #define QED_DRV_VER_STR_SIZE 12
struct qed_slowpath_params { struct qed_slowpath_params {
u32 int_mode; u32 int_mode;
...@@ -207,8 +214,7 @@ struct qed_common_ops { ...@@ -207,8 +214,7 @@ struct qed_common_ops {
struct qed_selftest_ops *selftest; struct qed_selftest_ops *selftest;
struct qed_dev* (*probe)(struct pci_dev *dev, struct qed_dev* (*probe)(struct pci_dev *dev,
enum qed_protocol protocol, struct qed_probe_params *params);
u32 dp_module, u8 dp_level);
void (*remove)(struct qed_dev *cdev); void (*remove)(struct qed_dev *cdev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment