Commit 37bff2b9 authored by Yuval Mintz, committed by David S. Miller

qed: Add VF->PF channel infrastructure

Communication between VF and PF is based on a dedicated HW channel;
the VF prepares a message and signals the HW, and the PF then gets a
notification that the message exists. The PF copies the message,
processes it and DMAs an answer back to the VF as a response.

The messages themselves are TLV-based - allowing easier backward/forward
compatibility.
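
As an illustration only (hedged; this patch defines the TLV containers, while
actual request opcodes arrive in later patches), a minimal message assembled
from the structures added to qed_vf.h below could look like:

    /* Hypothetical minimal VF->PF request: the first TLV carries the DMA
     * address the PF should write its reply to, and a list-end TLV
     * terminates the sequence. The struct name is illustrative only.
     */
    struct dummy_vf_request {
        struct vfpf_first_tlv first;          /* first.tl.type = request opcode */
        struct channel_list_end_tlv list_end; /* tl.type = CHANNEL_TLV_LIST_END */
    };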

This patch adds the infrastructure of the channel on the PF side -
starting with the arrival of the notification and ending with DMAing
the response back to the VF.

It also adds a dummy response as a reference, since this patch only lays
the groundwork of the communication; it doesn't yet add support for any
actual messages.
Signed-off-by: Yuval Mintz <Yuval.Mintz@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 32a47e72
......@@ -378,6 +378,12 @@ struct qed_hwfn {
struct qed_simd_fp_handler simd_proto_handler[64];
+#ifdef CONFIG_QED_SRIOV
+	struct workqueue_struct *iov_wq;
+
+	struct delayed_work iov_task;
+	unsigned long iov_task_flags;
+#endif
struct z_stream_s *stream;
};
......
......@@ -182,11 +182,15 @@ enum qed_dmae_address_type_t {
* used mostly to write a zeroed buffer to destination address
* using DMA
*/
#define QED_DMAE_FLAG_RW_REPL_SRC	0x00000001
+#define QED_DMAE_FLAG_VF_SRC		0x00000002
+#define QED_DMAE_FLAG_VF_DST		0x00000004
#define QED_DMAE_FLAG_COMPLETION_DST	0x00000008

struct qed_dmae_params {
	u32 flags; /* consists of QED_DMAE_FLAG_* values */
+	u8 src_vfid;
+	u8 dst_vfid;
};
/**
......@@ -208,6 +212,23 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
u32 size_in_dwords,
u32 flags);
+/**
+ * @brief qed_dmae_host2host - copy data from a source address to a
+ *	destination address (for SRIOV) using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param p_params
+ */
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+		       struct qed_ptt *p_ptt,
+		       dma_addr_t source_addr,
+		       dma_addr_t dest_addr,
+		       u32 size_in_dwords, struct qed_dmae_params *p_params);
+
/**
* @brief qed_chain_alloc - Allocate and initialize a chain
*
......
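
To give a feel for the new DMAE API (a hedged usage sketch, not code from this
patch; vfid, vf_req_phys, pf_copy_phys and length_dw are hypothetical):

    struct qed_dmae_params params;
    int rc;

    memset(&params, 0, sizeof(params));
    params.flags = QED_DMAE_FLAG_VF_SRC;    /* source lives in a VF's address space */
    params.src_vfid = vfid;                 /* hypothetical VF index */

    /* Copy length_dw dwords from the VF's request buffer into a
     * PF-owned DMA buffer; the destination is plain PF host memory.
     */
    rc = qed_dmae_host2host(p_hwfn, p_ptt, vf_req_phys, pf_copy_phys,
                            length_dw, &params);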
......@@ -31,7 +31,7 @@ enum common_event_opcode {
COMMON_EVENT_PF_STOP,
COMMON_EVENT_RESERVED,
COMMON_EVENT_RESERVED2,
-COMMON_EVENT_RESERVED3,
+COMMON_EVENT_VF_PF_CHANNEL,
COMMON_EVENT_RESERVED4,
COMMON_EVENT_RESERVED5,
COMMON_EVENT_RESERVED6,
......
......@@ -355,8 +355,8 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_dst_type_grc,
struct qed_dmae_params *p_params)
{
+	u16 opcode_b = 0;
	u32 opcode = 0;
-	u16 opcodeB = 0;
/* Whether the source is the PCIe or the GRC.
* 0- The source is the PCIe
......@@ -398,14 +398,24 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
DMAE_CMD_DST_ADDR_RESET_SHIFT);
-	opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
-		    DMAE_CMD_SRC_VF_ID_SHIFT);
+	/* SRC/DST VFID: all 1's - pf, otherwise VF id */
+	if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
+		opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
+		opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
+	} else {
+		opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
+			    DMAE_CMD_SRC_VF_ID_SHIFT;
+	}

-	opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
-		    DMAE_CMD_DST_VF_ID_SHIFT);
+	if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
+		opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
+		opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+	} else {
+		opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+	}

	p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
-	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
+	p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}
u32 qed_dmae_idx_to_go_cmd(u8 idx)
......@@ -753,6 +763,28 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
return rc;
}
+int
+qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+		   struct qed_ptt *p_ptt,
+		   dma_addr_t source_addr,
+		   dma_addr_t dest_addr,
+		   u32 size_in_dwords, struct qed_dmae_params *p_params)
+{
+	int rc;
+
+	mutex_lock(&(p_hwfn->dmae_info.mutex));
+
+	rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+				      dest_addr,
+				      QED_DMAE_ADDRESS_HOST_PHYS,
+				      QED_DMAE_ADDRESS_HOST_PHYS,
+				      size_in_dwords, p_params);
+
+	mutex_unlock(&(p_hwfn->dmae_info.mutex));
+
+	return rc;
+}
+
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
enum protocol_type proto,
union qed_qm_pq_params *p_params)
......
......@@ -24,6 +24,7 @@
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_mcp.h"
......@@ -749,7 +750,10 @@ static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_mcp_drv_version drv_version;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
-	int rc;
+	int rc = -EINVAL;
+
+	if (qed_iov_wq_start(cdev))
+		goto err;
rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
&cdev->pdev->dev);
......@@ -826,6 +830,8 @@ static int qed_slowpath_start(struct qed_dev *cdev,
err:
release_firmware(cdev->firmware);
+	qed_iov_wq_stop(cdev, false);
return rc;
}
......@@ -842,6 +848,8 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
qed_disable_msix(cdev);
qed_nic_reset(cdev);
+	qed_iov_wq_stop(cdev, true);
release_firmware(cdev->firmware);
return 0;
......
......@@ -27,6 +27,7 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
/***************************************************************************
* Structures & Definitions
......@@ -242,10 +243,17 @@ static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
-	DP_NOTICE(p_hwfn,
-		  "Unknown Async completion for protocol: %d\n",
-		  p_eqe->protocol_id);
-	return -EINVAL;
+	switch (p_eqe->protocol_id) {
+	case PROTOCOLID_COMMON:
+		return qed_sriov_eqe_event(p_hwfn,
+					   p_eqe->opcode,
+					   p_eqe->echo, &p_eqe->data);
+	default:
+		DP_NOTICE(p_hwfn,
+			  "Unknown Async completion for protocol: %d\n",
+			  p_eqe->protocol_id);
+		return -EINVAL;
+	}
}
/***************************************************************************
......
This diff is collapsed; it holds the bulk of the new PF-side channel implementation (the new qed_sriov.c).
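
Inside that collapsed code, the qed_sriov_eqe_event() handler declared in
qed_sriov.h below presumably dispatches on the opcode and recovers the VF's
message address from the event payload; a hedged sketch (qed_sriov_vfpf_msg is
a hypothetical helper name, not confirmed by the visible diff):

    int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
                            u8 opcode, __le16 echo, union event_ring_data *data)
    {
        switch (opcode) {
        case COMMON_EVENT_VF_PF_CHANNEL:
            /* The EQE payload carries the address of the VF's request. */
            return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
                                      &data->vf_pf_channel.msg_addr);
        default:
            DP_INFO(p_hwfn, "Unknown sriov eqe event 0x%02x\n", opcode);
            return -EINVAL;
        }
    }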
......@@ -50,6 +50,14 @@ struct qed_iov_vf_mbx {
dma_addr_t req_phys;
union pfvf_tlvs *reply_virt;
dma_addr_t reply_phys;
+
+	/* Address in VF where a pending message is located */
+	dma_addr_t pending_req;
+
+	u8 *offset;
+
+	/* saved VF request header */
+	struct vfpf_first_tlv first_tlv;
};
enum vf_state {
......@@ -96,6 +104,14 @@ struct qed_pf_iov {
u32 bulletins_size;
};
+
+enum qed_iov_wq_flag {
+	QED_IOV_WQ_MSG_FLAG,
+	QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+	QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+	QED_IOV_WQ_STOP_WQ_FLAG,
+	QED_IOV_WQ_FLR_FLAG,
+};
#ifdef CONFIG_QED_SRIOV
/**
* @brief - Given a VF index, return index of next [including that] active VF.
......@@ -147,6 +163,22 @@ void qed_iov_free(struct qed_hwfn *p_hwfn);
* @param cdev
*/
void qed_iov_free_hw_info(struct qed_dev *cdev);
+
+/**
+ * @brief qed_sriov_eqe_event - handle an async sriov event that arrived
+ *	on the eqe.
+ *
+ * @param p_hwfn
+ * @param opcode
+ * @param echo
+ * @param data
+ */
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+			u8 opcode, __le16 echo, union event_ring_data *data);
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
+int qed_iov_wq_start(struct qed_dev *cdev);
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
#else
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
u16 rel_vf_id)
......@@ -175,6 +207,27 @@ static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}
+
+static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+				      u8 opcode,
+				      __le16 echo, union event_ring_data *data)
+{
+	return -EINVAL;
+}
+
+static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+}
+
+static inline int qed_iov_wq_start(struct qed_dev *cdev)
+{
+	return 0;
+}
+
+static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
+				    enum qed_iov_wq_flag flag)
+{
+}
#endif
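
Given the iov_wq/iov_task members added to struct qed_hwfn above, the
qed_schedule_iov() helper is presumably little more than a flag-set plus a
workqueue kick; a hedged sketch of what the collapsed implementation likely
resembles:

    void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
    {
        /* Record why the task runs; iov_task can test-and-clear it. */
        smp_mb__before_atomic();
        set_bit(flag, &hwfn->iov_task_flags);
        smp_mb__after_atomic();

        /* Kick the per-hwfn IOV workqueue to run iov_task soon. */
        queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
    }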
#define qed_for_each_vf(_p_hwfn, _i) \
......
......@@ -9,16 +9,62 @@
#ifndef _QED_VF_H
#define _QED_VF_H
+enum {
+	PFVF_STATUS_WAITING,
+	PFVF_STATUS_SUCCESS,
+	PFVF_STATUS_FAILURE,
+	PFVF_STATUS_NOT_SUPPORTED,
+	PFVF_STATUS_NO_RESOURCE,
+	PFVF_STATUS_FORCED,
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+	u16 type;
+	u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+	struct channel_tlv tl;
+	u32 padding;
+	u64 reply_address;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+	struct channel_tlv tl;
+	u8 status;
+	u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_def_resp_tlv {
+	struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+	struct channel_tlv tl;
+	u8 padding[4];
+};
+
#define TLV_BUFFER_SIZE 1024
struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
};

union vfpf_tlvs {
+	struct vfpf_first_tlv first_tlv;
+	struct channel_list_end_tlv list_end;
	struct tlv_buffer_size tlv_buf_size;
};

union pfvf_tlvs {
+	struct pfvf_def_resp_tlv default_resp;
	struct tlv_buffer_size tlv_buf_size;
};
......@@ -38,4 +84,10 @@ struct qed_bulletin {
u32 size;
};
+
+enum {
+	CHANNEL_TLV_NONE,	/* ends tlv sequence */
+	CHANNEL_TLV_LIST_END,
+	CHANNEL_TLV_MAX
+};
#endif
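
On the reply side, responses are presumably chained through the mbx->offset
cursor seen in struct qed_iov_vf_mbx above, one TLV at a time. A hedged sketch
of such a helper and of the dummy response this patch describes (the helper
name and signature are assumptions about the collapsed code):

    /* Hypothetical TLV-append helper: write a channel_tlv header at the
     * cursor, advance the cursor by the TLV's full length, and return a
     * pointer so the caller can fill in the TLV body.
     */
    static void *qed_add_tlv(u8 **offset, u16 type, u16 length)
    {
        struct channel_tlv *tl = (struct channel_tlv *)*offset;

        tl->type = type;
        tl->length = length;
        *offset += length;
        return tl;
    }

    /* e.g. a dummy response: a default-resp TLV echoing the request's
     * type (req_type is hypothetical), then the list terminator.
     */
    qed_add_tlv(&mbx->offset, req_type, sizeof(struct pfvf_def_resp_tlv));
    qed_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
                sizeof(struct channel_list_end_tlv));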
......@@ -327,9 +327,14 @@ struct regpair {
__le32 hi;
};
+struct vf_pf_channel_eqe_data {
+	struct regpair msg_addr;
+};
+
/* Event Data Union */
union event_ring_data {
u8 bytes[8];
+	struct vf_pf_channel_eqe_data vf_pf_channel;
struct async_data async_info;
};
......