Commit 44a7b3b6 authored by David S. Miller

Merge branch 'bnxt_en-next'

Michael Chan says:

====================
bnxt_en: Update for net-next.

Three main changes in this series, besides the usual firmware spec
update:

1. Add support for a new firmware communication channel direct to the
firmware processor that handles flow offloads.  This speeds up
flow offload operations.

2. Use 64-bit internal flow handles to increase the number of flows
that can be offloaded.

3. Add level-2 context memory paging so that we can configure more
context memory for RDMA on the 57500 chips.  Allocate more context
memory if RDMA is enabled on the 57500 chips.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ac68a3d3 0c2ff8d7
(A large diff in this commit is collapsed and not shown here.)
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -567,7 +567,6 @@ struct nqe_cn {
 #define HWRM_RESP_LEN_MASK		0xffff0000
 #define HWRM_RESP_LEN_SFT		16
 #define HWRM_RESP_VALID_MASK		0xff000000
-#define HWRM_SEQ_ID_INVALID		-1
 #define BNXT_HWRM_REQ_MAX_SIZE		128
 #define BNXT_HWRM_REQS_PER_PAGE	(BNXT_PAGE_SIZE /	\
					 BNXT_HWRM_REQ_MAX_SIZE)
@@ -585,6 +584,9 @@ struct nqe_cn {
 #define HWRM_VALID_BIT_DELAY_USEC	20

+#define BNXT_HWRM_CHNL_CHIMP		0
+#define BNXT_HWRM_CHNL_KONG		1
+
 #define BNXT_RX_EVENT	1
 #define BNXT_AGG_EVENT	2
 #define BNXT_TX_EVENT	4
@@ -615,9 +617,12 @@ struct bnxt_sw_rx_agg_bd {
 struct bnxt_ring_mem_info {
 	int			nr_pages;
 	int			page_size;
-	u32			flags;
+	u16			flags;
 #define BNXT_RMEM_VALID_PTE_FLAG	1
 #define BNXT_RMEM_RING_PTE_FLAG	2
+#define BNXT_RMEM_USE_FULL_PAGE_FLAG	4
+
+	u16			depth;

 	void			**pg_arr;
 	dma_addr_t		*dma_arr;
@@ -1113,9 +1118,14 @@ struct bnxt_test_info {
 	char string[BNXT_MAX_TEST][ETH_GSTRING_LEN];
 };

+#define BNXT_GRCPF_REG_CHIMP_COMM		0x0
+#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER	0x100
 #define BNXT_GRCPF_REG_WINDOW_BASE_OUT		0x400
 #define BNXT_CAG_REG_LEGACY_INT_STATUS		0x4014
 #define BNXT_CAG_REG_BASE			0x300000
+
+#define BNXT_GRCPF_REG_KONG_COMM		0xA00
+#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER	0xB00

 struct bnxt_tc_flow_stats {
 	u64		packets;
@@ -1183,12 +1193,15 @@ struct bnxt_vf_rep {
 #define PTU_PTE_NEXT_TO_LAST	0x4UL

 #define MAX_CTX_PAGES	(BNXT_PAGE_SIZE / 8)
+#define MAX_CTX_TOTAL_PAGES	(MAX_CTX_PAGES * MAX_CTX_PAGES)

 struct bnxt_ctx_pg_info {
 	u32		entries;
+	u32		nr_pages;
 	void		*ctx_pg_arr[MAX_CTX_PAGES];
 	dma_addr_t	ctx_dma_arr[MAX_CTX_PAGES];
 	struct bnxt_ring_mem_info ring_mem;
+	struct bnxt_ctx_pg_info **ctx_pg_tbl;
 };
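
The new ctx_pg_tbl pointer is what enables level-2 paging: when a context type needs more than MAX_CTX_PAGES pages, the top level points at second-level bnxt_ctx_pg_info structures rather than directly at data pages. A rough capacity sketch follows (illustrative C, not driver code; it assumes the 4 KB BNXT_PAGE_SIZE used on the 57500 chips, and the EX_ names are placeholders for the defines above):

/* Illustrative capacity math for level-2 context paging; assumes
 * BNXT_PAGE_SIZE = 4096 and 8-byte page-table entries as defined above.
 */
#include <stdio.h>

#define EX_PAGE_SIZE		4096
#define EX_MAX_CTX_PAGES	(EX_PAGE_SIZE / 8)			/* 512 */
#define EX_MAX_CTX_TOTAL_PAGES	(EX_MAX_CTX_PAGES * EX_MAX_CTX_PAGES)	/* 262144 */

int main(void)
{
	/* One level: 512 data pages -> 2 MB of context memory per type. */
	printf("level-1 max: %ld bytes\n",
	       (long)EX_MAX_CTX_PAGES * EX_PAGE_SIZE);
	/* Two levels: 512 * 512 data pages -> 1 GB, which is what makes room
	 * for the RDMA MR/AV (mrav_mem) and timer (tim_mem) contexts added
	 * in the next hunk. */
	printf("level-2 max: %ld bytes\n",
	       (long)EX_MAX_CTX_TOTAL_PAGES * EX_PAGE_SIZE);
	return 0;
}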
struct bnxt_ctx_mem_info { struct bnxt_ctx_mem_info {
...@@ -1224,6 +1237,8 @@ struct bnxt_ctx_mem_info { ...@@ -1224,6 +1237,8 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info cq_mem; struct bnxt_ctx_pg_info cq_mem;
struct bnxt_ctx_pg_info vnic_mem; struct bnxt_ctx_pg_info vnic_mem;
struct bnxt_ctx_pg_info stat_mem; struct bnxt_ctx_pg_info stat_mem;
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
struct bnxt_ctx_pg_info *tqm_mem[9]; struct bnxt_ctx_pg_info *tqm_mem[9];
}; };
@@ -1457,20 +1472,25 @@ struct bnxt {
 	u32			msg_enable;

 	u32			fw_cap;
 	#define BNXT_FW_CAP_SHORT_CMD		0x00000001
 	#define BNXT_FW_CAP_LLDP_AGENT		0x00000002
 	#define BNXT_FW_CAP_DCBX_AGENT		0x00000004
 	#define BNXT_FW_CAP_NEW_RM		0x00000008
 	#define BNXT_FW_CAP_IF_CHANGE		0x00000010
+	#define BNXT_FW_CAP_KONG_MB_CHNL	0x00000080
+	#define BNXT_FW_CAP_OVS_64BIT_HANDLE	0x00000400

 #define BNXT_NEW_RM(bp)		((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
 	u32			hwrm_spec_code;
 	u16			hwrm_cmd_seq;
-	u32			hwrm_intr_seq_id;
+	u16			hwrm_cmd_kong_seq;
+	u16			hwrm_intr_seq_id;
 	void			*hwrm_short_cmd_req_addr;
 	dma_addr_t		hwrm_short_cmd_req_dma_addr;
 	void			*hwrm_cmd_resp_addr;
 	dma_addr_t		hwrm_cmd_resp_dma_addr;
+	void			*hwrm_cmd_kong_resp_addr;
+	dma_addr_t		hwrm_cmd_kong_resp_dma_addr;

 	struct rtnl_link_stats64 net_stats_prev;
 	struct rx_port_stats	*hw_rx_port_stats;
@@ -1672,6 +1692,66 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db,
 	}
 }

+static inline bool bnxt_cfa_hwrm_message(u16 req_type)
+{
+	switch (req_type) {
+	case HWRM_CFA_ENCAP_RECORD_ALLOC:
+	case HWRM_CFA_ENCAP_RECORD_FREE:
+	case HWRM_CFA_DECAP_FILTER_ALLOC:
+	case HWRM_CFA_DECAP_FILTER_FREE:
+	case HWRM_CFA_NTUPLE_FILTER_ALLOC:
+	case HWRM_CFA_NTUPLE_FILTER_FREE:
+	case HWRM_CFA_NTUPLE_FILTER_CFG:
+	case HWRM_CFA_EM_FLOW_ALLOC:
+	case HWRM_CFA_EM_FLOW_FREE:
+	case HWRM_CFA_EM_FLOW_CFG:
+	case HWRM_CFA_FLOW_ALLOC:
+	case HWRM_CFA_FLOW_FREE:
+	case HWRM_CFA_FLOW_INFO:
+	case HWRM_CFA_FLOW_FLUSH:
+	case HWRM_CFA_FLOW_STATS:
+	case HWRM_CFA_METER_PROFILE_ALLOC:
+	case HWRM_CFA_METER_PROFILE_FREE:
+	case HWRM_CFA_METER_PROFILE_CFG:
+	case HWRM_CFA_METER_INSTANCE_ALLOC:
+	case HWRM_CFA_METER_INSTANCE_FREE:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req)
+{
+	return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
+		bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type)));
+}
+
+static inline bool bnxt_hwrm_kong_chnl(struct bnxt *bp, struct input *req)
+{
+	return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL &&
+		req->resp_addr == cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr));
+}
+
+static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req)
+{
+	if (bnxt_hwrm_kong_chnl(bp, (struct input *)req))
+		return bp->hwrm_cmd_kong_resp_addr;
+	else
+		return bp->hwrm_cmd_resp_addr;
+}
+
+static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, u16 dst)
+{
+	u16 seq_id;
+
+	if (dst == BNXT_HWRM_CHNL_CHIMP)
+		seq_id = bp->hwrm_cmd_seq++;
+	else
+		seq_id = bp->hwrm_cmd_kong_seq++;
+	return seq_id;
+}
+
 extern const u16 bnxt_lhint_arr[];

 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
...
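
These helpers are consumed by the HWRM send path in bnxt.c, whose diff is collapsed above. A simplified, illustrative sketch of that dispatch is shown below; example_hwrm_send() and its locals are invented names, and the real send routine also handles short commands, copies the request into the mailbox BAR window, and polls the response valid bit:

/* Illustrative only: steering a request to the KONG channel using the
 * defines and inline helpers from this header.
 */
static int example_hwrm_send(struct bnxt *bp, struct input *req)
{
	u16 dst = BNXT_HWRM_CHNL_CHIMP;
	u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
	u32 doorbell = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;

	if (bnxt_kong_hwrm_message(bp, req)) {
		/* CFA flow commands bypass CHIMP and go straight to the
		 * KONG processor, which owns flow offloads. */
		dst = BNXT_HWRM_CHNL_KONG;
		bar_offset = BNXT_GRCPF_REG_KONG_COMM;
		doorbell = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
		req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr);
	}

	/* Each channel keeps its own sequence counter and response buffer. */
	req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));

	/* ... copy req into the BAR window at bar_offset, write the
	 * doorbell register, then poll the valid byte in
	 * bnxt_get_hwrm_resp_addr(bp, req) ... */
	return 0;
}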
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -337,18 +337,21 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
 	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
 }

-static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
+static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
+				   struct bnxt_tc_flow_node *flow_node)
 {
 	struct hwrm_cfa_flow_free_input req = { 0 };
 	int rc;

 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
-	req.flow_handle = flow_handle;
+	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
+		req.ext_flow_handle = flow_node->ext_flow_handle;
+	else
+		req.flow_handle = flow_node->flow_handle;

 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
-		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
-			    __func__, flow_handle, rc);
+		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

 	if (rc)
 		rc = -EIO;
@@ -418,13 +421,14 @@ static bool bits_set(void *key, int len)

 static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
 				    __le16 ref_flow_handle,
-				    __le32 tunnel_handle, __le16 *flow_handle)
+				    __le32 tunnel_handle,
+				    struct bnxt_tc_flow_node *flow_node)
 {
-	struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
 	struct bnxt_tc_actions *actions = &flow->actions;
 	struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
 	struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
 	struct hwrm_cfa_flow_alloc_input req = { 0 };
+	struct hwrm_cfa_flow_alloc_output *resp;
 	u16 flow_flags = 0, action_flags = 0;
 	int rc;
@@ -527,8 +531,23 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,

 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (!rc)
-		*flow_handle = resp->flow_handle;
+	if (!rc) {
+		resp = bnxt_get_hwrm_resp_addr(bp, &req);
+		/* CFA_FLOW_ALLOC response interpretation:
+		 *		    fw with	    fw with
+		 *		    16-bit	    64-bit
+		 *		    flow handle	    flow handle
+		 *		    ===========	    ===========
+		 * flow_handle	    flow handle	    flow context id
+		 * ext_flow_handle  INVALID	    flow handle
+		 * flow_id	    INVALID	    flow counter id
+		 */
+		flow_node->flow_handle = resp->flow_handle;
+		if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
+			flow_node->ext_flow_handle = resp->ext_flow_handle;
+			flow_node->flow_id = resp->flow_id;
+		}
+	}
 	mutex_unlock(&bp->hwrm_cmd_lock);

 	if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
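
The comment table above is the crux of the 64-bit handle change: new firmware returns three identifiers per flow, and each follow-up command consumes a different one. A compact, illustrative restatement (the helper name is invented; the fields are the ones added to bnxt_tc_flow_node in bnxt_tc.h below):

/* Illustrative only: which saved identifier each later HWRM call uses
 * when BNXT_FW_CAP_OVS_64BIT_HANDLE is set.
 */
static inline void example_handle_roles(struct bnxt_tc_flow_node *node)
{
	(void)node->flow_handle;	/* 16-bit flow context id */
	(void)node->ext_flow_handle;	/* 64-bit handle -> CFA_FLOW_FREE */
	(void)node->flow_id;		/* 32-bit counter id -> CFA_FLOW_STATS */
}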
@@ -544,9 +563,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
 				       __le32 ref_decap_handle,
 				       __le32 *decap_filter_handle)
 {
-	struct hwrm_cfa_decap_filter_alloc_output *resp =
-						bp->hwrm_cmd_resp_addr;
 	struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
+	struct hwrm_cfa_decap_filter_alloc_output *resp;
 	struct ip_tunnel_key *tun_key = &flow->tun_key;
 	u32 enables = 0;
 	int rc;
@@ -599,10 +617,12 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,

 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (!rc)
+	if (!rc) {
+		resp = bnxt_get_hwrm_resp_addr(bp, &req);
 		*decap_filter_handle = resp->decap_filter_id;
-	else
+	} else {
 		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+	}
 	mutex_unlock(&bp->hwrm_cmd_lock);

 	if (rc)
@@ -633,9 +653,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
 				       struct bnxt_tc_l2_key *l2_info,
 				       __le32 *encap_record_handle)
 {
-	struct hwrm_cfa_encap_record_alloc_output *resp =
-						bp->hwrm_cmd_resp_addr;
 	struct hwrm_cfa_encap_record_alloc_input req = { 0 };
+	struct hwrm_cfa_encap_record_alloc_output *resp;
 	struct hwrm_cfa_encap_data_vxlan *encap =
 			(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
 	struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
@@ -667,10 +686,12 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,

 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-	if (!rc)
+	if (!rc) {
+		resp = bnxt_get_hwrm_resp_addr(bp, &req);
 		*encap_record_handle = resp->encap_record_id;
-	else
+	} else {
 		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+	}
 	mutex_unlock(&bp->hwrm_cmd_lock);

 	if (rc)
@@ -1224,7 +1245,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
 	int rc;

 	/* send HWRM cmd to free the flow-id */
-	bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);
+	bnxt_hwrm_cfa_flow_free(bp, flow_node);

 	mutex_lock(&tc_info->lock);
@@ -1246,6 +1267,12 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
 	return 0;
 }

+static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
+				 u16 src_fid)
+{
+	flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
+}
+
 static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
 				u16 src_fid)
 {
@@ -1293,6 +1320,9 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,

 	bnxt_tc_set_src_fid(bp, flow, src_fid);

+	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
+		bnxt_tc_set_flow_dir(bp, flow, src_fid);
+
 	if (!bnxt_tc_can_offload(bp, flow)) {
 		rc = -ENOSPC;
 		goto free_node;
@@ -1320,7 +1350,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
 	/* send HWRM cmd to alloc the flow */
 	rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
-				      tunnel_handle, &new_node->flow_handle);
+				      tunnel_handle, new_node);
 	if (rc)
 		goto put_tunnel;
@@ -1336,7 +1366,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
 	return 0;

 hwrm_flow_free:
-	bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
+	bnxt_hwrm_cfa_flow_free(bp, new_node);
 put_tunnel:
 	bnxt_tc_put_tunnel_handle(bp, flow, new_node);
 put_l2:
@@ -1397,13 +1427,40 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
 	return 0;
 }

+static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
+				    struct bnxt_tc_flow_node *flow_node,
+				    __le16 *flow_handle, __le32 *flow_id)
+{
+	u16 handle;
+
+	if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
+		*flow_id = flow_node->flow_id;
+
+		/* If flow_id is used to fetch flow stats then:
+		 * 1. lower 12 bits of flow_handle must be set to all 1s.
+		 * 2. 15th bit of flow_handle must specify the flow
+		 *    direction (TX/RX).
+		 */
+		if (flow_node->flow.dir == BNXT_DIR_RX)
+			handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
+				 CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
+		else
+			handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
+
+		*flow_handle = cpu_to_le16(handle);
+	} else {
+		*flow_handle = flow_node->flow_handle;
+	}
+}
+
 static int
 bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
 			     struct bnxt_tc_stats_batch stats_batch[])
 {
-	struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
 	struct hwrm_cfa_flow_stats_input req = { 0 };
+	struct hwrm_cfa_flow_stats_output *resp;
 	__le16 *req_flow_handles = &req.flow_handle_0;
+	__le32 *req_flow_ids = &req.flow_id_0;
 	int rc, i;

 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
@@ -1411,14 +1468,19 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
 	for (i = 0; i < num_flows; i++) {
 		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;

-		req_flow_handles[i] = flow_node->flow_handle;
+		bnxt_fill_cfa_stats_req(bp, flow_node,
+					&req_flow_handles[i], &req_flow_ids[i]);
 	}

 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc) {
-		__le64 *resp_packets = &resp->packet_0;
-		__le64 *resp_bytes = &resp->byte_0;
+		__le64 *resp_packets;
+		__le64 *resp_bytes;
+
+		resp = bnxt_get_hwrm_resp_addr(bp, &req);
+		resp_packets = &resp->packet_0;
+		resp_bytes = &resp->byte_0;

 		for (i = 0; i < num_flows; i++) {
 			stats_batch[i].hw_stats.packets =
...
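
A worked example of the encoding done by bnxt_fill_cfa_stats_req() above, assuming the HWRM spec values CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK = 0xfff (lower 12 bits) and CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX = 0x8000 (bit 15); both values are assumptions here, and the EX_ names stand in for the spec constants:

/* Illustrative only: the two flow_handle patterns sent alongside a
 * 32-bit flow_id when stats are fetched by counter id.
 */
#define EX_FLOW_HANDLE_MAX_MASK	0x0fff	/* assumed: lower 12 bits all 1s */
#define EX_FLOW_HANDLE_DIR_RX	0x8000	/* assumed: bit 15 set for RX */

/* RX flow: 0x8000 | 0x0fff = 0x8fff; TX flow: 0x0fff. The firmware then
 * identifies the flow by the 32-bit flow_id, not the 16-bit handle. */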
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h
@@ -98,6 +98,9 @@ struct bnxt_tc_flow {
 	/* flow applicable to pkts ingressing on this fid */
 	u16			src_fid;
+	u8			dir;
+#define BNXT_DIR_RX	1
+#define BNXT_DIR_TX	0
 	struct bnxt_tc_l2_key	l2_key;
 	struct bnxt_tc_l2_key	l2_mask;
 	struct bnxt_tc_l3_key	l3_key;
@@ -170,7 +173,9 @@ struct bnxt_tc_flow_node {
 	struct bnxt_tc_flow	flow;

+	__le64			ext_flow_handle;
 	__le16			flow_handle;
+	__le32			flow_id;

 	/* L2 node in l2 hashtable that shares flow's l2 key */
 	struct bnxt_tc_l2_node	*l2_node;
...