Commit 2c4b14ea authored by Shiraz Saleem's avatar Shiraz Saleem Committed by Jason Gunthorpe

RDMA/irdma: Remove enum irdma_status_code

Replace use of custom irdma_status_code with linux error codes.

Remove enum irdma_status_code and header in which its defined.

Link: https://lore.kernel.org/r/20220217151851.1518-2-shiraz.saleem@intel.com
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 4eaa29b4
...@@ -1501,15 +1501,14 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port, ...@@ -1501,15 +1501,14 @@ irdma_find_listener(struct irdma_cm_core *cm_core, u32 *dst_addr, u16 dst_port,
* @cm_info: CM info for parent listen node * @cm_info: CM info for parent listen node
* @cm_parent_listen_node: The parent listen node * @cm_parent_listen_node: The parent listen node
*/ */
static enum irdma_status_code static int irdma_del_multiple_qhash(struct irdma_device *iwdev,
irdma_del_multiple_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
struct irdma_cm_info *cm_info, struct irdma_cm_listener *cm_parent_listen_node)
struct irdma_cm_listener *cm_parent_listen_node)
{ {
struct irdma_cm_listener *child_listen_node; struct irdma_cm_listener *child_listen_node;
enum irdma_status_code ret = IRDMA_ERR_CFG;
struct list_head *pos, *tpos; struct list_head *pos, *tpos;
unsigned long flags; unsigned long flags;
int ret = -EINVAL;
spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags); spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
list_for_each_safe (pos, tpos, list_for_each_safe (pos, tpos,
...@@ -1618,16 +1617,16 @@ u16 irdma_get_vlan_ipv4(u32 *addr) ...@@ -1618,16 +1617,16 @@ u16 irdma_get_vlan_ipv4(u32 *addr)
* Adds a qhash and a child listen node for every IPv6 address * Adds a qhash and a child listen node for every IPv6 address
* on the adapter and adds the associated qhash filter * on the adapter and adds the associated qhash filter
*/ */
static enum irdma_status_code static int irdma_add_mqh_6(struct irdma_device *iwdev,
irdma_add_mqh_6(struct irdma_device *iwdev, struct irdma_cm_info *cm_info, struct irdma_cm_info *cm_info,
struct irdma_cm_listener *cm_parent_listen_node) struct irdma_cm_listener *cm_parent_listen_node)
{ {
struct net_device *ip_dev; struct net_device *ip_dev;
struct inet6_dev *idev; struct inet6_dev *idev;
struct inet6_ifaddr *ifp, *tmp; struct inet6_ifaddr *ifp, *tmp;
enum irdma_status_code ret = 0;
struct irdma_cm_listener *child_listen_node; struct irdma_cm_listener *child_listen_node;
unsigned long flags; unsigned long flags;
int ret = 0;
rtnl_lock(); rtnl_lock();
for_each_netdev(&init_net, ip_dev) { for_each_netdev(&init_net, ip_dev) {
...@@ -1653,7 +1652,7 @@ irdma_add_mqh_6(struct irdma_device *iwdev, struct irdma_cm_info *cm_info, ...@@ -1653,7 +1652,7 @@ irdma_add_mqh_6(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
child_listen_node); child_listen_node);
if (!child_listen_node) { if (!child_listen_node) {
ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n"); ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
ret = IRDMA_ERR_NO_MEMORY; ret = -ENOMEM;
goto exit; goto exit;
} }
...@@ -1700,16 +1699,16 @@ irdma_add_mqh_6(struct irdma_device *iwdev, struct irdma_cm_info *cm_info, ...@@ -1700,16 +1699,16 @@ irdma_add_mqh_6(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
* Adds a qhash and a child listen node for every IPv4 address * Adds a qhash and a child listen node for every IPv4 address
* on the adapter and adds the associated qhash filter * on the adapter and adds the associated qhash filter
*/ */
static enum irdma_status_code static int irdma_add_mqh_4(struct irdma_device *iwdev,
irdma_add_mqh_4(struct irdma_device *iwdev, struct irdma_cm_info *cm_info, struct irdma_cm_info *cm_info,
struct irdma_cm_listener *cm_parent_listen_node) struct irdma_cm_listener *cm_parent_listen_node)
{ {
struct net_device *ip_dev; struct net_device *ip_dev;
struct in_device *idev; struct in_device *idev;
struct irdma_cm_listener *child_listen_node; struct irdma_cm_listener *child_listen_node;
enum irdma_status_code ret = 0;
unsigned long flags; unsigned long flags;
const struct in_ifaddr *ifa; const struct in_ifaddr *ifa;
int ret = 0;
rtnl_lock(); rtnl_lock();
for_each_netdev(&init_net, ip_dev) { for_each_netdev(&init_net, ip_dev) {
...@@ -1734,7 +1733,7 @@ irdma_add_mqh_4(struct irdma_device *iwdev, struct irdma_cm_info *cm_info, ...@@ -1734,7 +1733,7 @@ irdma_add_mqh_4(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
if (!child_listen_node) { if (!child_listen_node) {
ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n"); ibdev_dbg(&iwdev->ibdev, "CM: listener memory allocation\n");
in_dev_put(idev); in_dev_put(idev);
ret = IRDMA_ERR_NO_MEMORY; ret = -ENOMEM;
goto exit; goto exit;
} }
...@@ -1781,9 +1780,9 @@ irdma_add_mqh_4(struct irdma_device *iwdev, struct irdma_cm_info *cm_info, ...@@ -1781,9 +1780,9 @@ irdma_add_mqh_4(struct irdma_device *iwdev, struct irdma_cm_info *cm_info,
* @cm_info: CM info for parent listen node * @cm_info: CM info for parent listen node
* @cm_listen_node: The parent listen node * @cm_listen_node: The parent listen node
*/ */
static enum irdma_status_code static int irdma_add_mqh(struct irdma_device *iwdev,
irdma_add_mqh(struct irdma_device *iwdev, struct irdma_cm_info *cm_info, struct irdma_cm_info *cm_info,
struct irdma_cm_listener *cm_listen_node) struct irdma_cm_listener *cm_listen_node)
{ {
if (cm_info->ipv4) if (cm_info->ipv4)
return irdma_add_mqh_4(iwdev, cm_info, cm_listen_node); return irdma_add_mqh_4(iwdev, cm_info, cm_listen_node);
...@@ -3205,8 +3204,7 @@ static void irdma_cm_free_ah_nop(struct irdma_cm_node *cm_node) ...@@ -3205,8 +3204,7 @@ static void irdma_cm_free_ah_nop(struct irdma_cm_node *cm_node)
* @iwdev: iwarp device structure * @iwdev: iwarp device structure
* @rdma_ver: HW version * @rdma_ver: HW version
*/ */
enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev, int irdma_setup_cm_core(struct irdma_device *iwdev, u8 rdma_ver)
u8 rdma_ver)
{ {
struct irdma_cm_core *cm_core = &iwdev->cm_core; struct irdma_cm_core *cm_core = &iwdev->cm_core;
...@@ -3216,7 +3214,7 @@ enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev, ...@@ -3216,7 +3214,7 @@ enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev,
/* Handles CM event work items send to Iwarp core */ /* Handles CM event work items send to Iwarp core */
cm_core->event_wq = alloc_ordered_workqueue("iwarp-event-wq", 0); cm_core->event_wq = alloc_ordered_workqueue("iwarp-event-wq", 0);
if (!cm_core->event_wq) if (!cm_core->event_wq)
return IRDMA_ERR_NO_MEMORY; return -ENOMEM;
INIT_LIST_HEAD(&cm_core->listen_list); INIT_LIST_HEAD(&cm_core->listen_list);
...@@ -3923,10 +3921,10 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog) ...@@ -3923,10 +3921,10 @@ int irdma_create_listen(struct iw_cm_id *cm_id, int backlog)
struct irdma_device *iwdev; struct irdma_device *iwdev;
struct irdma_cm_listener *cm_listen_node; struct irdma_cm_listener *cm_listen_node;
struct irdma_cm_info cm_info = {}; struct irdma_cm_info cm_info = {};
enum irdma_status_code err;
struct sockaddr_in *laddr; struct sockaddr_in *laddr;
struct sockaddr_in6 *laddr6; struct sockaddr_in6 *laddr6;
bool wildcard = false; bool wildcard = false;
int err;
iwdev = to_iwdev(cm_id->device); iwdev = to_iwdev(cm_id->device);
if (!iwdev) if (!iwdev)
...@@ -4337,11 +4335,11 @@ static void irdma_qhash_ctrl(struct irdma_device *iwdev, ...@@ -4337,11 +4335,11 @@ static void irdma_qhash_ctrl(struct irdma_device *iwdev,
struct list_head *child_listen_list = &parent_listen_node->child_listen_list; struct list_head *child_listen_list = &parent_listen_node->child_listen_list;
struct irdma_cm_listener *child_listen_node; struct irdma_cm_listener *child_listen_node;
struct list_head *pos, *tpos; struct list_head *pos, *tpos;
enum irdma_status_code err;
bool node_allocated = false; bool node_allocated = false;
enum irdma_quad_hash_manage_type op = ifup ? enum irdma_quad_hash_manage_type op = ifup ?
IRDMA_QHASH_MANAGE_TYPE_ADD : IRDMA_QHASH_MANAGE_TYPE_ADD :
IRDMA_QHASH_MANAGE_TYPE_DELETE; IRDMA_QHASH_MANAGE_TYPE_DELETE;
int err;
list_for_each_safe (pos, tpos, child_listen_list) { list_for_each_safe (pos, tpos, child_listen_list) {
child_listen_node = list_entry(pos, struct irdma_cm_listener, child_listen_node = list_entry(pos, struct irdma_cm_listener,
......
This diff is collapsed.
...@@ -964,7 +964,7 @@ enum irdma_cqp_op_type { ...@@ -964,7 +964,7 @@ enum irdma_cqp_op_type {
(_ring).head = ((_ring).head + 1) % size; \ (_ring).head = ((_ring).head + 1) % size; \
(_retcode) = 0; \ (_retcode) = 0; \
} else { \ } else { \
(_retcode) = IRDMA_ERR_RING_FULL; \ (_retcode) = -ENOMEM; \
} \ } \
} }
#define IRDMA_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \ #define IRDMA_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
...@@ -975,7 +975,7 @@ enum irdma_cqp_op_type { ...@@ -975,7 +975,7 @@ enum irdma_cqp_op_type {
(_ring).head = ((_ring).head + (_count)) % size; \ (_ring).head = ((_ring).head + (_count)) % size; \
(_retcode) = 0; \ (_retcode) = 0; \
} else { \ } else { \
(_retcode) = IRDMA_ERR_RING_FULL; \ (_retcode) = -ENOMEM; \
} \ } \
} }
#define IRDMA_SQ_RING_MOVE_HEAD(_ring, _retcode) \ #define IRDMA_SQ_RING_MOVE_HEAD(_ring, _retcode) \
...@@ -986,7 +986,7 @@ enum irdma_cqp_op_type { ...@@ -986,7 +986,7 @@ enum irdma_cqp_op_type {
(_ring).head = ((_ring).head + 1) % size; \ (_ring).head = ((_ring).head + 1) % size; \
(_retcode) = 0; \ (_retcode) = 0; \
} else { \ } else { \
(_retcode) = IRDMA_ERR_RING_FULL; \ (_retcode) = -ENOMEM; \
} \ } \
} }
#define IRDMA_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \ #define IRDMA_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
...@@ -997,7 +997,7 @@ enum irdma_cqp_op_type { ...@@ -997,7 +997,7 @@ enum irdma_cqp_op_type {
(_ring).head = ((_ring).head + (_count)) % size; \ (_ring).head = ((_ring).head + (_count)) % size; \
(_retcode) = 0; \ (_retcode) = 0; \
} else { \ } else { \
(_retcode) = IRDMA_ERR_RING_FULL; \ (_retcode) = -ENOMEM; \
} \ } \
} }
#define IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \ #define IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \
......
This diff is collapsed.
...@@ -141,40 +141,29 @@ struct irdma_hmc_del_obj_info { ...@@ -141,40 +141,29 @@ struct irdma_hmc_del_obj_info {
bool privileged; bool privileged;
}; };
enum irdma_status_code irdma_copy_dma_mem(struct irdma_hw *hw, void *dest_buf, int irdma_copy_dma_mem(struct irdma_hw *hw, void *dest_buf,
struct irdma_dma_mem *src_mem, struct irdma_dma_mem *src_mem, u64 src_offset, u64 size);
u64 src_offset, u64 size); int irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev,
enum irdma_status_code struct irdma_hmc_create_obj_info *info);
irdma_sc_create_hmc_obj(struct irdma_sc_dev *dev, int irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev,
struct irdma_hmc_create_obj_info *info); struct irdma_hmc_del_obj_info *info, bool reset);
enum irdma_status_code irdma_sc_del_hmc_obj(struct irdma_sc_dev *dev, int irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, u64 pa, u32 sd_idx,
struct irdma_hmc_del_obj_info *info, enum irdma_sd_entry_type type,
bool reset); bool setsd);
enum irdma_status_code irdma_hmc_sd_one(struct irdma_sc_dev *dev, u8 hmc_fn_id, int irdma_update_sds_noccq(struct irdma_sc_dev *dev,
u64 pa, u32 sd_idx, struct irdma_update_sds_info *info);
enum irdma_sd_entry_type type,
bool setsd);
enum irdma_status_code
irdma_update_sds_noccq(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info);
struct irdma_vfdev *irdma_vfdev_from_fpm(struct irdma_sc_dev *dev, struct irdma_vfdev *irdma_vfdev_from_fpm(struct irdma_sc_dev *dev,
u8 hmc_fn_id); u8 hmc_fn_id);
struct irdma_hmc_info *irdma_vf_hmcinfo_from_fpm(struct irdma_sc_dev *dev, struct irdma_hmc_info *irdma_vf_hmcinfo_from_fpm(struct irdma_sc_dev *dev,
u8 hmc_fn_id); u8 hmc_fn_id);
enum irdma_status_code irdma_add_sd_table_entry(struct irdma_hw *hw, int irdma_add_sd_table_entry(struct irdma_hw *hw,
struct irdma_hmc_info *hmc_info, struct irdma_hmc_info *hmc_info, u32 sd_index,
u32 sd_index, enum irdma_sd_entry_type type, u64 direct_mode_sz);
enum irdma_sd_entry_type type, int irdma_add_pd_table_entry(struct irdma_sc_dev *dev,
u64 direct_mode_sz); struct irdma_hmc_info *hmc_info, u32 pd_index,
enum irdma_status_code irdma_add_pd_table_entry(struct irdma_sc_dev *dev, struct irdma_dma_mem *rsrc_pg);
struct irdma_hmc_info *hmc_info, int irdma_remove_pd_bp(struct irdma_sc_dev *dev,
u32 pd_index, struct irdma_hmc_info *hmc_info, u32 idx);
struct irdma_dma_mem *rsrc_pg); int irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info, u32 idx);
enum irdma_status_code irdma_remove_pd_bp(struct irdma_sc_dev *dev, int irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx);
struct irdma_hmc_info *hmc_info,
u32 idx);
enum irdma_status_code irdma_prep_remove_sd_bp(struct irdma_hmc_info *hmc_info,
u32 idx);
enum irdma_status_code
irdma_prep_remove_pd_page(struct irdma_hmc_info *hmc_info, u32 idx);
#endif /* IRDMA_HMC_H */ #endif /* IRDMA_HMC_H */
This diff is collapsed.
...@@ -3,7 +3,6 @@ ...@@ -3,7 +3,6 @@
#include "osdep.h" #include "osdep.h"
#include "type.h" #include "type.h"
#include "i40iw_hw.h" #include "i40iw_hw.h"
#include "status.h"
#include "protos.h" #include "protos.h"
static u32 i40iw_regs[IRDMA_MAX_REGS] = { static u32 i40iw_regs[IRDMA_MAX_REGS] = {
......
...@@ -162,8 +162,8 @@ static void irdma_request_reset(struct irdma_pci_f *rf) ...@@ -162,8 +162,8 @@ static void irdma_request_reset(struct irdma_pci_f *rf)
* @vsi: vsi structure * @vsi: vsi structure
* @tc_node: Traffic class node * @tc_node: Traffic class node
*/ */
static enum irdma_status_code irdma_lan_register_qset(struct irdma_sc_vsi *vsi, static int irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node) struct irdma_ws_node *tc_node)
{ {
struct irdma_device *iwdev = vsi->back_vsi; struct irdma_device *iwdev = vsi->back_vsi;
struct ice_pf *pf = iwdev->rf->cdev; struct ice_pf *pf = iwdev->rf->cdev;
...@@ -176,7 +176,7 @@ static enum irdma_status_code irdma_lan_register_qset(struct irdma_sc_vsi *vsi, ...@@ -176,7 +176,7 @@ static enum irdma_status_code irdma_lan_register_qset(struct irdma_sc_vsi *vsi,
ret = ice_add_rdma_qset(pf, &qset); ret = ice_add_rdma_qset(pf, &qset);
if (ret) { if (ret) {
ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n"); ibdev_dbg(&iwdev->ibdev, "WS: LAN alloc_res for rdma qset failed.\n");
return IRDMA_ERR_REG_QSET; return -EINVAL;
} }
tc_node->l2_sched_node_id = qset.teid; tc_node->l2_sched_node_id = qset.teid;
......
...@@ -40,7 +40,6 @@ ...@@ -40,7 +40,6 @@
#include <rdma/ib_umem.h> #include <rdma/ib_umem.h>
#include <rdma/ib_cache.h> #include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h> #include <rdma/uverbs_ioctl.h>
#include "status.h"
#include "osdep.h" #include "osdep.h"
#include "defs.h" #include "defs.h"
#include "hmc.h" #include "hmc.h"
...@@ -242,8 +241,8 @@ struct irdma_qvlist_info { ...@@ -242,8 +241,8 @@ struct irdma_qvlist_info {
struct irdma_gen_ops { struct irdma_gen_ops {
void (*request_reset)(struct irdma_pci_f *rf); void (*request_reset)(struct irdma_pci_f *rf);
enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi, int (*register_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node); struct irdma_ws_node *tc_node);
void (*unregister_qset)(struct irdma_sc_vsi *vsi, void (*unregister_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node); struct irdma_ws_node *tc_node);
}; };
...@@ -457,10 +456,10 @@ static inline void irdma_free_rsrc(struct irdma_pci_f *rf, ...@@ -457,10 +456,10 @@ static inline void irdma_free_rsrc(struct irdma_pci_f *rf,
spin_unlock_irqrestore(&rf->rsrc_lock, flags); spin_unlock_irqrestore(&rf->rsrc_lock, flags);
} }
enum irdma_status_code irdma_ctrl_init_hw(struct irdma_pci_f *rf); int irdma_ctrl_init_hw(struct irdma_pci_f *rf);
void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf); void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf);
enum irdma_status_code irdma_rt_init_hw(struct irdma_device *iwdev, int irdma_rt_init_hw(struct irdma_device *iwdev,
struct irdma_l2params *l2params); struct irdma_l2params *l2params);
void irdma_rt_deinit_hw(struct irdma_device *iwdev); void irdma_rt_deinit_hw(struct irdma_device *iwdev);
void irdma_qp_add_ref(struct ib_qp *ibqp); void irdma_qp_add_ref(struct ib_qp *ibqp);
void irdma_qp_rem_ref(struct ib_qp *ibqp); void irdma_qp_rem_ref(struct ib_qp *ibqp);
...@@ -489,9 +488,8 @@ void irdma_cm_disconn(struct irdma_qp *qp); ...@@ -489,9 +488,8 @@ void irdma_cm_disconn(struct irdma_qp *qp);
bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd, bool irdma_cqp_crit_err(struct irdma_sc_dev *dev, u8 cqp_cmd,
u16 maj_err_code, u16 min_err_code); u16 maj_err_code, u16 min_err_code);
enum irdma_status_code int irdma_handle_cqp_op(struct irdma_pci_f *rf,
irdma_handle_cqp_op(struct irdma_pci_f *rf, struct irdma_cqp_request *cqp_request);
struct irdma_cqp_request *cqp_request);
int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask, int irdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
struct ib_udata *udata); struct ib_udata *udata);
...@@ -500,21 +498,17 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr, ...@@ -500,21 +498,17 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq); void irdma_cq_wq_destroy(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf); void irdma_cleanup_pending_cqp_op(struct irdma_pci_f *rf);
enum irdma_status_code irdma_hw_modify_qp(struct irdma_device *iwdev, int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
struct irdma_qp *iwqp, struct irdma_modify_qp_info *info, bool wait);
struct irdma_modify_qp_info *info, int irdma_qp_suspend_resume(struct irdma_sc_qp *qp, bool suspend);
bool wait); int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
enum irdma_status_code irdma_qp_suspend_resume(struct irdma_sc_qp *qp, enum irdma_quad_entry_type etype,
bool suspend); enum irdma_quad_hash_manage_type mtype, void *cmnode,
enum irdma_status_code bool wait);
irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
enum irdma_quad_entry_type etype,
enum irdma_quad_hash_manage_type mtype, void *cmnode,
bool wait);
void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf); void irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf);
void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp); void irdma_free_sqbuf(struct irdma_sc_vsi *vsi, void *bufp);
void irdma_free_qp_rsrc(struct irdma_qp *iwqp); void irdma_free_qp_rsrc(struct irdma_qp *iwqp);
enum irdma_status_code irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver); int irdma_setup_cm_core(struct irdma_device *iwdev, u8 ver);
void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core); void irdma_cleanup_cm_core(struct irdma_cm_core *cm_core);
void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term, void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
u8 term_len); u8 term_len);
...@@ -523,10 +517,8 @@ int irdma_send_reset(struct irdma_cm_node *cm_node); ...@@ -523,10 +517,8 @@ int irdma_send_reset(struct irdma_cm_node *cm_node);
struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core, struct irdma_cm_node *irdma_find_node(struct irdma_cm_core *cm_core,
u16 rem_port, u32 *rem_addr, u16 loc_port, u16 rem_port, u32 *rem_addr, u16 loc_port,
u32 *loc_addr, u16 vlan_id); u32 *loc_addr, u16 vlan_id);
enum irdma_status_code irdma_hw_flush_wqes(struct irdma_pci_f *rf, int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
struct irdma_sc_qp *qp, struct irdma_qp_flush_info *info, bool wait);
struct irdma_qp_flush_info *info,
bool wait);
void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp,
struct irdma_gen_ae_info *info, bool wait); struct irdma_gen_ae_info *info, bool wait);
void irdma_copy_ip_ntohl(u32 *dst, __be32 *src); void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);
......
...@@ -43,32 +43,28 @@ enum irdma_status_code irdma_vf_wait_vchnl_resp(struct irdma_sc_dev *dev); ...@@ -43,32 +43,28 @@ enum irdma_status_code irdma_vf_wait_vchnl_resp(struct irdma_sc_dev *dev);
bool irdma_vf_clear_to_send(struct irdma_sc_dev *dev); bool irdma_vf_clear_to_send(struct irdma_sc_dev *dev);
void irdma_add_dev_ref(struct irdma_sc_dev *dev); void irdma_add_dev_ref(struct irdma_sc_dev *dev);
void irdma_put_dev_ref(struct irdma_sc_dev *dev); void irdma_put_dev_ref(struct irdma_sc_dev *dev);
enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc, int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len,
void *addr, u32 len, u32 val); u32 val);
struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev, struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
struct irdma_puda_buf *buf); struct irdma_puda_buf *buf);
void irdma_send_ieq_ack(struct irdma_sc_qp *qp); void irdma_send_ieq_ack(struct irdma_sc_qp *qp);
void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len,
u32 seqnum); u32 seqnum);
void irdma_free_hash_desc(struct shash_desc *hash_desc); void irdma_free_hash_desc(struct shash_desc *hash_desc);
enum irdma_status_code irdma_init_hash_desc(struct shash_desc **hash_desc); int irdma_init_hash_desc(struct shash_desc **hash_desc);
enum irdma_status_code int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, struct irdma_puda_buf *buf);
struct irdma_puda_buf *buf); int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev, struct irdma_update_sds_info *info);
struct irdma_update_sds_info *info); int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
enum irdma_status_code struct irdma_hmc_fcn_info *hmcfcninfo,
irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev, u16 *pmf_idx);
struct irdma_hmc_fcn_info *hmcfcninfo, int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
u16 *pmf_idx); struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
enum irdma_status_code int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev, struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
struct irdma_dma_mem *val_mem, u8 hmc_fn_id); int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
enum irdma_status_code struct irdma_dma_mem *mem);
irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
enum irdma_status_code irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem);
void *irdma_remove_cqp_head(struct irdma_sc_dev *dev); void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term, void irdma_term_modify_qp(struct irdma_sc_qp *qp, u8 next_state, u8 term,
u8 term_len); u8 term_len);
...@@ -80,7 +76,7 @@ void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi); ...@@ -80,7 +76,7 @@ void irdma_hw_stats_stop_timer(struct irdma_sc_vsi *vsi);
void wr32(struct irdma_hw *hw, u32 reg, u32 val); void wr32(struct irdma_hw *hw, u32 reg, u32 val);
u32 rd32(struct irdma_hw *hw, u32 reg); u32 rd32(struct irdma_hw *hw, u32 reg);
u64 rd64(struct irdma_hw *hw, u32 reg); u64 rd64(struct irdma_hw *hw, u32 reg);
enum irdma_status_code irdma_map_vm_page_list(struct irdma_hw *hw, void *va, int irdma_map_vm_page_list(struct irdma_hw *hw, void *va, dma_addr_t *pg_dma,
dma_addr_t *pg_dma, u32 pg_cnt); u32 pg_cnt);
void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt); void irdma_unmap_vm_page_list(struct irdma_hw *hw, dma_addr_t *pg_dma, u32 pg_cnt);
#endif /* IRDMA_OSDEP_H */ #endif /* IRDMA_OSDEP_H */
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */ /* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h" #include "osdep.h"
#include "status.h"
#include "hmc.h" #include "hmc.h"
#include "defs.h" #include "defs.h"
#include "type.h" #include "type.h"
#include "protos.h" #include "protos.h"
#include "pble.h" #include "pble.h"
static enum irdma_status_code static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
/** /**
* irdma_destroy_pble_prm - destroy prm during module unload * irdma_destroy_pble_prm - destroy prm during module unload
...@@ -35,13 +33,12 @@ void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc) ...@@ -35,13 +33,12 @@ void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
* @dev: irdma_sc_dev struct * @dev: irdma_sc_dev struct
* @pble_rsrc: pble resources * @pble_rsrc: pble resources
*/ */
enum irdma_status_code int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
irdma_hmc_init_pble(struct irdma_sc_dev *dev, struct irdma_hmc_pble_rsrc *pble_rsrc)
struct irdma_hmc_pble_rsrc *pble_rsrc)
{ {
struct irdma_hmc_info *hmc_info; struct irdma_hmc_info *hmc_info;
u32 fpm_idx = 0; u32 fpm_idx = 0;
enum irdma_status_code status = 0; int status = 0;
hmc_info = dev->hmc_info; hmc_info = dev->hmc_info;
pble_rsrc->dev = dev; pble_rsrc->dev = dev;
...@@ -60,7 +57,7 @@ irdma_hmc_init_pble(struct irdma_sc_dev *dev, ...@@ -60,7 +57,7 @@ irdma_hmc_init_pble(struct irdma_sc_dev *dev,
INIT_LIST_HEAD(&pble_rsrc->pinfo.clist); INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
if (add_pble_prm(pble_rsrc)) { if (add_pble_prm(pble_rsrc)) {
irdma_destroy_pble_prm(pble_rsrc); irdma_destroy_pble_prm(pble_rsrc);
status = IRDMA_ERR_NO_MEMORY; status = -ENOMEM;
} }
return status; return status;
...@@ -84,12 +81,11 @@ static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -84,12 +81,11 @@ static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
* @pble_rsrc: pble resource ptr * @pble_rsrc: pble resource ptr
* @info: page info for sd * @info: page info for sd
*/ */
static enum irdma_status_code static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc, struct irdma_add_page_info *info)
struct irdma_add_page_info *info)
{ {
struct irdma_sc_dev *dev = pble_rsrc->dev; struct irdma_sc_dev *dev = pble_rsrc->dev;
enum irdma_status_code ret_code = 0; int ret_code = 0;
struct sd_pd_idx *idx = &info->idx; struct sd_pd_idx *idx = &info->idx;
struct irdma_chunk *chunk = info->chunk; struct irdma_chunk *chunk = info->chunk;
struct irdma_hmc_info *hmc_info = info->hmc_info; struct irdma_hmc_info *hmc_info = info->hmc_info;
...@@ -137,9 +133,8 @@ static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr) ...@@ -137,9 +133,8 @@ static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
* @pble_rsrc: pble resource management * @pble_rsrc: pble resource management
* @info: page info for sd * @info: page info for sd
*/ */
static enum irdma_status_code static int add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc, struct irdma_add_page_info *info)
struct irdma_add_page_info *info)
{ {
struct irdma_sc_dev *dev = pble_rsrc->dev; struct irdma_sc_dev *dev = pble_rsrc->dev;
u8 *addr; u8 *addr;
...@@ -148,13 +143,13 @@ add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -148,13 +143,13 @@ add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_hmc_sd_entry *sd_entry = info->sd_entry; struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
struct irdma_hmc_info *hmc_info = info->hmc_info; struct irdma_hmc_info *hmc_info = info->hmc_info;
struct irdma_chunk *chunk = info->chunk; struct irdma_chunk *chunk = info->chunk;
enum irdma_status_code status = 0; int status = 0;
u32 rel_pd_idx = info->idx.rel_pd_idx; u32 rel_pd_idx = info->idx.rel_pd_idx;
u32 pd_idx = info->idx.pd_idx; u32 pd_idx = info->idx.pd_idx;
u32 i; u32 i;
if (irdma_pble_get_paged_mem(chunk, info->pages)) if (irdma_pble_get_paged_mem(chunk, info->pages))
return IRDMA_ERR_NO_MEMORY; return -ENOMEM;
status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx, status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
IRDMA_SD_TYPE_PAGED, IRDMA_SD_TYPE_PAGED,
...@@ -207,8 +202,7 @@ static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev, ...@@ -207,8 +202,7 @@ static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
* add_pble_prm - add a sd entry for pble resoure * add_pble_prm - add a sd entry for pble resoure
* @pble_rsrc: pble resource management * @pble_rsrc: pble resource management
*/ */
static enum irdma_status_code static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{ {
struct irdma_sc_dev *dev = pble_rsrc->dev; struct irdma_sc_dev *dev = pble_rsrc->dev;
struct irdma_hmc_sd_entry *sd_entry; struct irdma_hmc_sd_entry *sd_entry;
...@@ -216,22 +210,22 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc) ...@@ -216,22 +210,22 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
struct irdma_chunk *chunk; struct irdma_chunk *chunk;
struct irdma_add_page_info info; struct irdma_add_page_info info;
struct sd_pd_idx *idx = &info.idx; struct sd_pd_idx *idx = &info.idx;
enum irdma_status_code ret_code = 0; int ret_code = 0;
enum irdma_sd_entry_type sd_entry_type; enum irdma_sd_entry_type sd_entry_type;
u64 sd_reg_val = 0; u64 sd_reg_val = 0;
struct irdma_virt_mem chunkmem; struct irdma_virt_mem chunkmem;
u32 pages; u32 pages;
if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE) if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
return IRDMA_ERR_NO_MEMORY; return -ENOMEM;
if (pble_rsrc->next_fpm_addr & 0xfff) if (pble_rsrc->next_fpm_addr & 0xfff)
return IRDMA_ERR_INVALID_PAGE_DESC_INDEX; return -EINVAL;
chunkmem.size = sizeof(*chunk); chunkmem.size = sizeof(*chunk);
chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL); chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
if (!chunkmem.va) if (!chunkmem.va)
return IRDMA_ERR_NO_MEMORY; return -ENOMEM;
chunk = chunkmem.va; chunk = chunkmem.va;
chunk->chunkmem = chunkmem; chunk->chunkmem = chunkmem;
...@@ -337,9 +331,8 @@ static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -337,9 +331,8 @@ static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
* @pble_rsrc: pble resource management * @pble_rsrc: pble resource management
* @palloc: level 2 pble allocation * @palloc: level 2 pble allocation
*/ */
static enum irdma_status_code static int get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, struct irdma_pble_alloc *palloc)
struct irdma_pble_alloc *palloc)
{ {
u32 lf4k, lflast, total, i; u32 lf4k, lflast, total, i;
u32 pblcnt = PBLE_PER_PAGE; u32 pblcnt = PBLE_PER_PAGE;
...@@ -347,7 +340,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -347,7 +340,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_level2 *lvl2 = &palloc->level2; struct irdma_pble_level2 *lvl2 = &palloc->level2;
struct irdma_pble_info *root = &lvl2->root; struct irdma_pble_info *root = &lvl2->root;
struct irdma_pble_info *leaf; struct irdma_pble_info *leaf;
enum irdma_status_code ret_code; int ret_code;
u64 fpm_addr; u64 fpm_addr;
/* number of full 512 (4K) leafs) */ /* number of full 512 (4K) leafs) */
...@@ -359,7 +352,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -359,7 +352,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
lvl2->leafmem.size = (sizeof(*leaf) * total); lvl2->leafmem.size = (sizeof(*leaf) * total);
lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL); lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
if (!lvl2->leafmem.va) if (!lvl2->leafmem.va)
return IRDMA_ERR_NO_MEMORY; return -ENOMEM;
lvl2->leaf = lvl2->leafmem.va; lvl2->leaf = lvl2->leafmem.va;
leaf = lvl2->leaf; leaf = lvl2->leaf;
...@@ -368,7 +361,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -368,7 +361,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
if (ret_code) { if (ret_code) {
kfree(lvl2->leafmem.va); kfree(lvl2->leafmem.va);
lvl2->leaf = NULL; lvl2->leaf = NULL;
return IRDMA_ERR_NO_MEMORY; return -ENOMEM;
} }
root->idx = fpm_to_idx(pble_rsrc, fpm_addr); root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
...@@ -397,7 +390,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -397,7 +390,7 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
error: error:
free_lvl2(pble_rsrc, palloc); free_lvl2(pble_rsrc, palloc);
return IRDMA_ERR_NO_MEMORY; return -ENOMEM;
} }
/** /**
...@@ -405,11 +398,10 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -405,11 +398,10 @@ get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
* @pble_rsrc: pble resource management * @pble_rsrc: pble resource management
* @palloc: level 1 pble allocation * @palloc: level 1 pble allocation
*/ */
static enum irdma_status_code static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, struct irdma_pble_alloc *palloc)
struct irdma_pble_alloc *palloc)
{ {
enum irdma_status_code ret_code; int ret_code;
u64 fpm_addr; u64 fpm_addr;
struct irdma_pble_info *lvl1 = &palloc->level1; struct irdma_pble_info *lvl1 = &palloc->level1;
...@@ -417,7 +409,7 @@ get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -417,7 +409,7 @@ get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
palloc->total_cnt << 3, &lvl1->addr, palloc->total_cnt << 3, &lvl1->addr,
&fpm_addr); &fpm_addr);
if (ret_code) if (ret_code)
return IRDMA_ERR_NO_MEMORY; return -ENOMEM;
palloc->level = PBLE_LEVEL_1; palloc->level = PBLE_LEVEL_1;
lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr); lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
...@@ -433,11 +425,10 @@ get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -433,11 +425,10 @@ get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
* @palloc: contains all inforamtion regarding pble (idx + pble addr) * @palloc: contains all inforamtion regarding pble (idx + pble addr)
* @level1_only: flag for a level 1 PBLE * @level1_only: flag for a level 1 PBLE
*/ */
static enum irdma_status_code static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, struct irdma_pble_alloc *palloc, bool level1_only)
struct irdma_pble_alloc *palloc, bool level1_only)
{ {
enum irdma_status_code status = 0; int status = 0;
status = get_lvl1_pble(pble_rsrc, palloc); status = get_lvl1_pble(pble_rsrc, palloc);
if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE) if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE)
...@@ -455,11 +446,11 @@ get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -455,11 +446,11 @@ get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
* @pble_cnt: #of pbles requested * @pble_cnt: #of pbles requested
* @level1_only: true if only pble level 1 to acquire * @level1_only: true if only pble level 1 to acquire
*/ */
enum irdma_status_code irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, struct irdma_pble_alloc *palloc, u32 pble_cnt,
u32 pble_cnt, bool level1_only) bool level1_only)
{ {
enum irdma_status_code status = 0; int status = 0;
int max_sds = 0; int max_sds = 0;
int i; int i;
......
...@@ -108,20 +108,18 @@ struct irdma_hmc_pble_rsrc { ...@@ -108,20 +108,18 @@ struct irdma_hmc_pble_rsrc {
}; };
void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc); void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
enum irdma_status_code int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
irdma_hmc_init_pble(struct irdma_sc_dev *dev, struct irdma_hmc_pble_rsrc *pble_rsrc);
struct irdma_hmc_pble_rsrc *pble_rsrc);
void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc); struct irdma_pble_alloc *palloc);
enum irdma_status_code irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, struct irdma_pble_alloc *palloc, u32 pble_cnt,
u32 pble_cnt, bool level1_only); bool level1_only);
enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm, int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
struct irdma_chunk *pchunk); struct irdma_chunk *pchunk);
enum irdma_status_code int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
irdma_prm_get_pbles(struct irdma_pble_prm *pprm, struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size,
struct irdma_pble_chunkinfo *chunkinfo, u64 mem_size, u64 **vaddr, u64 *fpm_addr);
u64 **vaddr, u64 *fpm_addr);
void irdma_prm_return_pbles(struct irdma_pble_prm *pprm, void irdma_prm_return_pbles(struct irdma_pble_prm *pprm,
struct irdma_pble_chunkinfo *chunkinfo); struct irdma_pble_chunkinfo *chunkinfo);
void irdma_pble_acquire_lock(struct irdma_hmc_pble_rsrc *pble_rsrc, void irdma_pble_acquire_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
...@@ -129,7 +127,6 @@ void irdma_pble_acquire_lock(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -129,7 +127,6 @@ void irdma_pble_acquire_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
void irdma_pble_release_lock(struct irdma_hmc_pble_rsrc *pble_rsrc, void irdma_pble_release_lock(struct irdma_hmc_pble_rsrc *pble_rsrc,
unsigned long *flags); unsigned long *flags);
void irdma_pble_free_paged_mem(struct irdma_chunk *chunk); void irdma_pble_free_paged_mem(struct irdma_chunk *chunk);
enum irdma_status_code irdma_pble_get_paged_mem(struct irdma_chunk *chunk, int irdma_pble_get_paged_mem(struct irdma_chunk *chunk, u32 pg_cnt);
u32 pg_cnt);
void irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk); void irdma_prm_rem_bitmapmem(struct irdma_hw *hw, struct irdma_chunk *chunk);
#endif /* IRDMA_PBLE_H */ #endif /* IRDMA_PBLE_H */
...@@ -12,58 +12,51 @@ ...@@ -12,58 +12,51 @@
#define CQP_TIMEOUT_THRESHOLD 500 #define CQP_TIMEOUT_THRESHOLD 500
/* init operations */ /* init operations */
enum irdma_status_code irdma_sc_dev_init(enum irdma_vers ver, int irdma_sc_dev_init(enum irdma_vers ver, struct irdma_sc_dev *dev,
struct irdma_sc_dev *dev, struct irdma_device_init_info *info);
struct irdma_device_init_info *info);
void irdma_sc_rt_init(struct irdma_sc_dev *dev); void irdma_sc_rt_init(struct irdma_sc_dev *dev);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp); void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
__le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch); __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch);
enum irdma_status_code int irdma_sc_mr_fast_register(struct irdma_sc_qp *qp,
irdma_sc_mr_fast_register(struct irdma_sc_qp *qp, struct irdma_fast_reg_stag_info *info,
struct irdma_fast_reg_stag_info *info, bool post_sq); bool post_sq);
/* HMC/FPM functions */ /* HMC/FPM functions */
enum irdma_status_code irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, int irdma_sc_init_iw_hmc(struct irdma_sc_dev *dev, u8 hmc_fn_id);
u8 hmc_fn_id);
/* stats misc */ /* stats misc */
enum irdma_status_code int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev, struct irdma_vsi_pestat *pestat, bool wait);
struct irdma_vsi_pestat *pestat, bool wait);
void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev, void irdma_cqp_gather_stats_gen1(struct irdma_sc_dev *dev,
struct irdma_vsi_pestat *pestat); struct irdma_vsi_pestat *pestat);
void irdma_hw_stats_read_all(struct irdma_vsi_pestat *stats, void irdma_hw_stats_read_all(struct irdma_vsi_pestat *stats,
struct irdma_dev_hw_stats *stats_values, struct irdma_dev_hw_stats *stats_values,
u64 *hw_stats_regs_32, u64 *hw_stats_regs_64, u64 *hw_stats_regs_32, u64 *hw_stats_regs_64,
u8 hw_rev); u8 hw_rev);
enum irdma_status_code int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd, struct irdma_ws_node_info *node_info);
struct irdma_ws_node_info *node_info); int irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq,
enum irdma_status_code irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, u8 op);
struct irdma_sc_ceq *sc_ceq, u8 op); int irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_aeq *sc_aeq,
enum irdma_status_code irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev, u8 op);
struct irdma_sc_aeq *sc_aeq, u8 op); int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
enum irdma_status_code struct irdma_stats_inst_info *stats_info);
irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
struct irdma_stats_inst_info *stats_info);
u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev); u16 irdma_alloc_ws_node_id(struct irdma_sc_dev *dev);
void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id); void irdma_free_ws_node_id(struct irdma_sc_dev *dev, u16 node_id);
void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats, void irdma_update_stats(struct irdma_dev_hw_stats *hw_stats,
struct irdma_gather_stats *gather_stats, struct irdma_gather_stats *gather_stats,
struct irdma_gather_stats *last_gather_stats); struct irdma_gather_stats *last_gather_stats);
/* vsi functions */ /* vsi functions */
enum irdma_status_code irdma_vsi_stats_init(struct irdma_sc_vsi *vsi, int irdma_vsi_stats_init(struct irdma_sc_vsi *vsi,
struct irdma_vsi_stats_info *info); struct irdma_vsi_stats_info *info);
void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi); void irdma_vsi_stats_free(struct irdma_sc_vsi *vsi);
void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi, void irdma_sc_vsi_init(struct irdma_sc_vsi *vsi,
struct irdma_vsi_init_info *info); struct irdma_vsi_init_info *info);
enum irdma_status_code irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq);
struct irdma_sc_cq *cq);
void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq); void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq);
/* misc L2 param change functions */ /* misc L2 param change functions */
void irdma_change_l2params(struct irdma_sc_vsi *vsi, void irdma_change_l2params(struct irdma_sc_vsi *vsi,
struct irdma_l2params *l2params); struct irdma_l2params *l2params);
void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 suspend); void irdma_sc_suspend_resume_qps(struct irdma_sc_vsi *vsi, u8 suspend);
enum irdma_status_code irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, int irdma_cqp_qp_suspend_resume(struct irdma_sc_qp *qp, u8 cmd);
u8 cmd);
void irdma_qp_add_qos(struct irdma_sc_qp *qp); void irdma_qp_add_qos(struct irdma_sc_qp *qp);
void irdma_qp_rem_qos(struct irdma_sc_qp *qp); void irdma_qp_rem_qos(struct irdma_sc_qp *qp);
struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head, struct irdma_sc_qp *irdma_get_qp_from_list(struct list_head *head,
...@@ -81,31 +74,26 @@ void irdma_terminate_received(struct irdma_sc_qp *qp, ...@@ -81,31 +74,26 @@ void irdma_terminate_received(struct irdma_sc_qp *qp,
/* misc */ /* misc */
u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type); u8 irdma_get_encoded_wqe_size(u32 wqsize, enum irdma_queue_type queue_type);
void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp); void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp);
enum irdma_status_code int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch, u8 hmc_fn_id, bool post_sq,
u8 hmc_fn_id, bool post_sq, bool poll_registers);
bool poll_registers); int irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count);
enum irdma_status_code irdma_cfg_fpm_val(struct irdma_sc_dev *dev, int irdma_get_rdma_features(struct irdma_sc_dev *dev);
u32 qp_count);
enum irdma_status_code irdma_get_rdma_features(struct irdma_sc_dev *dev);
void free_sd_mem(struct irdma_sc_dev *dev); void free_sd_mem(struct irdma_sc_dev *dev);
enum irdma_status_code irdma_process_cqp_cmd(struct irdma_sc_dev *dev, int irdma_process_cqp_cmd(struct irdma_sc_dev *dev,
struct cqp_cmds_info *pcmdinfo); struct cqp_cmds_info *pcmdinfo);
enum irdma_status_code irdma_process_bh(struct irdma_sc_dev *dev); int irdma_process_bh(struct irdma_sc_dev *dev);
enum irdma_status_code irdma_cqp_sds_cmd(struct irdma_sc_dev *dev, int irdma_cqp_sds_cmd(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info); struct irdma_update_sds_info *info);
enum irdma_status_code int irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev,
irdma_cqp_query_fpm_val_cmd(struct irdma_sc_dev *dev, struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
struct irdma_dma_mem *val_mem, u8 hmc_fn_id); int irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev,
enum irdma_status_code struct irdma_dma_mem *val_mem, u8 hmc_fn_id);
irdma_cqp_commit_fpm_val_cmd(struct irdma_sc_dev *dev, int irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev,
struct irdma_dma_mem *val_mem, u8 hmc_fn_id); struct irdma_dma_mem *mem);
enum irdma_status_code irdma_alloc_query_fpm_buf(struct irdma_sc_dev *dev, int irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
struct irdma_dma_mem *mem); struct irdma_hmc_fcn_info *hmcfcninfo,
enum irdma_status_code u16 *pmf_idx);
irdma_cqp_manage_hmc_fcn_cmd(struct irdma_sc_dev *dev,
struct irdma_hmc_fcn_info *hmcfcninfo,
u16 *pmf_idx);
void irdma_add_dev_ref(struct irdma_sc_dev *dev); void irdma_add_dev_ref(struct irdma_sc_dev *dev);
void irdma_put_dev_ref(struct irdma_sc_dev *dev); void irdma_put_dev_ref(struct irdma_sc_dev *dev);
void *irdma_remove_cqp_head(struct irdma_sc_dev *dev); void *irdma_remove_cqp_head(struct irdma_sc_dev *dev);
......
This diff is collapsed.
...@@ -151,42 +151,33 @@ void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc, ...@@ -151,42 +151,33 @@ void irdma_puda_ret_bufpool(struct irdma_puda_rsrc *rsrc,
struct irdma_puda_buf *buf); struct irdma_puda_buf *buf);
void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc, void irdma_puda_send_buf(struct irdma_puda_rsrc *rsrc,
struct irdma_puda_buf *buf); struct irdma_puda_buf *buf);
enum irdma_status_code irdma_puda_send(struct irdma_sc_qp *qp, int irdma_puda_send(struct irdma_sc_qp *qp, struct irdma_puda_send_info *info);
struct irdma_puda_send_info *info); int irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
enum irdma_status_code struct irdma_puda_rsrc_info *info);
irdma_puda_create_rsrc(struct irdma_sc_vsi *vsi,
struct irdma_puda_rsrc_info *info);
void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type, void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
bool reset); bool reset);
enum irdma_status_code irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, int irdma_puda_poll_cmpl(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq,
struct irdma_sc_cq *cq, u32 *compl_err);
u32 *compl_err);
struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev, struct irdma_sc_qp *irdma_ieq_get_qp(struct irdma_sc_dev *dev,
struct irdma_puda_buf *buf); struct irdma_puda_buf *buf);
enum irdma_status_code int irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info,
irdma_puda_get_tcpip_info(struct irdma_puda_cmpl_info *info, struct irdma_puda_buf *buf);
struct irdma_puda_buf *buf); int irdma_ieq_check_mpacrc(struct shash_desc *desc, void *addr, u32 len, u32 val);
enum irdma_status_code irdma_ieq_check_mpacrc(struct shash_desc *desc, int irdma_init_hash_desc(struct shash_desc **desc);
void *addr, u32 len, u32 val);
enum irdma_status_code irdma_init_hash_desc(struct shash_desc **desc);
void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp); void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
void irdma_free_hash_desc(struct shash_desc *desc); void irdma_free_hash_desc(struct shash_desc *desc);
void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, void irdma_ieq_update_tcpip_info(struct irdma_puda_buf *buf, u16 len, u32 seqnum);
u32 seqnum); int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
enum irdma_status_code irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, int irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
struct irdma_sc_qp *qp); int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
enum irdma_status_code irdma_cqp_cq_create_cmd(struct irdma_sc_dev *dev,
struct irdma_sc_cq *cq);
enum irdma_status_code irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq); void irdma_cqp_cq_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq);
void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp, void irdma_puda_ieq_get_ah_info(struct irdma_sc_qp *qp,
struct irdma_ah_info *ah_info); struct irdma_ah_info *ah_info);
enum irdma_status_code irdma_puda_create_ah(struct irdma_sc_dev *dev, int irdma_puda_create_ah(struct irdma_sc_dev *dev,
struct irdma_ah_info *ah_info, struct irdma_ah_info *ah_info, bool wait,
bool wait, enum puda_rsrc_type type, enum puda_rsrc_type type, void *cb_param,
void *cb_param, struct irdma_sc_ah **ah);
struct irdma_sc_ah **ah);
void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah); void irdma_puda_free_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah);
void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp, void irdma_ieq_process_fpdus(struct irdma_sc_qp *qp,
struct irdma_puda_rsrc *ieq); struct irdma_puda_rsrc *ieq);
......
/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2020 Intel Corporation */
#ifndef IRDMA_STATUS_H
#define IRDMA_STATUS_H
/* Error Codes */
enum irdma_status_code {
IRDMA_SUCCESS = 0,
IRDMA_ERR_NVM = -1,
IRDMA_ERR_NVM_CHECKSUM = -2,
IRDMA_ERR_CFG = -4,
IRDMA_ERR_PARAM = -5,
IRDMA_ERR_DEVICE_NOT_SUPPORTED = -6,
IRDMA_ERR_RESET_FAILED = -7,
IRDMA_ERR_SWFW_SYNC = -8,
IRDMA_ERR_NO_MEMORY = -9,
IRDMA_ERR_BAD_PTR = -10,
IRDMA_ERR_INVALID_PD_ID = -11,
IRDMA_ERR_INVALID_QP_ID = -12,
IRDMA_ERR_INVALID_CQ_ID = -13,
IRDMA_ERR_INVALID_CEQ_ID = -14,
IRDMA_ERR_INVALID_AEQ_ID = -15,
IRDMA_ERR_INVALID_SIZE = -16,
IRDMA_ERR_INVALID_ARP_INDEX = -17,
IRDMA_ERR_INVALID_FPM_FUNC_ID = -18,
IRDMA_ERR_QP_INVALID_MSG_SIZE = -19,
IRDMA_ERR_QP_TOOMANY_WRS_POSTED = -20,
IRDMA_ERR_INVALID_FRAG_COUNT = -21,
IRDMA_ERR_Q_EMPTY = -22,
IRDMA_ERR_INVALID_ALIGNMENT = -23,
IRDMA_ERR_FLUSHED_Q = -24,
IRDMA_ERR_INVALID_PUSH_PAGE_INDEX = -25,
IRDMA_ERR_INVALID_INLINE_DATA_SIZE = -26,
IRDMA_ERR_TIMEOUT = -27,
IRDMA_ERR_OPCODE_MISMATCH = -28,
IRDMA_ERR_CQP_COMPL_ERROR = -29,
IRDMA_ERR_INVALID_VF_ID = -30,
IRDMA_ERR_INVALID_HMCFN_ID = -31,
IRDMA_ERR_BACKING_PAGE_ERROR = -32,
IRDMA_ERR_NO_PBLCHUNKS_AVAILABLE = -33,
IRDMA_ERR_INVALID_PBLE_INDEX = -34,
IRDMA_ERR_INVALID_SD_INDEX = -35,
IRDMA_ERR_INVALID_PAGE_DESC_INDEX = -36,
IRDMA_ERR_INVALID_SD_TYPE = -37,
IRDMA_ERR_MEMCPY_FAILED = -38,
IRDMA_ERR_INVALID_HMC_OBJ_INDEX = -39,
IRDMA_ERR_INVALID_HMC_OBJ_COUNT = -40,
IRDMA_ERR_BUF_TOO_SHORT = -43,
IRDMA_ERR_BAD_IWARP_CQE = -44,
IRDMA_ERR_NVM_BLANK_MODE = -45,
IRDMA_ERR_NOT_IMPL = -46,
IRDMA_ERR_PE_DOORBELL_NOT_ENA = -47,
IRDMA_ERR_NOT_READY = -48,
IRDMA_NOT_SUPPORTED = -49,
IRDMA_ERR_FIRMWARE_API_VER = -50,
IRDMA_ERR_RING_FULL = -51,
IRDMA_ERR_MPA_CRC = -61,
IRDMA_ERR_NO_TXBUFS = -62,
IRDMA_ERR_SEQ_NUM = -63,
IRDMA_ERR_list_empty = -64,
IRDMA_ERR_INVALID_MAC_ADDR = -65,
IRDMA_ERR_BAD_STAG = -66,
IRDMA_ERR_CQ_COMPL_ERROR = -67,
IRDMA_ERR_Q_DESTROYED = -68,
IRDMA_ERR_INVALID_FEAT_CNT = -69,
IRDMA_ERR_REG_CQ_FULL = -70,
IRDMA_ERR_VF_MSG_ERROR = -71,
IRDMA_ERR_NO_INTR = -72,
IRDMA_ERR_REG_QSET = -73,
};
#endif /* IRDMA_STATUS_H */
...@@ -2,7 +2,6 @@ ...@@ -2,7 +2,6 @@
/* Copyright (c) 2015 - 2021 Intel Corporation */ /* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_TYPE_H #ifndef IRDMA_TYPE_H
#define IRDMA_TYPE_H #define IRDMA_TYPE_H
#include "status.h"
#include "osdep.h" #include "osdep.h"
#include "irdma.h" #include "irdma.h"
#include "user.h" #include "user.h"
...@@ -402,8 +401,8 @@ struct irdma_sc_cqp { ...@@ -402,8 +401,8 @@ struct irdma_sc_cqp {
u64 host_ctx_pa; u64 host_ctx_pa;
void *back_cqp; void *back_cqp;
struct irdma_sc_dev *dev; struct irdma_sc_dev *dev;
enum irdma_status_code (*process_cqp_sds)(struct irdma_sc_dev *dev, int (*process_cqp_sds)(struct irdma_sc_dev *dev,
struct irdma_update_sds_info *info); struct irdma_update_sds_info *info);
struct irdma_dma_mem sdbuf; struct irdma_dma_mem sdbuf;
struct irdma_ring sq_ring; struct irdma_ring sq_ring;
struct irdma_cqp_quanta *sq_base; struct irdma_cqp_quanta *sq_base;
...@@ -605,8 +604,8 @@ struct irdma_sc_vsi { ...@@ -605,8 +604,8 @@ struct irdma_sc_vsi {
struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY]; struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
struct irdma_vsi_pestat *pestat; struct irdma_vsi_pestat *pestat;
atomic_t qp_suspend_reqs; atomic_t qp_suspend_reqs;
enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi, int (*register_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node); struct irdma_ws_node *tc_node);
void (*unregister_qset)(struct irdma_sc_vsi *vsi, void (*unregister_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node); struct irdma_ws_node *tc_node);
u8 qos_rel_bw; u8 qos_rel_bw;
...@@ -657,7 +656,7 @@ struct irdma_sc_dev { ...@@ -657,7 +656,7 @@ struct irdma_sc_dev {
bool vchnl_up:1; bool vchnl_up:1;
bool ceq_valid:1; bool ceq_valid:1;
u8 pci_rev; u8 pci_rev;
enum irdma_status_code (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri); int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri); void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
void (*ws_reset)(struct irdma_sc_vsi *vsi); void (*ws_reset)(struct irdma_sc_vsi *vsi);
}; };
...@@ -754,8 +753,8 @@ struct irdma_vsi_init_info { ...@@ -754,8 +753,8 @@ struct irdma_vsi_init_info {
u16 pf_data_vsi_num; u16 pf_data_vsi_num;
enum irdma_vm_vf_type vm_vf_type; enum irdma_vm_vf_type vm_vf_type;
u16 vm_id; u16 vm_id;
enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi, int (*register_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node); struct irdma_ws_node *tc_node);
void (*unregister_qset)(struct irdma_sc_vsi *vsi, void (*unregister_qset)(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node); struct irdma_ws_node *tc_node);
}; };
...@@ -1202,29 +1201,27 @@ struct irdma_irq_ops { ...@@ -1202,29 +1201,27 @@ struct irdma_irq_ops {
}; };
void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq); void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch, int irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
bool check_overflow, bool post_sq); bool check_overflow, bool post_sq);
enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, int irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch, bool post_sq);
bool post_sq); int irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq, struct irdma_ccq_cqe_info *info);
struct irdma_ccq_cqe_info *info); int irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *ccq, struct irdma_ccq_init_info *info);
struct irdma_ccq_init_info *info);
int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch); int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);
enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);
int irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, bool post_sq);
enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch, int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
bool post_sq); struct irdma_ceq_init_info *info);
enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
struct irdma_ceq_init_info *info);
void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq); void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq); void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);
enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq, int irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
struct irdma_aeq_init_info *info); struct irdma_aeq_init_info *info);
enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq, int irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
struct irdma_aeqe_info *info); struct irdma_aeqe_info *info);
void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count); void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id, void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
...@@ -1232,31 +1229,27 @@ void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_i ...@@ -1232,31 +1229,27 @@ void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_i
void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable); void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout, void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
struct irdma_sc_dev *dev); struct irdma_sc_dev *dev);
enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, int irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err, u16 *min_err);
u16 *min_err); int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
enum irdma_status_code irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp); int irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp, struct irdma_cqp_init_info *info);
struct irdma_cqp_init_info *info);
void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp); void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode, int irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
struct irdma_ccq_cqe_info *cmpl_info); struct irdma_ccq_cqe_info *cmpl_info);
enum irdma_status_code irdma_sc_fast_register(struct irdma_sc_qp *qp, int irdma_sc_fast_register(struct irdma_sc_qp *qp,
struct irdma_fast_reg_stag_info *info, struct irdma_fast_reg_stag_info *info, bool post_sq);
bool post_sq); int irdma_sc_qp_create(struct irdma_sc_qp *qp,
enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp, struct irdma_create_qp_info *info, u64 scratch,
struct irdma_create_qp_info *info, bool post_sq);
u64 scratch, bool post_sq); int irdma_sc_qp_destroy(struct irdma_sc_qp *qp, u64 scratch,
enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp, bool remove_hash_idx, bool ignore_mw_bnd, bool post_sq);
u64 scratch, bool remove_hash_idx, int irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
bool ignore_mw_bnd, bool post_sq); struct irdma_qp_flush_info *info, u64 scratch,
enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp, bool post_sq);
struct irdma_qp_flush_info *info, int irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_qp_init_info *info);
u64 scratch, bool post_sq); int irdma_sc_qp_modify(struct irdma_sc_qp *qp,
enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp, struct irdma_modify_qp_info *info, u64 scratch,
struct irdma_qp_init_info *info); bool post_sq);
enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
struct irdma_modify_qp_info *info,
u64 scratch, bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size, void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
irdma_stag stag); irdma_stag stag);
...@@ -1265,14 +1258,12 @@ void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx, ...@@ -1265,14 +1258,12 @@ void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
struct irdma_qp_host_ctx_info *info); struct irdma_qp_host_ctx_info *info);
void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx, void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
struct irdma_qp_host_ctx_info *info); struct irdma_qp_host_ctx_info *info);
enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq);
bool post_sq); int irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info);
enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
struct irdma_cq_init_info *info);
void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info); void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
enum irdma_status_code irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, int irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp, u64 scratch,
u64 scratch, u8 hmc_fn_id, u8 hmc_fn_id, bool post_sq,
bool post_sq, bool poll_registers); bool poll_registers);
void sc_vsi_update_stats(struct irdma_sc_vsi *vsi); void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
struct cqp_info { struct cqp_info {
......
...@@ -3,7 +3,6 @@ ...@@ -3,7 +3,6 @@
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include "osdep.h" #include "osdep.h"
#include "status.h"
#include "hmc.h" #include "hmc.h"
#include "defs.h" #include "defs.h"
#include "type.h" #include "type.h"
...@@ -18,16 +17,15 @@ ...@@ -18,16 +17,15 @@
* @op: Operation * @op: Operation
* @scratch: u64 saved to be used during cqp completion * @scratch: u64 saved to be used during cqp completion
*/ */
enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp, int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
struct irdma_ah_info *info, u32 op, u64 scratch)
u32 op, u64 scratch)
{ {
__le64 *wqe; __le64 *wqe;
u64 qw1, qw2; u64 qw1, qw2;
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe) if (!wqe)
return IRDMA_ERR_RING_FULL; return -ENOMEM;
set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16); set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) | qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
...@@ -86,8 +84,7 @@ enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp, ...@@ -86,8 +84,7 @@ enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp,
* irdma_create_mg_ctx() - create a mcg context * irdma_create_mg_ctx() - create a mcg context
* @info: multicast group context info * @info: multicast group context info
*/ */
static enum irdma_status_code static int irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
{ {
struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL; struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
u8 idx = 0; /* index in the array */ u8 idx = 0; /* index in the array */
...@@ -117,22 +114,22 @@ irdma_create_mg_ctx(struct irdma_mcast_grp_info *info) ...@@ -117,22 +114,22 @@ irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
* @op: operation to perform * @op: operation to perform
* @scratch: u64 saved to be used during cqp completion * @scratch: u64 saved to be used during cqp completion
*/ */
enum irdma_status_code irdma_access_mcast_grp(struct irdma_sc_cqp *cqp, int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info, struct irdma_mcast_grp_info *info, u32 op,
u32 op, u64 scratch) u64 scratch)
{ {
__le64 *wqe; __le64 *wqe;
enum irdma_status_code ret_code = 0; int ret_code = 0;
if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) { if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n"); ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n");
return IRDMA_ERR_PARAM; return -EINVAL;
} }
wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch); wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
if (!wqe) { if (!wqe) {
ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n"); ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n");
return IRDMA_ERR_RING_FULL; return -ENOMEM;
} }
ret_code = irdma_create_mg_ctx(info); ret_code = irdma_create_mg_ctx(info);
...@@ -198,8 +195,8 @@ static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1, ...@@ -198,8 +195,8 @@ static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
* @ctx: Multcast group context * @ctx: Multcast group context
* @mg: Multcast group info * @mg: Multcast group info
*/ */
enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx, int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
struct irdma_mcast_grp_ctx_entry_info *mg) struct irdma_mcast_grp_ctx_entry_info *mg)
{ {
u32 idx; u32 idx;
bool free_entry_found = false; bool free_entry_found = false;
...@@ -228,7 +225,7 @@ enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx, ...@@ -228,7 +225,7 @@ enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
return 0; return 0;
} }
return IRDMA_ERR_NO_MEMORY; return -ENOMEM;
} }
/** /**
...@@ -239,8 +236,8 @@ enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx, ...@@ -239,8 +236,8 @@ enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
* Finds and removes a specific mulicast group from context, all * Finds and removes a specific mulicast group from context, all
* parameters must match to remove a multicast group. * parameters must match to remove a multicast group.
*/ */
enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx, int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
struct irdma_mcast_grp_ctx_entry_info *mg) struct irdma_mcast_grp_ctx_entry_info *mg)
{ {
u32 idx; u32 idx;
...@@ -269,5 +266,5 @@ enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx, ...@@ -269,5 +266,5 @@ enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
} }
} }
return IRDMA_ERR_PARAM; return -EINVAL;
} }
...@@ -32,56 +32,54 @@ struct irdma_sc_ah { ...@@ -32,56 +32,54 @@ struct irdma_sc_ah {
struct irdma_ah_info ah_info; struct irdma_ah_info ah_info;
}; };
enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx, int irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
struct irdma_mcast_grp_ctx_entry_info *mg); struct irdma_mcast_grp_ctx_entry_info *mg);
enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx, int irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
struct irdma_mcast_grp_ctx_entry_info *mg); struct irdma_mcast_grp_ctx_entry_info *mg);
enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info, int irdma_sc_access_ah(struct irdma_sc_cqp *cqp, struct irdma_ah_info *info,
u32 op, u64 scratch); u32 op, u64 scratch);
enum irdma_status_code irdma_access_mcast_grp(struct irdma_sc_cqp *cqp, int irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info, struct irdma_mcast_grp_info *info, u32 op,
u32 op, u64 scratch); u64 scratch);
static inline void irdma_sc_init_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah) static inline void irdma_sc_init_ah(struct irdma_sc_dev *dev, struct irdma_sc_ah *ah)
{ {
ah->dev = dev; ah->dev = dev;
} }
static inline enum irdma_status_code irdma_sc_create_ah(struct irdma_sc_cqp *cqp, static inline int irdma_sc_create_ah(struct irdma_sc_cqp *cqp,
struct irdma_ah_info *info, struct irdma_ah_info *info, u64 scratch)
u64 scratch)
{ {
return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_CREATE_ADDR_HANDLE, return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_CREATE_ADDR_HANDLE,
scratch); scratch);
} }
static inline enum irdma_status_code irdma_sc_destroy_ah(struct irdma_sc_cqp *cqp, static inline int irdma_sc_destroy_ah(struct irdma_sc_cqp *cqp,
struct irdma_ah_info *info, struct irdma_ah_info *info, u64 scratch)
u64 scratch)
{ {
return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_DESTROY_ADDR_HANDLE, return irdma_sc_access_ah(cqp, info, IRDMA_CQP_OP_DESTROY_ADDR_HANDLE,
scratch); scratch);
} }
static inline enum irdma_status_code irdma_sc_create_mcast_grp(struct irdma_sc_cqp *cqp, static inline int irdma_sc_create_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info, struct irdma_mcast_grp_info *info,
u64 scratch) u64 scratch)
{ {
return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_CREATE_MCAST_GRP, return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_CREATE_MCAST_GRP,
scratch); scratch);
} }
static inline enum irdma_status_code irdma_sc_modify_mcast_grp(struct irdma_sc_cqp *cqp, static inline int irdma_sc_modify_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info, struct irdma_mcast_grp_info *info,
u64 scratch) u64 scratch)
{ {
return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_MODIFY_MCAST_GRP, return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_MODIFY_MCAST_GRP,
scratch); scratch);
} }
static inline enum irdma_status_code irdma_sc_destroy_mcast_grp(struct irdma_sc_cqp *cqp, static inline int irdma_sc_destroy_mcast_grp(struct irdma_sc_cqp *cqp,
struct irdma_mcast_grp_info *info, struct irdma_mcast_grp_info *info,
u64 scratch) u64 scratch)
{ {
return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_DESTROY_MCAST_GRP, return irdma_access_mcast_grp(cqp, info, IRDMA_CQP_OP_DESTROY_MCAST_GRP,
scratch); scratch);
......
This diff is collapsed.
...@@ -270,29 +270,24 @@ struct irdma_cq_poll_info { ...@@ -270,29 +270,24 @@ struct irdma_cq_poll_info {
bool imm_valid:1; bool imm_valid:1;
}; };
enum irdma_status_code irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, struct irdma_post_sq_info *info, bool post_sq);
bool post_sq); int irdma_uk_inline_send(struct irdma_qp_uk *qp,
enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq);
struct irdma_post_sq_info *info, int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled,
bool post_sq); bool post_sq);
int irdma_uk_post_receive(struct irdma_qp_uk *qp,
enum irdma_status_code irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, struct irdma_post_rq_info *info);
bool signaled, bool post_sq);
enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
struct irdma_post_rq_info *info);
void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp); void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp);
enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp, int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
struct irdma_post_sq_info *info, bool inv_stag, bool post_sq);
bool inv_stag, bool post_sq); int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp, bool post_sq);
struct irdma_post_sq_info *info, int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool post_sq); bool post_sq);
enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp, int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq); struct irdma_post_sq_info *info,
enum irdma_status_code irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, bool post_sq);
struct irdma_post_sq_info *info,
bool post_sq);
struct irdma_wqe_uk_ops { struct irdma_wqe_uk_ops {
void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity); void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
...@@ -303,16 +298,16 @@ struct irdma_wqe_uk_ops { ...@@ -303,16 +298,16 @@ struct irdma_wqe_uk_ops {
struct irdma_bind_window *op_info); struct irdma_bind_window *op_info);
}; };
enum irdma_status_code irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
struct irdma_cq_poll_info *info); struct irdma_cq_poll_info *info);
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq, void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
enum irdma_cmpl_notify cq_notify); enum irdma_cmpl_notify cq_notify);
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size); void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt); void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
void irdma_uk_cq_init(struct irdma_cq_uk *cq, void irdma_uk_cq_init(struct irdma_cq_uk *cq,
struct irdma_cq_uk_init_info *info); struct irdma_cq_uk_init_info *info);
enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp, int irdma_uk_qp_init(struct irdma_qp_uk *qp,
struct irdma_qp_uk_init_info *info); struct irdma_qp_uk_init_info *info);
struct irdma_sq_uk_wr_trk_info { struct irdma_sq_uk_wr_trk_info {
u64 wrid; u64 wrid;
u32 wr_len; u32 wr_len;
...@@ -413,16 +408,15 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx, ...@@ -413,16 +408,15 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
struct irdma_post_sq_info *info); struct irdma_post_sq_info *info);
__le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx); __le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx);
void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq); void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq);
enum irdma_status_code irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
bool signaled, bool post_sq); int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
enum irdma_status_code irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta); int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
enum irdma_status_code irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge, void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
u32 inline_data, u8 *shift); u32 inline_data, u8 *shift);
enum irdma_status_code irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
u32 sq_size, u8 shift, u32 *wqdepth); u32 *wqdepth);
enum irdma_status_code irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
u32 rq_size, u8 shift, u32 *wqdepth); u32 *wqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta, void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
u32 wqe_idx, bool post_sq); u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx); void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
......
This diff is collapsed.
...@@ -256,7 +256,7 @@ static void irdma_alloc_push_page(struct irdma_qp *iwqp) ...@@ -256,7 +256,7 @@ static void irdma_alloc_push_page(struct irdma_qp *iwqp)
struct cqp_cmds_info *cqp_info; struct cqp_cmds_info *cqp_info;
struct irdma_device *iwdev = iwqp->iwdev; struct irdma_device *iwdev = iwqp->iwdev;
struct irdma_sc_qp *qp = &iwqp->sc_qp; struct irdma_sc_qp *qp = &iwqp->sc_qp;
enum irdma_status_code status; int status;
cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
if (!cqp_request) if (!cqp_request)
...@@ -592,7 +592,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev, ...@@ -592,7 +592,7 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
u32 sqdepth, rqdepth; u32 sqdepth, rqdepth;
u8 sqshift, rqshift; u8 sqshift, rqshift;
u32 size; u32 size;
enum irdma_status_code status; int status;
struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs; struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
...@@ -668,7 +668,7 @@ static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp) ...@@ -668,7 +668,7 @@ static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
struct irdma_cqp_request *cqp_request; struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info; struct cqp_cmds_info *cqp_info;
struct irdma_create_qp_info *qp_info; struct irdma_create_qp_info *qp_info;
enum irdma_status_code status; int status;
cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true); cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, true);
if (!cqp_request) if (!cqp_request)
...@@ -806,7 +806,7 @@ static int irdma_create_qp(struct ib_qp *ibqp, ...@@ -806,7 +806,7 @@ static int irdma_create_qp(struct ib_qp *ibqp,
struct irdma_create_qp_req req; struct irdma_create_qp_req req;
struct irdma_create_qp_resp uresp = {}; struct irdma_create_qp_resp uresp = {};
u32 qp_num = 0; u32 qp_num = 0;
enum irdma_status_code ret; int ret;
int err_code; int err_code;
int sq_size; int sq_size;
int rq_size; int rq_size;
...@@ -1792,7 +1792,7 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries, ...@@ -1792,7 +1792,7 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
struct irdma_device *iwdev; struct irdma_device *iwdev;
struct irdma_pci_f *rf; struct irdma_pci_f *rf;
struct irdma_cq_buf *cq_buf = NULL; struct irdma_cq_buf *cq_buf = NULL;
enum irdma_status_code status = 0; int status = 0;
unsigned long flags; unsigned long flags;
int ret; int ret;
...@@ -1945,7 +1945,7 @@ static int irdma_create_cq(struct ib_cq *ibcq, ...@@ -1945,7 +1945,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
struct irdma_sc_cq *cq; struct irdma_sc_cq *cq;
struct irdma_sc_dev *dev = &rf->sc_dev; struct irdma_sc_dev *dev = &rf->sc_dev;
struct irdma_cq_init_info info = {}; struct irdma_cq_init_info info = {};
enum irdma_status_code status; int status;
struct irdma_cqp_request *cqp_request; struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info; struct cqp_cmds_info *cqp_info;
struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info; struct irdma_cq_uk_init_info *ukinfo = &info.cq_uk_init_info;
...@@ -2309,7 +2309,7 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, ...@@ -2309,7 +2309,7 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_pble_info *pinfo; struct irdma_pble_info *pinfo;
u64 *pbl; u64 *pbl;
enum irdma_status_code status; int status;
enum irdma_pble_level level = PBLE_LEVEL_1; enum irdma_pble_level level = PBLE_LEVEL_1;
if (use_pbles) { if (use_pbles) {
...@@ -2434,7 +2434,7 @@ static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr) ...@@ -2434,7 +2434,7 @@ static int irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd); struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
struct irdma_cqp_request *cqp_request; struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info; struct cqp_cmds_info *cqp_info;
enum irdma_status_code status; int status;
cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
if (!cqp_request) if (!cqp_request)
...@@ -2533,7 +2533,7 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev, ...@@ -2533,7 +2533,7 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
{ {
struct irdma_allocate_stag_info *info; struct irdma_allocate_stag_info *info;
struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd); struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
enum irdma_status_code status; int status;
int err = 0; int err = 0;
struct irdma_cqp_request *cqp_request; struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info; struct cqp_cmds_info *cqp_info;
...@@ -2575,7 +2575,7 @@ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type, ...@@ -2575,7 +2575,7 @@ static struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
struct irdma_pble_alloc *palloc; struct irdma_pble_alloc *palloc;
struct irdma_pbl *iwpbl; struct irdma_pbl *iwpbl;
struct irdma_mr *iwmr; struct irdma_mr *iwmr;
enum irdma_status_code status; int status;
u32 stag; u32 stag;
int err_code = -ENOMEM; int err_code = -ENOMEM;
...@@ -2672,7 +2672,7 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr, ...@@ -2672,7 +2672,7 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
struct irdma_reg_ns_stag_info *stag_info; struct irdma_reg_ns_stag_info *stag_info;
struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd); struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
enum irdma_status_code status; int status;
int err = 0; int err = 0;
struct irdma_cqp_request *cqp_request; struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info; struct cqp_cmds_info *cqp_info;
...@@ -2897,7 +2897,7 @@ struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access ...@@ -2897,7 +2897,7 @@ struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access
struct irdma_device *iwdev = to_iwdev(pd->device); struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_pbl *iwpbl; struct irdma_pbl *iwpbl;
struct irdma_mr *iwmr; struct irdma_mr *iwmr;
enum irdma_status_code status; int status;
u32 stag; u32 stag;
int ret; int ret;
...@@ -3057,7 +3057,7 @@ static int irdma_post_send(struct ib_qp *ibqp, ...@@ -3057,7 +3057,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
struct irdma_qp_uk *ukqp; struct irdma_qp_uk *ukqp;
struct irdma_sc_dev *dev; struct irdma_sc_dev *dev;
struct irdma_post_sq_info info; struct irdma_post_sq_info info;
enum irdma_status_code ret; int ret;
int err = 0; int err = 0;
unsigned long flags; unsigned long flags;
bool inv_stag; bool inv_stag;
...@@ -3131,7 +3131,7 @@ static int irdma_post_send(struct ib_qp *ibqp, ...@@ -3131,7 +3131,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
} }
if (ret) { if (ret) {
if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED) if (ret == -ENOMEM)
err = -ENOMEM; err = -ENOMEM;
else else
err = -EINVAL; err = -EINVAL;
...@@ -3170,7 +3170,7 @@ static int irdma_post_send(struct ib_qp *ibqp, ...@@ -3170,7 +3170,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
} }
if (ret) { if (ret) {
if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED) if (ret == -ENOMEM)
err = -ENOMEM; err = -ENOMEM;
else else
err = -EINVAL; err = -EINVAL;
...@@ -3193,7 +3193,7 @@ static int irdma_post_send(struct ib_qp *ibqp, ...@@ -3193,7 +3193,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
ret = irdma_uk_rdma_read(ukqp, &info, inv_stag, false); ret = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
if (ret) { if (ret) {
if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED) if (ret == -ENOMEM)
err = -ENOMEM; err = -ENOMEM;
else else
err = -EINVAL; err = -EINVAL;
...@@ -3274,7 +3274,7 @@ static int irdma_post_recv(struct ib_qp *ibqp, ...@@ -3274,7 +3274,7 @@ static int irdma_post_recv(struct ib_qp *ibqp,
struct irdma_qp *iwqp; struct irdma_qp *iwqp;
struct irdma_qp_uk *ukqp; struct irdma_qp_uk *ukqp;
struct irdma_post_rq_info post_recv = {}; struct irdma_post_rq_info post_recv = {};
enum irdma_status_code ret = 0; int ret = 0;
unsigned long flags; unsigned long flags;
int err = 0; int err = 0;
bool reflush = false; bool reflush = false;
...@@ -3293,7 +3293,7 @@ static int irdma_post_recv(struct ib_qp *ibqp, ...@@ -3293,7 +3293,7 @@ static int irdma_post_recv(struct ib_qp *ibqp,
if (ret) { if (ret) {
ibdev_dbg(&iwqp->iwdev->ibdev, ibdev_dbg(&iwqp->iwdev->ibdev,
"VERBS: post_recv err %d\n", ret); "VERBS: post_recv err %d\n", ret);
if (ret == IRDMA_ERR_QP_TOOMANY_WRS_POSTED) if (ret == -ENOMEM)
err = -ENOMEM; err = -ENOMEM;
else else
err = -EINVAL; err = -EINVAL;
...@@ -3483,7 +3483,7 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc ...@@ -3483,7 +3483,7 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc
struct irdma_cq_buf *last_buf = NULL; struct irdma_cq_buf *last_buf = NULL;
struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe; struct irdma_cq_poll_info *cur_cqe = &iwcq->cur_cqe;
struct irdma_cq_buf *cq_buf; struct irdma_cq_buf *cq_buf;
enum irdma_status_code ret; int ret;
struct irdma_device *iwdev; struct irdma_device *iwdev;
struct irdma_cq_uk *ukcq; struct irdma_cq_uk *ukcq;
bool cq_new_cqe = false; bool cq_new_cqe = false;
...@@ -3503,10 +3503,10 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc ...@@ -3503,10 +3503,10 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc
cq_new_cqe = true; cq_new_cqe = true;
continue; continue;
} }
if (ret == IRDMA_ERR_Q_EMPTY) if (ret == -ENOENT)
break; break;
/* QP using the CQ is destroyed. Skip reporting this CQE */ /* QP using the CQ is destroyed. Skip reporting this CQE */
if (ret == IRDMA_ERR_Q_DESTROYED) { if (ret == -EFAULT) {
cq_new_cqe = true; cq_new_cqe = true;
continue; continue;
} }
...@@ -3528,10 +3528,10 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc ...@@ -3528,10 +3528,10 @@ static int __irdma_poll_cq(struct irdma_cq *iwcq, int num_entries, struct ib_wc
continue; continue;
} }
if (ret == IRDMA_ERR_Q_EMPTY) if (ret == -ENOENT)
break; break;
/* QP using the CQ is destroyed. Skip reporting this CQE */ /* QP using the CQ is destroyed. Skip reporting this CQE */
if (ret == IRDMA_ERR_Q_DESTROYED) { if (ret == -EFAULT) {
cq_new_cqe = true; cq_new_cqe = true;
continue; continue;
} }
...@@ -3859,7 +3859,7 @@ static int irdma_mcast_cqp_op(struct irdma_device *iwdev, ...@@ -3859,7 +3859,7 @@ static int irdma_mcast_cqp_op(struct irdma_device *iwdev,
{ {
struct cqp_cmds_info *cqp_info; struct cqp_cmds_info *cqp_info;
struct irdma_cqp_request *cqp_request; struct irdma_cqp_request *cqp_request;
enum irdma_status_code status; int status;
cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true); cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
if (!cqp_request) if (!cqp_request)
......
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2017 - 2021 Intel Corporation */ /* Copyright (c) 2017 - 2021 Intel Corporation */
#include "osdep.h" #include "osdep.h"
#include "status.h"
#include "hmc.h" #include "hmc.h"
#include "defs.h" #include "defs.h"
#include "type.h" #include "type.h"
...@@ -87,8 +86,8 @@ static void irdma_free_node(struct irdma_sc_vsi *vsi, ...@@ -87,8 +86,8 @@ static void irdma_free_node(struct irdma_sc_vsi *vsi,
* @node: pointer to node * @node: pointer to node
* @cmd: add, remove or modify * @cmd: add, remove or modify
*/ */
static enum irdma_status_code static int irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi,
irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi, struct irdma_ws_node *node, u8 cmd) struct irdma_ws_node *node, u8 cmd)
{ {
struct irdma_ws_node_info node_info = {}; struct irdma_ws_node_info node_info = {};
...@@ -106,7 +105,7 @@ irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi, struct irdma_ws_node *node, u8 cmd) ...@@ -106,7 +105,7 @@ irdma_ws_cqp_cmd(struct irdma_sc_vsi *vsi, struct irdma_ws_node *node, u8 cmd)
node_info.enable = node->enable; node_info.enable = node->enable;
if (irdma_cqp_ws_node_cmd(vsi->dev, cmd, &node_info)) { if (irdma_cqp_ws_node_cmd(vsi->dev, cmd, &node_info)) {
ibdev_dbg(to_ibdev(vsi->dev), "WS: CQP WS CMD failed\n"); ibdev_dbg(to_ibdev(vsi->dev), "WS: CQP WS CMD failed\n");
return IRDMA_ERR_NO_MEMORY; return -ENOMEM;
} }
if (node->type_leaf && cmd == IRDMA_OP_WS_ADD_NODE) { if (node->type_leaf && cmd == IRDMA_OP_WS_ADD_NODE) {
...@@ -234,18 +233,18 @@ static void irdma_remove_leaf(struct irdma_sc_vsi *vsi, u8 user_pri) ...@@ -234,18 +233,18 @@ static void irdma_remove_leaf(struct irdma_sc_vsi *vsi, u8 user_pri)
* @vsi: vsi pointer * @vsi: vsi pointer
* @user_pri: user priority * @user_pri: user priority
*/ */
enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri) int irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
{ {
struct irdma_ws_node *ws_tree_root; struct irdma_ws_node *ws_tree_root;
struct irdma_ws_node *vsi_node; struct irdma_ws_node *vsi_node;
struct irdma_ws_node *tc_node; struct irdma_ws_node *tc_node;
u16 traffic_class; u16 traffic_class;
enum irdma_status_code ret = 0; int ret = 0;
int i; int i;
mutex_lock(&vsi->dev->ws_mutex); mutex_lock(&vsi->dev->ws_mutex);
if (vsi->tc_change_pending) { if (vsi->tc_change_pending) {
ret = IRDMA_ERR_NOT_READY; ret = -EBUSY;
goto exit; goto exit;
} }
...@@ -258,7 +257,7 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri) ...@@ -258,7 +257,7 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
ws_tree_root = irdma_alloc_node(vsi, user_pri, ws_tree_root = irdma_alloc_node(vsi, user_pri,
WS_NODE_TYPE_PARENT, NULL); WS_NODE_TYPE_PARENT, NULL);
if (!ws_tree_root) { if (!ws_tree_root) {
ret = IRDMA_ERR_NO_MEMORY; ret = -ENOMEM;
goto exit; goto exit;
} }
...@@ -283,7 +282,7 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri) ...@@ -283,7 +282,7 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
vsi_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_PARENT, vsi_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_PARENT,
ws_tree_root); ws_tree_root);
if (!vsi_node) { if (!vsi_node) {
ret = IRDMA_ERR_NO_MEMORY; ret = -ENOMEM;
goto vsi_add_err; goto vsi_add_err;
} }
...@@ -310,7 +309,7 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri) ...@@ -310,7 +309,7 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
tc_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_LEAF, tc_node = irdma_alloc_node(vsi, user_pri, WS_NODE_TYPE_LEAF,
vsi_node); vsi_node);
if (!tc_node) { if (!tc_node) {
ret = IRDMA_ERR_NO_MEMORY; ret = -ENOMEM;
goto leaf_add_err; goto leaf_add_err;
} }
......
...@@ -34,7 +34,7 @@ struct irdma_ws_node { ...@@ -34,7 +34,7 @@ struct irdma_ws_node {
}; };
struct irdma_sc_vsi; struct irdma_sc_vsi;
enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri); int irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri);
void irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri); void irdma_ws_remove(struct irdma_sc_vsi *vsi, u8 user_pri);
void irdma_ws_reset(struct irdma_sc_vsi *vsi); void irdma_ws_reset(struct irdma_sc_vsi *vsi);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment