Commit b292fb80 authored by Linus Torvalds

Merge tag 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull more rdma updates from Doug Ledford:
 "This merge window was the first where Huawei had to try and coordinate
  their patches between their net driver and their new roce driver
  (similar to mlx4 and mlx5).

  They didn't do horribly, but there were some issues (and we knew that
  because they simply didn't know what to do in the beginning). As a
  result, I had a set of patches that depended on some patches that
  normally would have come to you via Dave's tree. Those patches have
  been on netdev@ for a while, so I got Dave to give me his approval to
  send them to you. As such, the other 29 patches I had behind them are
  also now ready to go.

  This catches the hns and hns-roce drivers up to current, and for
  future patches we are working with them to get them up to speed on
  how to do joint driver development so that they don't have these
  sorts of cross-tree dependency issues again. BTW, Dave gave me
  permission to add his Acked-by: to the patches against the net tree,
  but I've had this branch through 0day (though not linux-next, since
  it was off by itself) and I didn't want to rebase the series just to
  add Dave's ack for the 8 patches in the net area.

  Updates to the hns drivers:

   - Small patch set for hns net driver that the roce patches depend on

   - Various fixes to the hns-roce driver

   - Add connection manager support to the hns-roce driver"

* tag 'for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (36 commits)
  IB/hns: Fix for removal of redundant code
  IB/hns: Delete the redundant lines in hns_roce_v1_m_qp()
  IB/hns: Fix the bug when platform_get_resource() exec fail
  IB/hns: Update the rq head when modify qp state
  IB/hns: Cq has not been freed
  IB/hns: Validate mtu when modified qp
  IB/hns: Some items of qpc need to take user param
  IB/hns: The Ack timeout need a lower limit value
  IB/hns: Return bad wr while post send failed
  IB/hns: Fix bug of memory leakage for registering user mr
  IB/hns: Modify the init of iboe lock
  IB/hns: Optimize code of aeq and ceq interrupt handle and fix the bug of qpn
  IB/hns: Delete the sqp_start from the structure hns_roce_caps
  IB/hns: Fix bug of clear hem
  IB/hns: Remove unused parameter named qp_type
  IB/hns: Simplify function of pd alloc and qp alloc
  IB/hns: Fix bug of using uninit refcount and free
  IB/hns: Remove parameters of resize cq
  IB/hns: Remove unused parameters in some functions
  IB/hns: Add node_guid definition to the bindings document
  ...
parents 689f891c 1bdab400
@@ -14,6 +14,7 @@ length of memory mapped region.
 representing a ethernet device.
 - dsaf-handle: phandle, specifies a reference to a node
 representing a dsaf device.
+- node_guid: a number that uniquely identifies a device or component
 - #address-cells: must be 2
 - #size-cells: must be 2
 Optional properties:
@@ -32,6 +33,7 @@ Example:
 dma-coherent;
 eth-handle = <&eth2 &eth3 &eth4 &eth5 &eth6 &eth7>;
 dsaf-handle = <&soc0_dsa>;
+node-guid = [00 9A CD 00 00 01 02 03];
 #address-cells = <2>;
 #size-cells = <2>;
 interrupt-parent = <&mbigen_dsa>;
...
@@ -83,8 +83,7 @@ static int hns_roce_sw2hw_cq(struct hns_roce_dev *dev,
 static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
                              struct hns_roce_mtt *hr_mtt,
                              struct hns_roce_uar *hr_uar,
-                             struct hns_roce_cq *hr_cq, int vector,
-                             int collapsed)
+                             struct hns_roce_cq *hr_cq, int vector)
 {
     struct hns_roce_cmd_mailbox *mailbox = NULL;
     struct hns_roce_cq_table *cq_table = NULL;
@@ -153,6 +152,9 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
     hr_cq->cons_index = 0;
     hr_cq->uar = hr_uar;
+    atomic_set(&hr_cq->refcount, 1);
+    init_completion(&hr_cq->free);
     return 0;
 err_radix:
@@ -192,6 +194,11 @@ static void hns_roce_free_cq(struct hns_roce_dev *hr_dev,
     /* Waiting interrupt process procedure carried out */
     synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
+    /* wait for all interrupt processed */
+    if (atomic_dec_and_test(&hr_cq->refcount))
+        complete(&hr_cq->free);
+    wait_for_completion(&hr_cq->free);
     spin_lock_irq(&cq_table->lock);
     radix_tree_delete(&cq_table->tree, hr_cq->cqn);
     spin_unlock_irq(&cq_table->lock);
@@ -300,10 +307,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
     cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
     hr_cq->ib_cq.cqe = cq_entries - 1;
-    mutex_init(&hr_cq->resize_mutex);
     spin_lock_init(&hr_cq->lock);
-    hr_cq->hr_resize_buf = NULL;
-    hr_cq->resize_umem = NULL;
     if (context) {
         if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
@@ -338,8 +342,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
     }
     /* Allocate cq index, fill cq_context */
-    ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt,
-                            uar, hr_cq, vector, 0);
+    ret = hns_roce_cq_alloc(hr_dev, cq_entries, &hr_cq->hr_buf.hr_mtt, uar,
+                            hr_cq, vector);
     if (ret) {
         dev_err(dev, "Creat CQ .Failed to cq_alloc.\n");
         goto err_mtt;
@@ -353,12 +357,15 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
     if (context) {
         if (ib_copy_to_udata(udata, &hr_cq->cqn, sizeof(u64))) {
             ret = -EFAULT;
-            goto err_mtt;
+            goto err_cqc;
         }
     }
     return &hr_cq->ib_cq;
+err_cqc:
+    hns_roce_free_cq(hr_dev, hr_cq);
 err_mtt:
     hns_roce_mtt_cleanup(hr_dev, &hr_cq->hr_buf.hr_mtt);
     if (context)
...
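The refcount-plus-completion pair added to the CQ path above is a standard kernel teardown idiom: every in-flight user (here, the interrupt-side event handlers) holds a reference, and the destroyer drops the initial reference and then sleeps until the last user signals the completion. A minimal sketch of the idiom outside the driver (the obj_* names are illustrative, not part of hns-roce):

    #include <linux/atomic.h>
    #include <linux/completion.h>

    struct obj {
        atomic_t refcount;      /* starts at 1: the owner's reference */
        struct completion free; /* fires when the last reference drops */
    };

    static void obj_init(struct obj *o)
    {
        atomic_set(&o->refcount, 1);
        init_completion(&o->free);
    }

    /* Each user (e.g. an event handler) drops its reference when done. */
    static void obj_put(struct obj *o)
    {
        if (atomic_dec_and_test(&o->refcount))
            complete(&o->free);
    }

    /* The owner drops its reference, then waits out all remaining users. */
    static void obj_destroy(struct obj *o)
    {
        obj_put(o);
        wait_for_completion(&o->free);
        /* no user can touch *o past this point; safe to free it */
    }

This is exactly the shape of the hns_roce_free_cq() change: synchronize_irq() fences the handler that is currently running, and the completion covers handlers that still hold a reference.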
@@ -62,7 +62,7 @@
 #define HNS_ROCE_AEQE_OF_VEC_NUM 1
 /* 4G/4K = 1M */
-#define HNS_ROCE_SL_SHIFT 29
+#define HNS_ROCE_SL_SHIFT 28
 #define HNS_ROCE_TCLASS_SHIFT 20
 #define HNS_ROCE_FLOW_LABLE_MASK 0xfffff
@@ -74,7 +74,9 @@
 #define MR_TYPE_DMA 0x03
 #define PKEY_ID 0xffff
+#define GUID_LEN 8
 #define NODE_DESC_SIZE 64
+#define DB_REG_OFFSET 0x1000
 #define SERV_TYPE_RC 0
 #define SERV_TYPE_RD 1
@@ -282,20 +284,11 @@ struct hns_roce_cq_buf {
     struct hns_roce_mtt hr_mtt;
 };
-struct hns_roce_cq_resize {
-    struct hns_roce_cq_buf hr_buf;
-    int cqe;
-};
 struct hns_roce_cq {
     struct ib_cq ib_cq;
     struct hns_roce_cq_buf hr_buf;
-    /* pointer to store information after resize*/
-    struct hns_roce_cq_resize *hr_resize_buf;
     spinlock_t lock;
-    struct mutex resize_mutex;
     struct ib_umem *umem;
-    struct ib_umem *resize_umem;
     void (*comp)(struct hns_roce_cq *);
     void (*event)(struct hns_roce_cq *, enum hns_roce_event);
@@ -408,6 +401,7 @@ struct hns_roce_qp {
     u32 buff_size;
     struct mutex mutex;
     u8 port;
+    u8 phy_port;
     u8 sl;
     u8 resp_depth;
     u8 state;
@@ -471,7 +465,6 @@ struct hns_roce_caps {
     u32 max_rq_desc_sz; /* 64 */
     int max_qp_init_rdma;
     int max_qp_dest_rdma;
-    int sqp_start;
     int num_cqs;
     int max_cqes;
     int reserved_cqs;
@@ -512,6 +505,8 @@ struct hns_roce_hw {
     void (*write_cqc)(struct hns_roce_dev *hr_dev,
                       struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
                       dma_addr_t dma_handle, int nent, u32 vector);
+    int (*clear_hem)(struct hns_roce_dev *hr_dev,
+                     struct hns_roce_hem_table *table, int obj);
     int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
                     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
     int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
@@ -533,7 +528,6 @@ struct hns_roce_dev {
     struct hns_roce_uar priv_uar;
     const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
     spinlock_t sm_lock;
-    spinlock_t cq_db_lock;
     spinlock_t bt_cmd_lock;
     struct hns_roce_ib_iboe iboe;
...
@@ -66,9 +66,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
 {
     struct device *dev = &hr_dev->pdev->dev;
-    qpn = roce_get_field(aeqe->event.qp_event.qp,
-                         HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-                         HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
     dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
     switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
                            HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -96,13 +93,6 @@ static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
     default:
         break;
     }
-    hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
-                      HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-                      HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
-                      roce_get_field(aeqe->asyn,
-                      HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-                      HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
 }
 static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
@@ -111,9 +101,6 @@ static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
 {
     struct device *dev = &hr_dev->pdev->dev;
-    qpn = roce_get_field(aeqe->event.qp_event.qp,
-                         HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-                         HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
     dev_warn(dev, "Local Access Violation Work Queue Error.\n");
     switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
                            HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
@@ -141,13 +128,69 @@ static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
     default:
         break;
     }
+}
+
+static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
+                                   struct hns_roce_aeqe *aeqe,
+                                   int event_type)
+{
+    struct device *dev = &hr_dev->pdev->dev;
+    int phy_port;
+    int qpn;
+
+    qpn = roce_get_field(aeqe->event.qp_event.qp,
+                         HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
+                         HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
+    phy_port = roce_get_field(aeqe->event.qp_event.qp,
+                              HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
+                              HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
+    if (qpn <= 1)
+        qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
+
+    switch (event_type) {
+    case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+        dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
+                 "QP %d, phy_port %d.\n", qpn, phy_port);
+        break;
+    case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+        hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
+        break;
+    case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+        hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+        break;
+    default:
+        break;
+    }
+
+    hns_roce_qp_event(hr_dev, qpn, event_type);
+}
+
+static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
+                                   struct hns_roce_aeqe *aeqe,
+                                   int event_type)
+{
+    struct device *dev = &hr_dev->pdev->dev;
+    u32 cqn;
+
+    cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
+                      HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
+                      HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
+
+    switch (event_type) {
+    case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+        dev_warn(dev, "CQ 0x%x access err.\n", cqn);
+        break;
+    case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+        dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+        break;
+    case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
+        dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
+        break;
+    default:
+        break;
+    }
+
-    hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
-                      HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-                      HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
-                      roce_get_field(aeqe->asyn,
-                      HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-                      HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+    hns_roce_cq_event(hr_dev, cqn, event_type);
 }
 static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
@@ -185,7 +228,7 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
     struct device *dev = &hr_dev->pdev->dev;
     struct hns_roce_aeqe *aeqe;
     int aeqes_found = 0;
-    int qpn = 0;
+    int event_type;
     while ((aeqe = next_aeqe_sw(eq))) {
         dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
@@ -195,9 +238,10 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
         /* Memory barrier */
         rmb();
-        switch (roce_get_field(aeqe->asyn,
-                HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-                HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)) {
+        event_type = roce_get_field(aeqe->asyn,
+                     HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
+                     HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
+        switch (event_type) {
         case HNS_ROCE_EVENT_TYPE_PATH_MIG:
             dev_warn(dev, "PATH MIG not supported\n");
             break;
@@ -211,23 +255,9 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
             dev_warn(dev, "PATH MIG failed\n");
             break;
         case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
-            dev_warn(dev, "qpn = 0x%lx\n",
-                     roce_get_field(aeqe->event.qp_event.qp,
-                     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-                     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S));
-            hns_roce_qp_event(hr_dev,
-                roce_get_field(aeqe->event.qp_event.qp,
-                HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
-                HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
-                roce_get_field(aeqe->asyn,
-                HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-                HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
-            break;
         case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
-            hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
-            break;
         case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
-            hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
+            hns_roce_qp_err_handle(hr_dev, aeqe, event_type);
             break;
         case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
         case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
@@ -235,40 +265,9 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
             dev_warn(dev, "SRQ not support!\n");
             break;
         case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
-            dev_warn(dev, "CQ 0x%lx access err.\n",
-                     roce_get_field(aeqe->event.cq_event.cq,
-                     HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-                     HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
-            hns_roce_cq_event(hr_dev,
-                le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
-                HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-                HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
-                roce_get_field(aeqe->asyn,
-                HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-                HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
-            break;
         case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
-            dev_warn(dev, "CQ 0x%lx overflow\n",
-                     roce_get_field(aeqe->event.cq_event.cq,
-                     HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-                     HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
-            hns_roce_cq_event(hr_dev,
-                le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
-                HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-                HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
-                roce_get_field(aeqe->asyn,
-                HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-                HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
-            break;
         case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
-            dev_warn(dev, "CQ ID invalid.\n");
-            hns_roce_cq_event(hr_dev,
-                le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
-                HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
-                HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
-                roce_get_field(aeqe->asyn,
-                HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-                HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
+            hns_roce_cq_err_handle(hr_dev, aeqe, event_type);
             break;
         case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
             dev_warn(dev, "port change.\n");
@@ -290,11 +289,8 @@ static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
                 HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
             break;
         default:
-            dev_warn(dev, "Unhandled event 0x%lx on EQ %d at index %u\n",
-                     roce_get_field(aeqe->asyn,
-                     HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
-                     HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S),
-                     eq->eqn, eq->cons_index);
+            dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
+                     event_type, eq->eqn, eq->cons_index);
             break;
         };
...
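One detail worth noting in the new hns_roce_qp_err_handle() above: for the special QPs the event queue entry carries only 0 (QP0) or 1 (QP1) plus the physical port number, and the driver stores its special QPs port-major, so the software QP number has to be recovered before dispatch. A worked sketch (the port count is illustrative):

    /* With HNS_ROCE_MAX_PORTS == 6 (illustrative), special QPs are
     * laid out port-major, matching SQP_NUM == 2 * HNS_ROCE_MAX_PORTS:
     *
     *   QP0 on phy_port 0..5  ->  software qpn 0..5
     *   QP1 on phy_port 0..5  ->  software qpn 6..11
     */
    if (qpn <= 1)
        qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;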
@@ -107,6 +107,10 @@ struct hns_roce_aeqe {
 #define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M \
     (((1UL << 24) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S)
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
+#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M \
+    (((1UL << 3) - 1) << HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S)
 #define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
 #define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M \
     (((1UL << 16) - 1) << HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)
...
@@ -36,14 +36,10 @@
 #include "hns_roce_hem.h"
 #include "hns_roce_common.h"
-#define HW_SYNC_TIMEOUT_MSECS 500
-#define HW_SYNC_SLEEP_TIME_INTERVAL 20
 #define HNS_ROCE_HEM_ALLOC_SIZE (1 << 17)
 #define HNS_ROCE_TABLE_CHUNK_SIZE (1 << 17)
 #define DMA_ADDR_T_SHIFT 12
-#define BT_CMD_SYNC_SHIFT 31
 #define BT_BA_SHIFT 32
 struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
@@ -213,74 +209,6 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
     return ret;
 }
-static int hns_roce_clear_hem(struct hns_roce_dev *hr_dev,
-                              struct hns_roce_hem_table *table,
-                              unsigned long obj)
-{
-    struct device *dev = &hr_dev->pdev->dev;
-    unsigned long end = 0;
-    unsigned long flags;
-    void __iomem *bt_cmd;
-    uint32_t bt_cmd_val[2];
-    u32 bt_cmd_h_val = 0;
-    int ret = 0;
-    switch (table->type) {
-    case HEM_TYPE_QPC:
-        roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
-                       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
-        break;
-    case HEM_TYPE_MTPT:
-        roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
-                       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
-                       HEM_TYPE_MTPT);
-        break;
-    case HEM_TYPE_CQC:
-        roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
-                       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
-        break;
-    case HEM_TYPE_SRQC:
-        roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
-                       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
-                       HEM_TYPE_SRQC);
-        break;
-    default:
-        return ret;
-    }
-    roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
-                   ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
-    roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
-    roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
-    roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
-                   ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, 0);
-    spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
-    bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
-    end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
-    while (1) {
-        if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
-            if (!(time_before(jiffies, end))) {
-                dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
-                spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
-                                       flags);
-                return -EBUSY;
-            }
-        } else {
-            break;
-        }
-        msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
-    }
-    bt_cmd_val[0] = 0;
-    bt_cmd_val[1] = bt_cmd_h_val;
-    hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
-    spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
-    return ret;
-}
 int hns_roce_table_get(struct hns_roce_dev *hr_dev,
                        struct hns_roce_hem_table *table, unsigned long obj)
 {
@@ -333,7 +261,7 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
     if (--table->hem[i]->refcount == 0) {
         /* Clear HEM base address */
-        if (hns_roce_clear_hem(hr_dev, table, obj))
+        if (hr_dev->hw->clear_hem(hr_dev, table, obj))
             dev_warn(dev, "Clear HEM base address failed.\n");
         hns_roce_free_hem(hr_dev, table->hem[i]);
@@ -456,7 +384,7 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
     for (i = 0; i < table->num_hem; ++i)
         if (table->hem[i]) {
-            if (hns_roce_clear_hem(hr_dev, table,
+            if (hr_dev->hw->clear_hem(hr_dev, table,
                 i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
                 dev_err(dev, "Clear HEM base address failed.\n");
...
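Routing the call through hr_dev->hw->clear_hem instead of a file-local hns_roce_clear_hem() is the usual hardware-abstraction split: the generic HEM table code stays revision-agnostic, and each hardware version registers its own implementation in struct hns_roce_hw. A condensed sketch of the shape (the v2 stub below is hypothetical, not part of this series):

    /* Generic side: call through the per-revision ops table. */
    static void put_hem_entry(struct hns_roce_dev *hr_dev,
                              struct hns_roce_hem_table *table, int obj)
    {
        if (hr_dev->hw->clear_hem(hr_dev, table, obj))
            dev_warn(&hr_dev->pdev->dev,
                     "Clear HEM base address failed.\n");
    }

    /* Hardware side: a future v2 would only have to fill in its hook. */
    static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
                                     struct hns_roce_hem_table *table,
                                     int obj)
    {
        return 0;   /* hypothetical: issue the v2 command here */
    }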
@@ -34,6 +34,10 @@
 #ifndef _HNS_ROCE_HEM_H
 #define _HNS_ROCE_HEM_H
+#define HW_SYNC_TIMEOUT_MSECS 500
+#define HW_SYNC_SLEEP_TIME_INTERVAL 20
+#define BT_CMD_SYNC_SHIFT 31
 enum {
     /* MAP HEM(Hardware Entry Memory) */
     HEM_TYPE_QPC = 0,
...
@@ -73,8 +73,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
     u32 ind = 0;
     int ret = 0;
-    spin_lock_irqsave(&qp->sq.lock, flags);
+    if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
+        ibqp->qp_type != IB_QPT_RC)) {
+        dev_err(dev, "un-supported QP type\n");
+        *bad_wr = NULL;
+        return -EOPNOTSUPP;
+    }
+
+    spin_lock_irqsave(&qp->sq.lock, flags);
     ind = qp->sq_next_wqe;
     for (nreq = 0; wr; ++nreq, wr = wr->next) {
         if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
@@ -162,7 +168,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
             roce_set_field(ud_sq_wqe->u32_36,
                            UD_SEND_WQE_U32_36_SGID_INDEX_M,
                            UD_SEND_WQE_U32_36_SGID_INDEX_S,
-                           hns_get_gid_index(hr_dev, qp->port,
+                           hns_get_gid_index(hr_dev, qp->phy_port,
                                              ah->av.gid_index));
             roce_set_field(ud_sq_wqe->u32_40,
@@ -205,8 +211,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                 (wr->send_flags & IB_SEND_FENCE ?
                 (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);
-            wqe = (struct hns_roce_wqe_ctrl_seg *)wqe +
-                  sizeof(struct hns_roce_wqe_ctrl_seg);
+            wqe += sizeof(struct hns_roce_wqe_ctrl_seg);
             switch (wr->opcode) {
             case IB_WR_RDMA_READ:
@@ -235,8 +240,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                 break;
             }
             ctrl->flag |= cpu_to_le32(ps_opcode);
-            wqe = (struct hns_roce_wqe_raddr_seg *)wqe +
-                  sizeof(struct hns_roce_wqe_raddr_seg);
+            wqe += sizeof(struct hns_roce_wqe_raddr_seg);
             dseg = wqe;
             if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
@@ -253,8 +257,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                     memcpy(wqe, ((void *) (uintptr_t)
                            wr->sg_list[i].addr),
                            wr->sg_list[i].length);
-                    wqe = (struct hns_roce_wqe_raddr_seg *)
-                          wqe + wr->sg_list[i].length;
+                    wqe += wr->sg_list[i].length;
                 }
                 ctrl->flag |= HNS_ROCE_WQE_INLINE;
             } else {
@@ -266,9 +269,6 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                HNS_ROCE_WQE_SGE_NUM_BIT);
             }
             ind++;
-        } else {
-            dev_dbg(dev, "unSupported QP type\n");
-            break;
         }
     }
@@ -285,7 +285,7 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        SQ_DOORBELL_U32_4_SQ_HEAD_S,
                        (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
         roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
-                       SQ_DOORBELL_U32_4_PORT_S, qp->port);
+                       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
         roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
                        SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
         roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
@@ -365,14 +365,14 @@ int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
         /* SW update GSI rq header */
         reg_val = roce_read(to_hr_dev(ibqp->device),
                             ROCEE_QP1C_CFG3_0_REG +
-                            QP1C_CFGN_OFFSET * hr_qp->port);
+                            QP1C_CFGN_OFFSET * hr_qp->phy_port);
         roce_set_field(reg_val,
                        ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
                        ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
                        hr_qp->rq.head);
         roce_write(to_hr_dev(ibqp->device),
                    ROCEE_QP1C_CFG3_0_REG +
-                   QP1C_CFGN_OFFSET * hr_qp->port, reg_val);
+                   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
     } else {
         rq_db.u32_4 = 0;
         rq_db.u32_8 = 0;
@@ -789,6 +789,66 @@ static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
     }
 }
+static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
+{
+    struct device *dev = &hr_dev->pdev->dev;
+    struct hns_roce_v1_priv *priv;
+    int ret;
+
+    priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+    priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
+        HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
+        GFP_KERNEL);
+    if (!priv->bt_table.qpc_buf.buf)
+        return -ENOMEM;
+
+    priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
+        HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
+        GFP_KERNEL);
+    if (!priv->bt_table.mtpt_buf.buf) {
+        ret = -ENOMEM;
+        goto err_failed_alloc_mtpt_buf;
+    }
+
+    priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
+        HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
+        GFP_KERNEL);
+    if (!priv->bt_table.cqc_buf.buf) {
+        ret = -ENOMEM;
+        goto err_failed_alloc_cqc_buf;
+    }
+
+    return 0;
+
+err_failed_alloc_cqc_buf:
+    dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+        priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
+
+err_failed_alloc_mtpt_buf:
+    dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+        priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
+
+    return ret;
+}
+
+static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
+{
+    struct device *dev = &hr_dev->pdev->dev;
+    struct hns_roce_v1_priv *priv;
+
+    priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+    dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+        priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
+
+    dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+        priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
+
+    dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
+        priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
+}
+
 /**
  * hns_roce_v1_reset - reset RoCE
  * @hr_dev: RoCE device struct pointer
@@ -879,7 +939,6 @@ void hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
     caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
     caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
     caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
-    caps->sqp_start = 0;
     caps->reserved_lkey = 0;
     caps->reserved_pds = 0;
     caps->reserved_mrws = 1;
@@ -944,8 +1003,18 @@ int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
     hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
+    ret = hns_roce_bt_init(hr_dev);
+    if (ret) {
+        dev_err(dev, "bt init failed!\n");
+        goto error_failed_bt_init;
+    }
+
     return 0;
+error_failed_bt_init:
+    hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
+    hns_roce_raq_free(hr_dev);
+
 error_failed_raq_init:
     hns_roce_db_free(hr_dev);
     return ret;
@@ -953,6 +1022,7 @@ int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
 void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
 {
+    hns_roce_bt_free(hr_dev);
     hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
     hns_roce_raq_free(hr_dev);
     hns_roce_db_free(hr_dev);
@@ -1192,9 +1262,7 @@ static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
     return get_sw_cqe(hr_cq, hr_cq->cons_index);
 }
-void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index,
-                           spinlock_t *doorbell_lock)
+void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
 {
     u32 doorbell[2];
@@ -1254,8 +1322,7 @@ static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
          */
         wmb();
-        hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index,
-                              &to_hr_dev(hr_cq->ib_cq.device)->cq_db_lock);
+        hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
     }
 }
@@ -1485,7 +1552,8 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
         /* SQ conrespond to CQE */
         sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
                               CQE_BYTE_4_WQE_INDEX_M,
-                              CQE_BYTE_4_WQE_INDEX_S));
+                              CQE_BYTE_4_WQE_INDEX_S)&
+                              ((*cur_qp)->sq.wqe_cnt-1));
         switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) {
         case HNS_ROCE_WQE_OPCODE_SEND:
             wc->opcode = IB_WC_SEND;
@@ -1591,10 +1659,8 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
             break;
     }
-    if (npolled) {
-        hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index,
-                              &to_hr_dev(ibcq->device)->cq_db_lock);
-    }
+    if (npolled)
+        hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
     spin_unlock_irqrestore(&hr_cq->lock, flags);
@@ -1604,6 +1670,74 @@ int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
     return ret;
 }
+int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
+                          struct hns_roce_hem_table *table, int obj)
+{
+    struct device *dev = &hr_dev->pdev->dev;
+    struct hns_roce_v1_priv *priv;
+    unsigned long end = 0, flags = 0;
+    uint32_t bt_cmd_val[2] = {0};
+    void __iomem *bt_cmd;
+    u64 bt_ba = 0;
+
+    priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
+
+    switch (table->type) {
+    case HEM_TYPE_QPC:
+        roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+                       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
+        bt_ba = priv->bt_table.qpc_buf.map >> 12;
+        break;
+    case HEM_TYPE_MTPT:
+        roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+                       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_MTPT);
+        bt_ba = priv->bt_table.mtpt_buf.map >> 12;
+        break;
+    case HEM_TYPE_CQC:
+        roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
+                       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
+        bt_ba = priv->bt_table.cqc_buf.map >> 12;
+        break;
+    case HEM_TYPE_SRQC:
+        dev_dbg(dev, "HEM_TYPE_SRQC not support.\n");
+        return -EINVAL;
+    default:
+        return 0;
+    }
+    roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
+                   ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
+    roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
+    roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
+
+    spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
+
+    bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
+
+    end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
+    while (1) {
+        if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
+            if (!(time_before(jiffies, end))) {
+                dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n");
+                spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
+                                       flags);
+                return -EBUSY;
+            }
+        } else {
+            break;
+        }
+        msleep(HW_SYNC_SLEEP_TIME_INTERVAL);
+    }
+
+    bt_cmd_val[0] = (uint32_t)bt_ba;
+    roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
+                   ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
+    hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
+
+    spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
+
+    return 0;
+}
+
 static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
                                  struct hns_roce_mtt *mtt,
                                  enum hns_roce_qp_state cur_state,
@@ -1733,13 +1867,10 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
         roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
                        QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
         roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
-                       QP1C_BYTES_16_PORT_NUM_S, hr_qp->port);
+                       QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
         roce_set_bit(context->qp1c_bytes_16,
                      QP1C_BYTES_16_SIGNALING_TYPE_S,
                      hr_qp->sq_signal_bits);
-        roce_set_bit(context->qp1c_bytes_16,
-                     QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S,
-                     hr_qp->sq_signal_bits);
         roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
                      1);
         roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
@@ -1784,7 +1915,7 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
         /* Copy context to QP1C register */
         addr = (u32 *)(hr_dev->reg_base + ROCEE_QP1C_CFG0_0_REG +
-               hr_qp->port * sizeof(*context));
+               hr_qp->phy_port * sizeof(*context));
         writel(context->qp1c_bytes_4, addr);
         writel(context->sq_rq_bt_l, addr + 1);
@@ -1795,15 +1926,16 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
         writel(context->qp1c_bytes_28, addr + 6);
         writel(context->qp1c_bytes_32, addr + 7);
         writel(context->cur_sq_wqe_ba_l, addr + 8);
+        writel(context->qp1c_bytes_40, addr + 9);
     }
     /* Modify QP1C status */
     reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
-                        hr_qp->port * sizeof(*context));
+                        hr_qp->phy_port * sizeof(*context));
     roce_set_field(reg_val, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
                    ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
     roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
-               hr_qp->port * sizeof(*context), reg_val);
+               hr_qp->phy_port * sizeof(*context), reg_val);
     hr_qp->state = new_state;
     if (new_state == IB_QPS_RESET) {
@@ -1836,12 +1968,10 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
     struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
     struct device *dev = &hr_dev->pdev->dev;
     struct hns_roce_qp_context *context;
-    struct hns_roce_rq_db rq_db;
     dma_addr_t dma_handle_2 = 0;
     dma_addr_t dma_handle = 0;
     uint32_t doorbell[2] = {0};
     int rq_pa_start = 0;
-    u32 reg_val = 0;
     u64 *mtts_2 = NULL;
     int ret = -EINVAL;
     u64 *mtts = NULL;
@@ -2119,7 +2249,8 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
         roce_set_field(context->qpc_bytes_68,
                        QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
-                       QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, 0);
+                       QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
+                       hr_qp->rq.head);
         roce_set_field(context->qpc_bytes_68,
                        QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
                        QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
@@ -2186,7 +2317,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
         roce_set_field(context->qpc_bytes_156,
                        QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
                        QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
-                       hr_qp->port);
+                       hr_qp->phy_port);
         roce_set_field(context->qpc_bytes_156,
                        QP_CONTEXT_QPC_BYTES_156_SL_M,
                        QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
@@ -2257,20 +2388,17 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
         roce_set_bit(context->qpc_bytes_140,
                      QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
-        roce_set_field(context->qpc_bytes_144,
-                       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
-                       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S,
-                       attr->qp_state);
         roce_set_field(context->qpc_bytes_148,
                        QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
                        QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
         roce_set_field(context->qpc_bytes_148,
                        QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
-                       QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, 0);
+                       QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
+                       attr->retry_cnt);
         roce_set_field(context->qpc_bytes_148,
                        QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
-                       QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, 0);
+                       QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
+                       attr->rnr_retry);
         roce_set_field(context->qpc_bytes_148,
                        QP_CONTEXT_QPC_BYTES_148_LSN_M,
                        QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
@@ -2281,10 +2409,19 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
                        QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
                        QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
                        attr->retry_cnt);
-        roce_set_field(context->qpc_bytes_156,
-                       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
-                       QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
-                       attr->timeout);
+        if (attr->timeout < 0x12) {
+            dev_info(dev, "ack timeout value(0x%x) must bigger than 0x12.\n",
+                     attr->timeout);
+            roce_set_field(context->qpc_bytes_156,
+                           QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+                           QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+                           0x12);
+        } else {
+            roce_set_field(context->qpc_bytes_156,
+                           QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
+                           QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
+                           attr->timeout);
+        }
         roce_set_field(context->qpc_bytes_156,
                        QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
                        QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
@@ -2292,7 +2429,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
         roce_set_field(context->qpc_bytes_156,
                        QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
                        QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
-                       hr_qp->port);
+                       hr_qp->phy_port);
         roce_set_field(context->qpc_bytes_156,
                        QP_CONTEXT_QPC_BYTES_156_SL_M,
                        QP_CONTEXT_QPC_BYTES_156_SL_S, attr->ah_attr.sl);
@@ -2357,21 +2494,15 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
                        QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
                        QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
                        0);
-    } else if ((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
+    } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
         (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
         (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
         (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
         (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
        (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
        (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
-        (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
-        roce_set_field(context->qpc_bytes_144,
-                       QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
-                       QP_CONTEXT_QPC_BYTES_144_QP_STATE_S,
-                       attr->qp_state);
-    } else {
-        dev_err(dev, "not support this modify\n");
+        (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
+        dev_err(dev, "not support this status migration\n");
         goto out;
     }
@@ -2397,43 +2528,32 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
     if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
         /* Memory barrier */
         wmb();
-        if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
-            /* SW update GSI rq header */
-            reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG3_0_REG +
-                                QP1C_CFGN_OFFSET * hr_qp->port);
-            roce_set_field(reg_val,
-                           ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
-                           ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
-                           hr_qp->rq.head);
-            roce_write(hr_dev, ROCEE_QP1C_CFG3_0_REG +
-                       QP1C_CFGN_OFFSET * hr_qp->port, reg_val);
-        } else {
-            rq_db.u32_4 = 0;
-            rq_db.u32_8 = 0;
-            roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
-                           RQ_DOORBELL_U32_4_RQ_HEAD_S,
-                           hr_qp->rq.head);
-            roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
-                           RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
-            roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
-                           RQ_DOORBELL_U32_8_CMD_S, 1);
-            roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
-                         1);
-            doorbell[0] = rq_db.u32_4;
-            doorbell[1] = rq_db.u32_8;
-            hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
+        roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
+                       RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
+        roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
+                       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
+        roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
+                       RQ_DOORBELL_U32_8_CMD_S, 1);
+        roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
+
+        if (ibqp->uobject) {
+            hr_qp->rq.db_reg_l = hr_dev->reg_base +
+                                 ROCEE_DB_OTHERS_L_0_REG +
+                                 DB_REG_OFFSET * hr_dev->priv_uar.index;
         }
+
+        hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
     }
     hr_qp->state = new_state;
     if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
         hr_qp->resp_depth = attr->max_dest_rd_atomic;
-    if (attr_mask & IB_QP_PORT)
-        hr_qp->port = (attr->port_num - 1);
+    if (attr_mask & IB_QP_PORT) {
+        hr_qp->port = attr->port_num - 1;
+        hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+    }
     if (new_state == IB_QPS_RESET && !ibqp->uobject) {
         hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
@@ -2789,6 +2909,7 @@ struct hns_roce_hw hns_roce_hw_v1 = {
     .set_mtu = hns_roce_v1_set_mtu,
     .write_mtpt = hns_roce_v1_write_mtpt,
     .write_cqc = hns_roce_v1_write_cqc,
+    .clear_hem = hns_roce_v1_clear_hem,
    .modify_qp = hns_roce_v1_modify_qp,
     .query_qp = hns_roce_v1_query_qp,
     .destroy_qp = hns_roce_v1_destroy_qp,
...
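The BT command write in hns_roce_v1_clear_hem() above uses a bounded busy-poll: test the hardware sync bit, give up with -EBUSY once a jiffies deadline passes, and msleep() between reads. The same construct, condensed into a free-standing helper (a sketch; the register pointer and bit position are placeholders):

    #include <linux/delay.h>
    #include <linux/io.h>
    #include <linux/jiffies.h>

    #define POLL_TIMEOUT_MSECS  500 /* overall deadline */
    #define POLL_SLEEP_MSECS    20  /* pause between reads */

    /* Returns 0 once bit @shift of @reg reads back zero, -EBUSY on timeout. */
    static int wait_hw_sync_clear(void __iomem *reg, unsigned int shift)
    {
        unsigned long end = jiffies + msecs_to_jiffies(POLL_TIMEOUT_MSECS);

        while (readl(reg) >> shift) {
            if (!time_before(jiffies, end))
                return -EBUSY;
            msleep(POLL_SLEEP_MSECS);
        }
        return 0;
    }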
@@ -102,6 +102,8 @@
 #define HNS_ROCE_V1_EXT_ODB_ALFUL \
     (HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
+#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17)
 #define HNS_ROCE_ODB_POLL_MODE 0
 #define HNS_ROCE_SDB_NORMAL_MODE 0
@@ -971,9 +973,16 @@ struct hns_roce_db_table {
     struct hns_roce_ext_db *ext_db;
 };
+struct hns_roce_bt_table {
+    struct hns_roce_buf_list qpc_buf;
+    struct hns_roce_buf_list mtpt_buf;
+    struct hns_roce_buf_list cqc_buf;
+};
+
 struct hns_roce_v1_priv {
     struct hns_roce_db_table db_table;
     struct hns_roce_raq_table raq_table;
+    struct hns_roce_bt_table bt_table;
 };
 int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
...
@@ -355,8 +355,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
     props->max_qp = hr_dev->caps.num_qps;
     props->max_qp_wr = hr_dev->caps.max_wqes;
     props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
-                              IB_DEVICE_RC_RNR_NAK_GEN |
-                              IB_DEVICE_LOCAL_DMA_LKEY;
+                              IB_DEVICE_RC_RNR_NAK_GEN;
     props->max_sge = hr_dev->caps.max_sq_sg;
     props->max_sge_rd = 1;
     props->max_cq = hr_dev->caps.num_cqs;
@@ -372,6 +371,25 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
     return 0;
 }
+static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
+                                              u8 port_num)
+{
+    struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
+    struct net_device *ndev;
+
+    if (port_num < 1 || port_num > hr_dev->caps.num_ports)
+        return NULL;
+
+    rcu_read_lock();
+
+    ndev = hr_dev->iboe.netdevs[port_num - 1];
+    if (ndev)
+        dev_hold(ndev);
+
+    rcu_read_unlock();
+    return ndev;
+}
+
 static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
                                struct ib_port_attr *props)
 {
@@ -584,6 +602,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
     struct device *dev = &hr_dev->pdev->dev;
     iboe = &hr_dev->iboe;
+    spin_lock_init(&iboe->lock);
     ib_dev = &hr_dev->ib_dev;
     strlcpy(ib_dev->name, "hisi_%d", IB_DEVICE_NAME_MAX);
@@ -618,6 +637,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
     ib_dev->query_port = hns_roce_query_port;
     ib_dev->modify_port = hns_roce_modify_port;
     ib_dev->get_link_layer = hns_roce_get_link_layer;
+    ib_dev->get_netdev = hns_roce_get_netdev;
     ib_dev->query_gid = hns_roce_query_gid;
     ib_dev->query_pkey = hns_roce_query_pkey;
     ib_dev->alloc_ucontext = hns_roce_alloc_ucontext;
@@ -667,8 +687,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
         goto error_failed_setup_mtu_gids;
     }
-    spin_lock_init(&iboe->lock);
     iboe->nb.notifier_call = hns_roce_netdev_event;
     ret = register_netdevice_notifier(&iboe->nb);
     if (ret) {
@@ -777,6 +795,15 @@ static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
     if (IS_ERR(hr_dev->reg_base))
         return PTR_ERR(hr_dev->reg_base);
+    /* read the node_guid of IB device from the DT or ACPI */
+    ret = device_property_read_u8_array(dev, "node-guid",
+                                        (u8 *)&hr_dev->ib_dev.node_guid,
+                                        GUID_LEN);
+    if (ret) {
+        dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
+        return ret;
+    }
+
     /* get the RoCE associated ethernet ports or netdevices */
     for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
         if (dev_of_node(dev)) {
@@ -923,7 +950,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
     struct device *dev = &hr_dev->pdev->dev;
     spin_lock_init(&hr_dev->sm_lock);
-    spin_lock_init(&hr_dev->cq_db_lock);
     spin_lock_init(&hr_dev->bt_cmd_lock);
     ret = hns_roce_init_uar_table(hr_dev);
...
@@ -564,11 +564,14 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (mr->umem->page_size != HNS_ROCE_HEM_PAGE_SIZE) {
 		dev_err(dev, "Just support 4K page size but is 0x%x now!\n",
 			mr->umem->page_size);
+		ret = -EINVAL;
+		goto err_umem;
 	}
 
 	if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
 		dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n",
 			length);
+		ret = -EINVAL;
 		goto err_umem;
 	}
...
@@ -35,19 +35,7 @@
 static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
 {
-	struct device *dev = &hr_dev->pdev->dev;
-	unsigned long pd_number;
-	int ret = 0;
-
-	ret = hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, &pd_number);
-	if (ret == -1) {
-		dev_err(dev, "alloc pdn from pdbitmap failed\n");
-		return -ENOMEM;
-	}
-
-	*pdn = pd_number;
-
-	return 0;
+	return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
 }
 
 static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
@@ -117,9 +105,15 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 	if (ret == -1)
 		return -ENOMEM;
 
-	uar->index = (uar->index - 1) % hr_dev->caps.phy_num_uars + 1;
+	if (uar->index > 0)
+		uar->index = (uar->index - 1) %
+			     (hr_dev->caps.phy_num_uars - 1) + 1;
 
 	res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
+		return -EINVAL;
+	}
 	uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
 
 	return 0;
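
The reworked wrap keeps logical index 0 out of the rotation (the first UAR handed out, presumably the driver's own) and folds every other index into the range 1..phy_num_uars-1. A quick worked check, taking phy_num_uars = 4 as an assumed capability value:

    /* Worked check of the UAR index wrap; phy_num_uars = 4 is an
     * assumed value, not taken from the driver.
     */
    unsigned int phy_num_uars = 4;

    unsigned int wrap(unsigned int index)
    {
            if (index > 0)
                    index = (index - 1) % (phy_num_uars - 1) + 1;
            return index;   /* 0->0, 1->1, 2->2, 3->3, 4->1, 5->2, 6->3 */
    }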
...
@@ -32,14 +32,14 @@
  */
 
 #include <linux/platform_device.h>
+#include <rdma/ib_addr.h>
 #include <rdma/ib_umem.h>
 #include "hns_roce_common.h"
 #include "hns_roce_device.h"
 #include "hns_roce_hem.h"
 #include "hns_roce_user.h"
 
-#define DB_REG_OFFSET			0x1000
-#define SQP_NUM				12
+#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
 
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 {
@@ -113,16 +113,8 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
 				     int align, unsigned long *base)
 {
 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
-	int ret = 0;
-	unsigned long qpn;
-
-	ret = hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, &qpn);
-	if (ret == -1)
-		return -ENOMEM;
-
-	*base = qpn;
 
-	return 0;
+	return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
 }
 
 enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
@@ -255,7 +247,7 @@ void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
 {
 	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
 
-	if (base_qpn < (hr_dev->caps.sqp_start + 2 * hr_dev->caps.num_ports))
+	if (base_qpn < SQP_NUM)
 		return;
 
 	hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
@@ -345,12 +337,10 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
 static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 				       struct ib_qp_cap *cap,
-				       enum ib_qp_type type,
 				       struct hns_roce_qp *hr_qp)
 {
 	struct device *dev = &hr_dev->pdev->dev;
 	u32 max_cnt;
-	(void)type;
 
 	if (cap->max_send_wr > hr_dev->caps.max_wqes ||
 	    cap->max_send_sge > hr_dev->caps.max_sq_sg ||
@@ -476,7 +466,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 	/* Set SQ size */
 	ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
-					  init_attr->qp_type, hr_qp);
+					  hr_qp);
 	if (ret) {
 		dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
 		goto err_out;
@@ -617,21 +607,19 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
 			return ERR_PTR(-ENOMEM);
 
 		hr_qp = &hr_sqp->hr_qp;
+		hr_qp->port = init_attr->port_num - 1;
+		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+		hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
+				     hr_dev->iboe.phy_port[hr_qp->port];
 
 		ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
-						hr_dev->caps.sqp_start +
-						hr_dev->caps.num_ports +
-						init_attr->port_num - 1, hr_qp);
+						hr_qp->ibqp.qp_num, hr_qp);
 		if (ret) {
 			dev_err(dev, "Create GSI QP failed!\n");
 			kfree(hr_sqp);
 			return ERR_PTR(ret);
 		}
 
-		hr_qp->port = (init_attr->port_num - 1);
-		hr_qp->ibqp.qp_num = hr_dev->caps.sqp_start +
-				     hr_dev->caps.num_ports +
-				     init_attr->port_num - 1;
 		break;
 	}
 	default:{
@@ -670,6 +658,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct device *dev = &hr_dev->pdev->dev;
 	int ret = -EINVAL;
 	int p;
+	enum ib_mtu active_mtu;
 
 	mutex_lock(&hr_qp->mutex);
@@ -700,6 +689,19 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		}
 	}
 
+	if (attr_mask & IB_QP_PATH_MTU) {
+		p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
+		active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);
+
+		if (attr->path_mtu > IB_MTU_2048 ||
+		    attr->path_mtu < IB_MTU_256 ||
+		    attr->path_mtu > active_mtu) {
+			dev_err(dev, "attr path_mtu(%d)invalid while modify qp",
+				attr->path_mtu);
+			goto out;
+		}
+	}
+
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
 	    attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
 		dev_err(dev, "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
@@ -782,29 +784,11 @@ static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
 
 void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
 {
-	struct ib_qp *ibqp = &hr_qp->ibqp;
-	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
-	if ((n < 0) || (n > hr_qp->rq.wqe_cnt)) {
-		dev_err(&hr_dev->pdev->dev, "rq wqe index:%d,rq wqe cnt:%d\r\n",
-			n, hr_qp->rq.wqe_cnt);
-		return NULL;
-	}
-
 	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
 }
 
 void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
 {
-	struct ib_qp *ibqp = &hr_qp->ibqp;
-	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
-	if ((n < 0) || (n > hr_qp->sq.wqe_cnt)) {
-		dev_err(&hr_dev->pdev->dev, "sq wqe index:%d,sq wqe cnt:%d\r\n",
-			n, hr_qp->sq.wqe_cnt);
-		return NULL;
-	}
-
 	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
 }
 
@@ -837,8 +821,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
 	/* A port include two SQP, six port total 12 */
 	ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
-				   hr_dev->caps.num_qps - 1,
-				   hr_dev->caps.sqp_start + SQP_NUM,
+				   hr_dev->caps.num_qps - 1, SQP_NUM,
 				   reserved_from_top);
 	if (ret) {
 		dev_err(&hr_dev->pdev->dev, "qp bitmap init failed!error=%d\n",
...
@@ -207,6 +207,7 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
 	int ret;
 	char *mac_addr = (char *)addr;
 	struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+	u8 port_num;
 
 	assert(mac_cb);
@@ -221,8 +222,11 @@ static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
 		return ret;
 	}
 
-	ret = hns_mac_set_multi(mac_cb, DSAF_BASE_INNER_PORT_NUM,
-				mac_addr, true);
+	ret = hns_mac_get_inner_port_num(mac_cb, handle->vf_id, &port_num);
+	if (ret)
+		return ret;
+
+	ret = hns_mac_set_multi(mac_cb, port_num, mac_addr, true);
 	if (ret)
 		dev_err(handle->owner_dev,
 			"mac add mul_mac:%pM port%d fail, ret = %#x!\n",
@@ -678,9 +682,6 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
 		ret = -EINVAL;
 	}
 
-	if (!ret)
-		hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en);
-
 	return ret;
 }
...
@@ -141,9 +141,10 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
  *@port_num:port number
  *
  */
-static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
-				      u8 vmid, u8 *port_num)
+int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num)
 {
+	int q_num_per_vf, vf_num_per_port;
+	int vm_queue_id;
 	u8 tmp_port;
 
 	if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
@@ -174,6 +175,12 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
 		return -EINVAL;
 	}
 
+	q_num_per_vf = mac_cb->dsaf_dev->rcb_common[0]->max_q_per_vf;
+	vf_num_per_port = mac_cb->dsaf_dev->rcb_common[0]->max_vfn;
+	vm_queue_id = vmid * q_num_per_vf +
+		      vf_num_per_port * q_num_per_vf * mac_cb->mac_id;
+
 	switch (mac_cb->dsaf_dev->dsaf_mode) {
 	case DSAF_MODE_ENABLE_FIX:
 		tmp_port = 0;
@@ -193,7 +200,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
 	case DSAF_MODE_DISABLE_6PORT_2VM:
 	case DSAF_MODE_DISABLE_6PORT_4VM:
 	case DSAF_MODE_DISABLE_6PORT_16VM:
-		tmp_port = vmid;
+		tmp_port = vm_queue_id;
 		break;
 	default:
 		dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n",
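
The fix replaces the raw vmid with a global queue id computed from the fabric's per-VF queue count and per-port VF count. A worked example with assumed parameters (max_q_per_vf = 16, max_vfn = 8, neither value taken from this diff):

    /* vm_queue_id for mac_id = 2, vmid = 3 under the assumed layout */
    int q_num_per_vf = 16, vf_num_per_port = 8;
    int vm_queue_id = 3 * q_num_per_vf +
                      vf_num_per_port * q_num_per_vf * 2;
    /* = 48 + 256 = 304 */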
...
@@ -461,5 +461,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb);
 int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
 			enum hnae_led_state status);
 void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en);
+int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
+			       u8 vmid, u8 *port_num);
 
 #endif /* _HNS_DSAF_MAC_H */
@@ -760,16 +760,6 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
 		       DSAF_CFG_MIX_MODE_S, !!en);
 }
 
-void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
-{
-	if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
-	    dsaf_dev->mac_cb[mac_id]->mac_type == HNAE_PORT_DEBUG)
-		return;
-
-	dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
-			 DSAFV2_SERDES_LBK_EN_B, !!en);
-}
-
 /**
  * hns_dsaf_tbl_stat_en - tbl
  * @dsaf_id: dsa fabric id
...
@@ -466,6 +466,5 @@ void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
 				  u32 *en);
 int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
 				 u32 en);
-void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
 
 #endif /* __HNS_DSAF_MAIN_H__ */
@@ -543,6 +543,22 @@ int hns_rcb_set_coalesce_usecs(
 			"error: coalesce_usecs setting supports 0~1023us\n");
 		return -EINVAL;
 	}
+
+	if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+		if (timeout == 0)
+			/* set timeout to 0, Disable gap time */
+			dsaf_set_reg_field(rcb_common->io_base,
+					   RCB_INT_GAP_TIME_REG + port_idx * 4,
+					   PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
+					   0);
+		else
+			/* set timeout non 0, restore gap time to 1 */
+			dsaf_set_reg_field(rcb_common->io_base,
+					   RCB_INT_GAP_TIME_REG + port_idx * 4,
+					   PPE_INT_GAPTIME_M, PPE_INT_GAPTIME_B,
+					   1);
+	}
+
 	hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
 	return 0;
 }
...
@@ -417,6 +417,7 @@
 #define RCB_CFG_OVERTIME_REG		0x9300
 #define RCB_CFG_PKTLINE_INT_NUM_REG	0x9304
 #define RCB_CFG_OVERTIME_INT_NUM_REG	0x9308
+#define RCB_INT_GAP_TIME_REG		0x9400
 #define RCB_PORT_CFG_OVERTIME_REG	0x9430
 
 #define RCB_RING_RX_RING_BASEADDR_L_REG	0x00000
@@ -898,6 +899,9 @@
 #define PPE_CNT_CLR_CE_B	0
 #define PPE_CNT_CLR_SNAP_EN_B	1
 
+#define PPE_INT_GAPTIME_B	0
+#define PPE_INT_GAPTIME_M	0x3ff
+
 #define PPE_COMMON_CNT_CLR_CE_B	0
 #define PPE_COMMON_CNT_CLR_SNAP_EN_B	1
 
 #define RCB_COM_TSO_MODE_B	0
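
The new *_M/*_B pair follows the driver's mask-plus-bit-offset convention: PPE_INT_GAPTIME_M (0x3ff, matching the documented 0~1023us range) is the field mask and PPE_INT_GAPTIME_B its bit offset within RCB_INT_GAP_TIME_REG. A generic sketch of the read-modify-write such a field update performs (an illustration of the convention, not the driver's actual dsaf_set_reg_field helper):

    /* Generic mask/offset register-field update illustrating the
     * *_M / *_B convention; not the driver's literal implementation.
     */
    static void set_reg_field(void __iomem *base, u32 reg,
                              u32 mask, u32 shift, u32 val)
    {
            u32 cur = readl(base + reg);

            cur &= ~(mask << shift);        /* clear the field */
            cur |= (val & mask) << shift;   /* insert the new value */
            writel(cur, base + reg);
    }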
...
@@ -574,7 +574,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
 	struct sk_buff *skb;
 	struct hnae_desc *desc;
 	struct hnae_desc_cb *desc_cb;
-	struct ethhdr *eh;
 	unsigned char *va;
 	int bnum, length, i;
 	int pull_len;
@@ -600,7 +599,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
 		ring->stats.sw_err_cnt++;
 		return -ENOMEM;
 	}
 
-	skb_reset_mac_header(skb);
 	prefetchw(skb->data);
 	length = le16_to_cpu(desc->rx.pkt_len);
@@ -682,14 +680,6 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
 		return -EFAULT;
 	}
 
-	/* filter out multicast pkt with the same src mac as this port */
-	eh = eth_hdr(skb);
-	if (unlikely(is_multicast_ether_addr(eh->h_dest) &&
-		     ether_addr_equal(ndev->dev_addr, eh->h_source))) {
-		dev_kfree_skb_any(skb);
-		return -EFAULT;
-	}
-
 	ring->stats.rx_pkts++;
 	ring->stats.rx_bytes += skb->len;
@@ -747,25 +737,37 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
 	ndev->last_rx = jiffies;
 }
 
+static int hns_desc_unused(struct hnae_ring *ring)
+{
+	int ntc = ring->next_to_clean;
+	int ntu = ring->next_to_use;
+
+	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
+}
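
hns_desc_unused() counts descriptors that have been cleaned but not yet refilled, handling ring wraparound. A worked check with desc_num = 1024 as an assumed ring size:

    /* Worked check of the wraparound arithmetic; desc_num = 1024 is
     * an assumed ring size, not taken from the driver.
     */
    int desc_num = 1024;

    int unused(int ntc, int ntu)
    {
            return ((ntc >= ntu) ? 0 : desc_num) + ntc - ntu;
    }
    /* unused(14, 10) == 4;  unused(10, 14) == 1020 */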
 static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
 			       int budget, void *v)
 {
 	struct hnae_ring *ring = ring_data->ring;
 	struct sk_buff *skb;
-	int num, bnum, ex_num;
+	int num, bnum;
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
 	int recv_pkts, recv_bds, clean_count, err;
+	int unused_count = hns_desc_unused(ring);
 
 	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
 	rmb(); /* make sure num taken effect before the other data is touched */
 
 	recv_pkts = 0, recv_bds = 0, clean_count = 0;
-recv:
+	num -= unused_count;
+
 	while (recv_pkts < budget && recv_bds < num) {
 		/* reuse or realloc buffers */
-		if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
-			hns_nic_alloc_rx_buffers(ring_data, clean_count);
+		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+			hns_nic_alloc_rx_buffers(ring_data,
+						 clean_count + unused_count);
 			clean_count = 0;
+			unused_count = hns_desc_unused(ring);
 		}
 
 		/* poll one pkt */
@@ -786,21 +788,11 @@ static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
 		recv_pkts++;
 	}
 
-	/* make all data has been write before submit */
-	if (recv_pkts < budget) {
-		ex_num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
-
-		if (ex_num > clean_count) {
-			num += ex_num - clean_count;
-			rmb(); /*complete read rx ring bd number*/
-			goto recv;
-		}
-	}
-
 out:
 	/* make all data has been write before submit */
-	if (clean_count > 0)
-		hns_nic_alloc_rx_buffers(ring_data, clean_count);
+	if (clean_count + unused_count > 0)
+		hns_nic_alloc_rx_buffers(ring_data,
+					 clean_count + unused_count);
 
 	return recv_pkts;
 }
@@ -810,6 +802,8 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
 	struct hnae_ring *ring = ring_data->ring;
 	int num = 0;
 
+	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+
 	/* for hardware bug fixed */
 	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
@@ -821,6 +815,20 @@ static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
 	}
 }
 
+static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+{
+	struct hnae_ring *ring = ring_data->ring;
+	int num = 0;
+
+	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+
+	if (num == 0)
+		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+			ring, 0);
+	else
+		napi_schedule(&ring_data->napi);
+}
+
 static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
 					    int *bytes, int *pkts)
 {
@@ -922,7 +930,11 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
 static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
 {
 	struct hnae_ring *ring = ring_data->ring;
-	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+	int head;
+
+	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+
+	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
 
 	if (head != ring->next_to_clean) {
 		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
@@ -932,6 +944,18 @@ static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
 	}
 }
 
+static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+{
+	struct hnae_ring *ring = ring_data->ring;
+	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+
+	if (head == ring->next_to_clean)
+		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+			ring, 0);
+	else
+		napi_schedule(&ring_data->napi);
+}
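
Both *_v2 handlers apply the same completion-time pattern: after napi_complete(), look once more for work that arrived during the final poll pass and reschedule NAPI instead of re-arming the interrupt, so late descriptors are not stranded until the next irq. A distilled sketch of that idle test (the type and field names are stand-ins, not hns symbols):

    /* Stand-in ring state: re-check for late work before re-arming. */
    struct ring_state {
            int hw_head;            /* producer index from hardware */
            int next_to_clean;      /* consumer index in software */
    };

    /* true when the ring is drained and the irq may be re-armed;
     * false when another NAPI pass should be scheduled instead */
    static int ring_idle(const struct ring_state *rs)
    {
            return rs->hw_head == rs->next_to_clean;
    }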
 static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
 {
 	struct hnae_ring *ring = ring_data->ring;
@@ -963,10 +987,7 @@ static int hns_nic_common_poll(struct napi_struct *napi, int budget)
 
 	if (clean_complete >= 0 && clean_complete < budget) {
 		napi_complete(napi);
-		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
-			ring_data->ring, 0);
-		if (ring_data->fini_process)
-			ring_data->fini_process(ring_data);
+		ring_data->fini_process(ring_data);
 
 		return 0;
 	}
@@ -1562,6 +1583,21 @@ struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
 	return stats;
 }
 
+static u16
+hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
+		     void *accel_priv, select_queue_fallback_t fallback)
+{
+	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+	struct hns_nic_priv *priv = netdev_priv(ndev);
+
+	/* fix hardware broadcast/multicast packets queue loopback */
+	if (!AE_IS_VER1(priv->enet_ver) &&
+	    is_multicast_ether_addr(eth_hdr->h_dest))
+		return 0;
+	else
+		return fallback(ndev, skb);
+}
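
On v2 hardware the fabric can loop a multicast or broadcast frame sent from a non-zero queue back to the host, so the new ndo_select_queue pins such frames to queue 0 and defers everything else to the stack's fallback hash. The steering test reduces to the multicast bit of the destination MAC (the low bit of the first octet, which is also set for broadcast), sketched standalone here:

    /* Standalone form of the steering test; the multicast check
     * mirrors is_multicast_ether_addr() on the destination MAC.
     */
    static bool force_queue0(bool is_ver1_hw, const u8 *dest_mac)
    {
            return !is_ver1_hw && (dest_mac[0] & 0x01);
    }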
 static const struct net_device_ops hns_nic_netdev_ops = {
 	.ndo_open = hns_nic_net_open,
 	.ndo_stop = hns_nic_net_stop,
@@ -1577,6 +1613,7 @@ static const struct net_device_ops hns_nic_netdev_ops = {
 	.ndo_poll_controller = hns_nic_poll_controller,
 #endif
 	.ndo_set_rx_mode = hns_nic_set_rx_mode,
+	.ndo_select_queue = hns_nic_select_queue,
 };
 
 static void hns_nic_update_link_status(struct net_device *netdev)
@@ -1738,7 +1775,8 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
 		rd->queue_index = i;
 		rd->ring = &h->qs[i]->tx_ring;
 		rd->poll_one = hns_nic_tx_poll_one;
-		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro : NULL;
+		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
+					     hns_nic_tx_fini_pro_v2;
 
 		netif_napi_add(priv->netdev, &rd->napi,
 			       hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
@@ -1750,7 +1788,8 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
 		rd->ring = &h->qs[i - h->q_num]->rx_ring;
 		rd->poll_one = hns_nic_rx_poll_one;
 		rd->ex_process = hns_nic_rx_up_pro;
-		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro : NULL;
+		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
+					     hns_nic_rx_fini_pro_v2;
 
 		netif_napi_add(priv->netdev, &rd->napi,
 			       hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
...
@@ -352,6 +352,13 @@ static int __lb_setup(struct net_device *ndev,
 		break;
 	}
 
+	if (!ret) {
+		if (loop == MAC_LOOP_NONE)
+			h->dev->ops->set_promisc_mode(
+				h, ndev->flags & IFF_PROMISC);
+		else
+			h->dev->ops->set_promisc_mode(h, 1);
+	}
+
 	return ret;
 }
...