Commit 121bddf3 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Doug Ledford:
 "This is probably our last -rc pull request. We don't have anything
  else outstanding at the moment anyway, and with the summer months on
  us and people taking trips, I expect the next weeks leading up to the
  merge window to be pretty calm and sedate.

  This has two simple, no brainer fixes for the EFA driver.

  Then it has ten not quite so simple fixes for the hfi1 driver. The
  problem with them is that they aren't simply one liner typo fixes.
  They're still fixes, but they're more complex issues like livelock
  under heavy load where the answer was to change work queue usage and
  spinlock usage to resolve the problem, or issues with orphaned
  requests during certain types of failures like link down which
  required some more complex work to fix too. They all look like
  legitimate fixes to me, they just aren't small like I wish they were.

  Summary:

   - 2 minor EFA fixes

   - 10 hfi1 fixes related to scaling issues"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/efa: Handle mmap insertions overflow
  RDMA/efa: Fix success return value in case of error
  IB/hfi1: Handle port down properly in pio
  IB/hfi1: Handle wakeup of orphaned QPs for pio
  IB/hfi1: Wakeup QPs orphaned on wait list after flush
  IB/hfi1: Use aborts to trigger RC throttling
  IB/hfi1: Create inline to get extended headers
  IB/hfi1: Silence txreq allocation warnings
  IB/hfi1: Avoid hardlockup with flushlist_lock
  IB/hfi1: Correct tid qp rcd to match verbs context
  IB/hfi1: Close PSM sdma_progress sleep window
  IB/hfi1: Validate fault injection opcode user input
parents c036f7da 7a5834e4
@@ -139,9 +139,11 @@ int efa_com_destroy_qp(struct efa_com_dev *edev,
 			       sizeof(qp_cmd),
 			       (struct efa_admin_acq_entry *)&cmd_completion,
 			       sizeof(cmd_completion));
-	if (err)
+	if (err) {
 		ibdev_err(edev->efa_dev, "Failed to destroy qp-%u [%d]\n",
 			  qp_cmd.qp_handle, err);
+		return err;
+	}
 
 	return 0;
 }
@@ -199,9 +201,11 @@ int efa_com_destroy_cq(struct efa_com_dev *edev,
 			       (struct efa_admin_acq_entry *)&destroy_resp,
 			       sizeof(destroy_resp));
-	if (err)
+	if (err) {
 		ibdev_err(edev->efa_dev, "Failed to destroy CQ-%u [%d]\n",
 			  params->cq_idx, err);
+		return err;
+	}
 
 	return 0;
 }
@@ -273,10 +277,12 @@ int efa_com_dereg_mr(struct efa_com_dev *edev,
 			       sizeof(mr_cmd),
 			       (struct efa_admin_acq_entry *)&cmd_completion,
 			       sizeof(cmd_completion));
-	if (err)
+	if (err) {
 		ibdev_err(edev->efa_dev,
 			  "Failed to de-register mr(lkey-%u) [%d]\n",
 			  mr_cmd.l_key, err);
+		return err;
+	}
 
 	return 0;
 }
@@ -327,9 +333,11 @@ int efa_com_destroy_ah(struct efa_com_dev *edev,
 			       sizeof(ah_cmd),
 			       (struct efa_admin_acq_entry *)&cmd_completion,
 			       sizeof(cmd_completion));
-	if (err)
+	if (err) {
 		ibdev_err(edev->efa_dev, "Failed to destroy ah-%d pd-%d [%d]\n",
 			  ah_cmd.ah, ah_cmd.pd, err);
+		return err;
+	}
 
 	return 0;
 }
@@ -387,10 +395,12 @@ static int efa_com_get_feature_ex(struct efa_com_dev *edev,
 				 get_resp,
 				 sizeof(*get_resp));
-	if (err)
+	if (err) {
 		ibdev_err(edev->efa_dev,
 			  "Failed to submit get_feature command %d [%d]\n",
 			  feature_id, err);
+		return err;
+	}
 
 	return 0;
 }
@@ -534,10 +544,12 @@ static int efa_com_set_feature_ex(struct efa_com_dev *edev,
 				 (struct efa_admin_acq_entry *)set_resp,
 				 sizeof(*set_resp));
-	if (err)
+	if (err) {
 		ibdev_err(edev->efa_dev,
 			  "Failed to submit set_feature command %d error: %d\n",
 			  feature_id, err);
+		return err;
+	}
 
 	return 0;
 }
......
@@ -204,6 +204,7 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
 				   void *obj, u64 address, u64 length, u8 mmap_flag)
 {
 	struct efa_mmap_entry *entry;
+	u32 next_mmap_page;
 	int err;
 
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@ -216,15 +217,19 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
 	entry->mmap_flag = mmap_flag;
 
 	xa_lock(&ucontext->mmap_xa);
+	if (check_add_overflow(ucontext->mmap_xa_page,
+			       (u32)(length >> PAGE_SHIFT),
+			       &next_mmap_page))
+		goto err_unlock;
+
 	entry->mmap_page = ucontext->mmap_xa_page;
-	ucontext->mmap_xa_page += DIV_ROUND_UP(length, PAGE_SIZE);
+	ucontext->mmap_xa_page = next_mmap_page;
 	err = __xa_insert(&ucontext->mmap_xa, entry->mmap_page, entry,
 			  GFP_KERNEL);
+	if (err)
+		goto err_unlock;
+
 	xa_unlock(&ucontext->mmap_xa);
-	if (err){
-		kfree(entry);
-		return EFA_MMAP_INVALID;
-	}
 
 	ibdev_dbg(
 		&dev->ibdev,
@@ -232,6 +237,12 @@ static u64 mmap_entry_insert(struct efa_dev *dev, struct efa_ucontext *ucontext,
 		entry->obj, entry->address, entry->length, get_mmap_key(entry));
 
 	return get_mmap_key(entry);
+
+err_unlock:
+	xa_unlock(&ucontext->mmap_xa);
+	kfree(entry);
+	return EFA_MMAP_INVALID;
 }
 
 int efa_query_device(struct ib_device *ibdev,
......
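The mmap hunk above guards the ucontext's 32-bit mmap page cursor against wrap-around before a new entry is handed out: the addition is attempted with check_add_overflow() and the insertion is refused if it would wrap. Below is a minimal stand-alone illustration of that pattern in user-space C, using the compiler builtin that the kernel helper wraps; the reserve_pages() function and its names are invented for this sketch and are not driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Refuse a reservation that would wrap a 32-bit page cursor.
 * Same idea as the check_add_overflow() test in the EFA fix above;
 * illustrative sketch only, not the driver code. */
static bool reserve_pages(uint32_t *next_page, uint32_t npages)
{
	uint32_t next;

	if (__builtin_add_overflow(*next_page, npages, &next))
		return false;	/* would overflow: refuse the insertion */

	*next_page = next;
	return true;
}

int main(void)
{
	uint32_t cursor = UINT32_MAX - 2;

	printf("%d\n", reserve_pages(&cursor, 1));	/* 1: still fits */
	printf("%d\n", reserve_pages(&cursor, 8));	/* 0: would wrap */
	return 0;
}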
@@ -14031,6 +14031,19 @@ static void init_kdeth_qp(struct hfi1_devdata *dd)
 		       RCV_BTH_QP_KDETH_QP_SHIFT);
 }
 
+/**
+ * hfi1_get_qp_map
+ * @dd: device data
+ * @idx: index to read
+ */
+u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
+{
+	u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
+
+	reg >>= (idx % 8) * 8;
+	return reg;
+}
+
 /**
  * init_qpmap_table
  * @dd - device data
......
@@ -1445,6 +1445,7 @@ void clear_all_interrupts(struct hfi1_devdata *dd);
 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr);
 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr);
 void reset_interrupts(struct hfi1_devdata *dd);
+u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx);
 
 /*
  * Interrupt source table.
......
@@ -153,6 +153,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
 		char *dash;
 		unsigned long range_start, range_end, i;
 		bool remove = false;
+		unsigned long bound = 1U << BITS_PER_BYTE;
 
 		end = strchr(ptr, ',');
 		if (end)
@@ -178,6 +179,10 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
 					    BITS_PER_BYTE);
 			break;
 		}
+		/* Check the inputs */
+		if (range_start >= bound || range_end >= bound)
+			break;
+
 		for (i = range_start; i <= range_end; i++) {
 			if (remove)
 				clear_bit(i, fault->opcodes);
......
@@ -539,6 +539,37 @@ static inline void hfi1_16B_set_qpn(struct opa_16b_mgmt *mgmt,
 	mgmt->src_qpn = cpu_to_be32(src_qp & OPA_16B_MGMT_QPN_MASK);
 }
 
+/**
+ * hfi1_get_rc_ohdr - get extended header
+ * @opah - the opaheader
+ */
+static inline struct ib_other_headers *
+hfi1_get_rc_ohdr(struct hfi1_opa_header *opah)
+{
+	struct ib_other_headers *ohdr;
+	struct ib_header *hdr = NULL;
+	struct hfi1_16b_header *hdr_16b = NULL;
+
+	/* Find out where the BTH is */
+	if (opah->hdr_type == HFI1_PKT_TYPE_9B) {
+		hdr = &opah->ibh;
+		if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
+			ohdr = &hdr->u.oth;
+		else
+			ohdr = &hdr->u.l.oth;
+	} else {
+		u8 l4;
+
+		hdr_16b = &opah->opah;
+		l4 = hfi1_16B_get_l4(hdr_16b);
+		if (l4 == OPA_16B_L4_IB_LOCAL)
+			ohdr = &hdr_16b->u.oth;
+		else
+			ohdr = &hdr_16b->u.l.oth;
+	}
+	return ohdr;
+}
+
 struct rvt_sge_state;
 
 /*
......
@@ -952,6 +952,22 @@ void sc_disable(struct send_context *sc)
 		}
 	}
 	spin_unlock(&sc->release_lock);
+
+	write_seqlock(&sc->waitlock);
+	while (!list_empty(&sc->piowait)) {
+		struct iowait *wait;
+		struct rvt_qp *qp;
+		struct hfi1_qp_priv *priv;
+
+		wait = list_first_entry(&sc->piowait, struct iowait, list);
+		qp = iowait_to_qp(wait);
+		priv = qp->priv;
+		list_del_init(&priv->s_iowait.list);
+		priv->s_iowait.lock = NULL;
+		hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
+	}
+	write_sequnlock(&sc->waitlock);
+
 	spin_unlock_irq(&sc->alloc_lock);
 }
 
@@ -1427,7 +1443,8 @@ void sc_stop(struct send_context *sc, int flag)
  * @cb: optional callback to call when the buffer is finished sending
  * @arg: argument for cb
  *
- * Return a pointer to a PIO buffer if successful, NULL if not enough room.
+ * Return a pointer to a PIO buffer, NULL if not enough room, -ECOMM
+ * when link is down.
  */
 struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
 				pio_release_cb cb, void *arg)
@@ -1443,7 +1460,7 @@ struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
 	spin_lock_irqsave(&sc->alloc_lock, flags);
 	if (!(sc->flags & SCF_ENABLED)) {
 		spin_unlock_irqrestore(&sc->alloc_lock, flags);
-		goto done;
+		return ERR_PTR(-ECOMM);
 	}
 
 retry:
......
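With the change above, sc_buffer_alloc() distinguishes two failure modes: NULL still means "no room right now", while ERR_PTR(-ECOMM) means the send context is disabled because the link is down, which is why the callers in the hunks below move from checking !pbuf to IS_ERR_OR_NULL(pbuf). A small user-space sketch of that errno-in-pointer convention follows; it uses simplified re-implementations of the kernel's ERR_PTR()/IS_ERR() helpers and an invented toy_buffer_alloc(), for illustration only.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Simplified ERR_PTR()/IS_ERR(): small negative errnos live in the top
 * page of the address space, so one return value can carry a buffer,
 * NULL ("no room"), or an error code ("give up"). */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline bool IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline bool IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || IS_ERR(ptr);
}

static char buffer[64];

/* Toy allocator mirroring the sc_buffer_alloc() contract described in
 * the hunk above; not the real function. */
static void *toy_buffer_alloc(bool enabled, bool room)
{
	if (!enabled)
		return ERR_PTR(-ECOMM);	/* link down: callers stop retrying */
	return room ? buffer : NULL;	/* NULL: try again later */
}

int main(void)
{
	void *p = toy_buffer_alloc(false, true);

	if (IS_ERR_OR_NULL(p))
		printf("no buffer: %s\n", IS_ERR(p) ? "link down" : "no room");
	return 0;
}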
@@ -1432,7 +1432,7 @@ void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
 	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
 			 sc_to_vlt(ppd->dd, sc5), plen);
 	pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
-	if (!pbuf) {
+	if (IS_ERR_OR_NULL(pbuf)) {
 		/*
 		 * We have no room to send at the moment.  Pass
 		 * responsibility for sending the ACK to the send engine
@@ -1701,6 +1701,36 @@ static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
 	}
 }
 
+/**
+ * hfi1_rc_verbs_aborted - handle abort status
+ * @qp: the QP
+ * @opah: the opa header
+ *
+ * This code modifies both ACK bit in BTH[2]
+ * and the s_flags to go into send one mode.
+ *
+ * This serves to throttle the send engine to only
+ * send a single packet in the likely case the
+ * link has gone down.
+ */
+void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah)
+{
+	struct ib_other_headers *ohdr = hfi1_get_rc_ohdr(opah);
+	u8 opcode = ib_bth_get_opcode(ohdr);
+	u32 psn;
+
+	/* ignore responses */
+	if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+	     opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
+	    opcode == TID_OP(READ_RESP) ||
+	    opcode == TID_OP(WRITE_RESP))
+		return;
+
+	psn = ib_bth_get_psn(ohdr) | IB_BTH_REQ_ACK;
+	ohdr->bth[2] = cpu_to_be32(psn);
+	qp->s_flags |= RVT_S_SEND_ONE;
+}
+
 /*
  * This should be called with the QP s_lock held and interrupts disabled.
  */
@@ -1709,8 +1739,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
 	struct ib_other_headers *ohdr;
 	struct hfi1_qp_priv *priv = qp->priv;
 	struct rvt_swqe *wqe;
-	struct ib_header *hdr = NULL;
-	struct hfi1_16b_header *hdr_16b = NULL;
 	u32 opcode, head, tail;
 	u32 psn;
 	struct tid_rdma_request *req;
@@ -1719,24 +1747,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
 	if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
 		return;
 
-	/* Find out where the BTH is */
-	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
-		hdr = &opah->ibh;
-		if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
-			ohdr = &hdr->u.oth;
-		else
-			ohdr = &hdr->u.l.oth;
-	} else {
-		u8 l4;
-
-		hdr_16b = &opah->opah;
-		l4 = hfi1_16B_get_l4(hdr_16b);
-		if (l4 == OPA_16B_L4_IB_LOCAL)
-			ohdr = &hdr_16b->u.oth;
-		else
-			ohdr = &hdr_16b->u.l.oth;
-	}
+	ohdr = hfi1_get_rc_ohdr(opah);
 	opcode = ib_bth_get_opcode(ohdr);
 	if ((opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
 	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) ||
......
@@ -405,19 +405,33 @@ static void sdma_flush(struct sdma_engine *sde)
 	struct sdma_txreq *txp, *txp_next;
 	LIST_HEAD(flushlist);
 	unsigned long flags;
+	uint seq;
 
 	/* flush from head to tail */
 	sdma_flush_descq(sde);
 	spin_lock_irqsave(&sde->flushlist_lock, flags);
 	/* copy flush list */
-	list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
-		list_del_init(&txp->list);
-		list_add_tail(&txp->list, &flushlist);
-	}
+	list_splice_init(&sde->flushlist, &flushlist);
 	spin_unlock_irqrestore(&sde->flushlist_lock, flags);
 	/* flush from flush list */
 	list_for_each_entry_safe(txp, txp_next, &flushlist, list)
 		complete_tx(sde, txp, SDMA_TXREQ_S_ABORTED);
+	/* wakeup QPs orphaned on the dmawait list */
+	do {
+		struct iowait *w, *nw;
+
+		seq = read_seqbegin(&sde->waitlock);
+		if (!list_empty(&sde->dmawait)) {
+			write_seqlock(&sde->waitlock);
+			list_for_each_entry_safe(w, nw, &sde->dmawait, list) {
+				if (w->wakeup) {
+					w->wakeup(w, SDMA_AVAIL_REASON);
+					list_del_init(&w->list);
+				}
+			}
+			write_sequnlock(&sde->waitlock);
+		}
+	} while (read_seqretry(&sde->waitlock, seq));
 }
 
 /*
@@ -2413,7 +2427,7 @@ int sdma_send_txreq(struct sdma_engine *sde,
 		list_add_tail(&tx->list, &sde->flushlist);
 		spin_unlock(&sde->flushlist_lock);
 		iowait_inc_wait_count(wait, tx->num_desc);
-		schedule_work(&sde->flush_worker);
+		queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
 		ret = -ECOMM;
 		goto unlock;
 nodesc:
@@ -2511,7 +2525,7 @@ int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait,
 			iowait_inc_wait_count(wait, tx->num_desc);
 		}
 		spin_unlock(&sde->flushlist_lock);
-		schedule_work(&sde->flush_worker);
+		queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
 		ret = -ECOMM;
 		goto update_tail;
 nodesc:
......
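Two things in the sdma.c hunks above address the hardlockup under heavy load: the per-entry copy of the flush list is replaced by a single O(1) list_splice_init(), so flushlist_lock is held only for a pointer swap, and the flush worker is queued on the high-priority system workqueue on the engine's CPU. Below is a small user-space sketch of the detach-then-process idea, with a pthread mutex and an invented txreq type standing in for the kernel's list_head machinery; it is not driver code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy "flush list": producers queue entries under a lock, a flusher
 * drains them. As in the sdma_flush() hunk above, the whole list is
 * detached while holding the lock and the (possibly slow) completion
 * work runs only after the lock is dropped. All names are invented. */
struct txreq {
	struct txreq *next;
	int id;
};

static struct txreq *flushlist;
static pthread_mutex_t flushlist_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_tx(struct txreq *tx)
{
	pthread_mutex_lock(&flushlist_lock);
	tx->next = flushlist;
	flushlist = tx;
	pthread_mutex_unlock(&flushlist_lock);
}

static void flush_all(void)
{
	struct txreq *local, *next;

	/* Splice: take the whole list in one swap while locked ... */
	pthread_mutex_lock(&flushlist_lock);
	local = flushlist;
	flushlist = NULL;
	pthread_mutex_unlock(&flushlist_lock);

	/* ... then complete entries without holding the lock. */
	for (; local; local = next) {
		next = local->next;
		printf("aborting tx %d\n", local->id);
		free(local);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct txreq *tx = malloc(sizeof(*tx));

		tx->id = i;
		queue_tx(tx);
	}
	flush_all();
	return 0;
}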
@@ -312,9 +312,7 @@ static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
 	if (qp->ibqp.qp_num == 0)
 		ctxt = 0;
 	else
-		ctxt = ((qp->ibqp.qp_num >> dd->qos_shift) %
-			(dd->n_krcv_queues - 1)) + 1;
+		ctxt = hfi1_get_qp_map(dd, qp->ibqp.qp_num >> dd->qos_shift);
 
 	return dd->rcd[ctxt];
 }
......
@@ -683,7 +683,7 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
 	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
 	if (ctxt) {
 		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
-		if (pbuf) {
+		if (!IS_ERR_OR_NULL(pbuf)) {
 			trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
 			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
 						 &hdr, hwords);
@@ -738,7 +738,7 @@ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
 	pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
 	if (ctxt) {
 		pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
-		if (pbuf) {
+		if (!IS_ERR_OR_NULL(pbuf)) {
			trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
 			ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
 						 &hdr, hwords);
......
@@ -130,20 +130,16 @@ static int defer_packet_queue(
 {
 	struct hfi1_user_sdma_pkt_q *pq =
 		container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
-	struct user_sdma_txreq *tx =
-		container_of(txreq, struct user_sdma_txreq, txreq);
 
-	if (sdma_progress(sde, seq, txreq)) {
-		if (tx->busycount++ < MAX_DEFER_RETRY_COUNT)
-			goto eagain;
-	}
+	write_seqlock(&sde->waitlock);
+	if (sdma_progress(sde, seq, txreq))
+		goto eagain;
 	/*
 	 * We are assuming that if the list is enqueued somewhere, it
 	 * is to the dmawait list since that is the only place where
 	 * it is supposed to be enqueued.
 	 */
 	xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
-	write_seqlock(&sde->waitlock);
 	if (list_empty(&pq->busy.list)) {
 		iowait_get_priority(&pq->busy);
 		iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
@@ -151,6 +147,7 @@ static int defer_packet_queue(
 	write_sequnlock(&sde->waitlock);
 	return -EBUSY;
 eagain:
+	write_sequnlock(&sde->waitlock);
 	return -EAGAIN;
 }
 
@@ -804,7 +801,6 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
 		tx->flags = 0;
 		tx->req = req;
-		tx->busycount = 0;
 		INIT_LIST_HEAD(&tx->list);
 
 		/*
......
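The defer_packet_queue() change above takes the engine's waitlock before the sdma_progress() test, so the "still busy?" check and the enqueue of the packet queue onto dmawait are atomic with respect to the completion side, closing the window in which a wakeup could be missed (which also makes the busycount retry counter unnecessary). Below is a minimal user-space sketch of that check-and-park-under-one-lock rule; engine_busy, waiter_parked, try_to_defer() and engine_done() are invented for the sketch and do not correspond to driver symbols.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t waitlock = PTHREAD_MUTEX_INITIALIZER;
static bool engine_busy = true;	/* stands in for !sdma_progress() */
static bool waiter_parked;	/* stands in for being on dmawait  */

/* Submission side: park the waiter only if the engine is still busy,
 * and make the check and the park one atomic step under waitlock. */
static int try_to_defer(void)
{
	int ret;

	pthread_mutex_lock(&waitlock);		/* lock BEFORE the check */
	if (!engine_busy) {
		ret = -EAGAIN;			/* progress was made; retry */
	} else {
		waiter_parked = true;		/* check + park are atomic */
		ret = -EBUSY;
	}
	pthread_mutex_unlock(&waitlock);
	return ret;
}

/* Completion side: the wakeup takes the same lock, so it either sees
 * the parked waiter or runs before the check above, never in between. */
static void engine_done(void)
{
	pthread_mutex_lock(&waitlock);
	engine_busy = false;
	if (waiter_parked) {
		waiter_parked = false;
		printf("woke the parked waiter\n");
	}
	pthread_mutex_unlock(&waitlock);
}

int main(void)
{
	printf("defer: %d\n", try_to_defer());	/* -EBUSY: parked */
	engine_done();
	return 0;
}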
@@ -245,7 +245,6 @@ struct user_sdma_txreq {
 	struct list_head list;
 	struct user_sdma_request *req;
 	u16 flags;
-	unsigned int busycount;
 	u16 seqnum;
 };
......
@@ -638,6 +638,8 @@ static void verbs_sdma_complete(
 		struct hfi1_opa_header *hdr;
 
 		hdr = &tx->phdr.hdr;
+		if (unlikely(status == SDMA_TXREQ_S_ABORTED))
+			hfi1_rc_verbs_aborted(qp, hdr);
 		hfi1_rc_send_complete(qp, hdr);
 	}
 	spin_unlock(&qp->s_lock);
@@ -1037,10 +1039,10 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 	if (cb)
 		iowait_pio_inc(&priv->s_iowait);
 	pbuf = sc_buffer_alloc(sc, plen, cb, qp);
-	if (unlikely(!pbuf)) {
+	if (unlikely(IS_ERR_OR_NULL(pbuf))) {
 		if (cb)
 			verbs_pio_complete(qp, 0);
-		if (ppd->host_link_state != HLS_UP_ACTIVE) {
+		if (IS_ERR(pbuf)) {
 			/*
 			 * If we have filled the PIO buffers to capacity and are
 			 * not in an active state this request is not going to
@@ -1095,15 +1097,15 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
 					 &ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
 
 pio_bail:
+	spin_lock_irqsave(&qp->s_lock, flags);
 	if (qp->s_wqe) {
-		spin_lock_irqsave(&qp->s_lock, flags);
 		rvt_send_complete(qp, qp->s_wqe, wc_status);
-		spin_unlock_irqrestore(&qp->s_lock, flags);
 	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
-		spin_lock_irqsave(&qp->s_lock, flags);
+		if (unlikely(wc_status == IB_WC_GENERAL_ERR))
+			hfi1_rc_verbs_aborted(qp, &ps->s_txreq->phdr.hdr);
 		hfi1_rc_send_complete(qp, &ps->s_txreq->phdr.hdr);
-		spin_unlock_irqrestore(&qp->s_lock, flags);
 	}
+	spin_unlock_irqrestore(&qp->s_lock, flags);
 
 	ret = 0;
......
@@ -416,6 +416,7 @@ void hfi1_rc_hdrerr(
 
 u8 ah_to_sc(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
 
+void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah);
 void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah);
 
 void hfi1_ud_rcv(struct hfi1_packet *packet);
......
@@ -100,7 +100,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
 		struct hfi1_qp_priv *priv;
 
-		tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
+		tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
 		if (tx)
 			goto out;
 		priv = qp->priv;
......
@@ -72,6 +72,7 @@ struct hfi1_ibdev;
 struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 				struct rvt_qp *qp);
 
+#define VERBS_TXREQ_GFP (GFP_ATOMIC | __GFP_NOWARN)
 static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
 					    struct rvt_qp *qp)
 	__must_hold(&qp->slock)
@@ -79,7 +80,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
 	struct verbs_txreq *tx;
 	struct hfi1_qp_priv *priv = qp->priv;
 
-	tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
+	tx = kmem_cache_alloc(dev->verbs_txreq_cache, VERBS_TXREQ_GFP);
 	if (unlikely(!tx)) {
 		/* call slow path to get the lock */
 		tx = __get_txreq(dev, qp);
......