Commit b7270cce authored by Linus Torvalds

Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

Pull infiniband/rdma fixes from Roland Dreier:
 "cxgb4 hardware driver fixes"

* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  RDMA/cxgb4: Update Kconfig to include Chelsio T5 adapter
  RDMA/cxgb4: Only allow kernel db ringing for T4 devs
  RDMA/cxgb4: Force T5 connections to use TAHOE congestion control
  RDMA/cxgb4: Fix endpoint mutex deadlocks
parents b5f3c61d 7d0a73a4
 config INFINIBAND_CXGB4
-        tristate "Chelsio T4 RDMA Driver"
+        tristate "Chelsio T4/T5 RDMA Driver"
         depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
         select GENERIC_ALLOCATOR
         ---help---
-          This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
-          10GbE adapters.
+          This is an iWARP/RDMA driver for the Chelsio T4 and T5
+          1GbE, 10GbE adapters and T5 40GbE adapter.
           For general information about Chelsio and our products, visit
           our website at <http://www.chelsio.com>.
@@ -587,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep)
                 opt2 |= SACK_EN(1);
         if (wscale && enable_tcp_window_scaling)
                 opt2 |= WND_SCALE_EN(1);
+        if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+                opt2 |= T5_OPT_2_VALID;
+                opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+        }
 
         t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
         if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -996,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 {
         PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-        state_set(&ep->com, ABORTING);
+        __state_set(&ep->com, ABORTING);
         set_bit(ABORT_CONN, &ep->com.history);
         return send_abort(ep, skb, gfp);
 }
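A note on the rename above: by kernel convention, a double-underscore prefix marks the lock-free variant of a helper, meaning the caller must already hold the relevant lock. abort_connection() runs with ep->com.mutex held (see the process_timeout hunk further down), so calling the locking state_set() there would self-deadlock on the non-recursive mutex. A minimal sketch of the convention, using pthreads stand-ins rather than the driver's types:

#include <pthread.h>

struct conn {
        pthread_mutex_t mutex;
        int state;
};

/* Lock-free variant: the caller must already hold c->mutex. */
static void __state_set(struct conn *c, int new_state)
{
        c->state = new_state;
}

/* Locking variant for callers that do not hold the mutex. */
static void state_set(struct conn *c, int new_state)
{
        pthread_mutex_lock(&c->mutex);
        __state_set(c, new_state);
        pthread_mutex_unlock(&c->mutex);
}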
@@ -1154,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
         return credits;
 }
 
-static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
         struct mpa_message *mpa;
         struct mpa_v2_conn_params *mpa_v2_params;
@@ -1164,6 +1168,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
         struct c4iw_qp_attributes attrs;
         enum c4iw_qp_attr_mask mask;
         int err;
+        int disconnect = 0;
 
         PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1173,7 +1178,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
          * will abort the connection.
          */
         if (stop_ep_timer(ep))
-                return;
+                return 0;
 
         /*
          * If we get more than the supported amount of private data
@@ -1195,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
          * if we don't even have the mpa message, then bail.
          */
         if (ep->mpa_pkt_len < sizeof(*mpa))
-                return;
+                return 0;
         mpa = (struct mpa_message *) ep->mpa_pkt;
 
         /* Validate MPA header. */
@@ -1235,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
          * We'll continue process when more data arrives.
          */
         if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
-                return;
+                return 0;
 
         if (mpa->flags & MPA_REJECT) {
                 err = -ECONNREFUSED;
@@ -1337,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
                         attrs.layer_etype = LAYER_MPA | DDP_LLP;
                         attrs.ecode = MPA_NOMATCH_RTR;
                         attrs.next_state = C4IW_QP_STATE_TERMINATE;
+                        attrs.send_term = 1;
                         err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-                                        C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+                                        C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
                         err = -ENOMEM;
+                        disconnect = 1;
                         goto out;
                 }
@@ -1355,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
                         attrs.layer_etype = LAYER_MPA | DDP_LLP;
                         attrs.ecode = MPA_INSUFF_IRD;
                         attrs.next_state = C4IW_QP_STATE_TERMINATE;
+                        attrs.send_term = 1;
                         err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-                                        C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+                                        C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
                         err = -ENOMEM;
+                        disconnect = 1;
                         goto out;
                 }
         goto out;
@@ -1366,7 +1375,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
         send_abort(ep, skb, GFP_KERNEL);
 out:
         connect_reply_upcall(ep, err);
-        return;
+        return disconnect;
 }
 
 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
@@ -1524,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
         unsigned int tid = GET_TID(hdr);
         struct tid_info *t = dev->rdev.lldi.tids;
         __u8 status = hdr->status;
+        int disconnect = 0;
 
         ep = lookup_tid(t, tid);
         if (!ep)
@@ -1539,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
         switch (ep->com.state) {
         case MPA_REQ_SENT:
                 ep->rcv_seq += dlen;
-                process_mpa_reply(ep, skb);
+                disconnect = process_mpa_reply(ep, skb);
                 break;
         case MPA_REQ_WAIT:
                 ep->rcv_seq += dlen;
@@ -1555,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
                         ep->com.state, ep->hwtid, status);
                 attrs.next_state = C4IW_QP_STATE_TERMINATE;
                 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+                disconnect = 1;
                 break;
         }
         default:
                 break;
         }
         mutex_unlock(&ep->com.mutex);
+        if (disconnect)
+                c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
         return 0;
 }
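The rx_data() change above is the core of the deadlock fix: the need to disconnect is only recorded while ep->com.mutex is held, and c4iw_ep_disconnect() runs after the unlock because the disconnect path takes the same mutex itself. A minimal sketch of this defer-until-unlock pattern, again with pthreads stand-ins rather than driver types:

#include <pthread.h>
#include <stdbool.h>

struct endpoint {
        pthread_mutex_t mutex;
        int state;
};

/* Takes ep->mutex itself, so it must not be called under it. */
static void ep_disconnect(struct endpoint *ep)
{
        pthread_mutex_lock(&ep->mutex);
        ep->state = 0;                  /* CLOSED */
        pthread_mutex_unlock(&ep->mutex);
}

static void handle_rx(struct endpoint *ep, bool fatal)
{
        bool disconnect = false;

        pthread_mutex_lock(&ep->mutex);
        if (fatal)
                disconnect = true;      /* only record the decision */
        pthread_mutex_unlock(&ep->mutex);

        if (disconnect)                 /* act on it without the lock */
                ep_disconnect(ep);
}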
@@ -2009,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
                 if (tcph->ece && tcph->cwr)
                         opt2 |= CCTRL_ECN(1);
         }
+        if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+                opt2 |= T5_OPT_2_VALID;
+                opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+        }
 
         rpl = cplhdr(skb);
         INIT_TP_WR(rpl, ep->hwtid);
@@ -3482,9 +3499,9 @@ static void process_timeout(struct c4iw_ep *ep)
                         __func__, ep, ep->hwtid, ep->com.state);
                 abort = 0;
         }
-        mutex_unlock(&ep->com.mutex);
         if (abort)
                 abort_connection(ep, NULL, GFP_KERNEL);
+        mutex_unlock(&ep->com.mutex);
         c4iw_put_ep(&ep->com);
 }
@@ -435,6 +435,7 @@ struct c4iw_qp_attributes {
         u8 ecode;
         u16 sq_db_inc;
         u16 rq_db_inc;
+        u8 send_term;
 };
 
 struct c4iw_qp {
@@ -1388,11 +1388,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                         qhp->attr.layer_etype = attrs->layer_etype;
                         qhp->attr.ecode = attrs->ecode;
                         ep = qhp->ep;
-                        disconnect = 1;
-                        c4iw_get_ep(&qhp->ep->com);
-                        if (!internal)
+                        if (!internal) {
+                                c4iw_get_ep(&qhp->ep->com);
                                 terminate = 1;
-                        else {
+                                disconnect = 1;
+                        } else {
+                                terminate = qhp->attr.send_term;
                                 ret = rdma_fini(rhp, qhp, ep);
                                 if (ret)
                                         goto err;
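The new send_term attribute gives internal (driver-initiated) transitions to TERMINATE state a way to request that a TERMINATE message actually be sent, which external transitions always do. A sketch of the resulting decision, mirroring the branch above (stand-in types, not the driver's):

struct qp_attrs {
        int next_state;
        unsigned char send_term;        /* request TERMINATE on internal moves */
};

static int should_terminate(const struct qp_attrs *attrs, int internal)
{
        if (!internal)
                return 1;               /* external moves always terminate */
        return attrs->send_term;        /* internal moves only when asked */
}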
@@ -1776,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
         /*
          * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
          * ringing the queue db when we're in DB_FULL mode.
+         * Only allow this on T4 devices.
          */
         attrs.sq_db_inc = attr->sq_psn;
         attrs.rq_db_inc = attr->rq_psn;
         mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
         mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+        if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
+            (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
+                return -EINVAL;
 
         return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
 }
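User space reaches this path through the standard modify-QP verb, with the PSN fields repurposed as doorbell index increments as the comment describes. A hypothetical libibverbs sketch of such a request, which after this patch fails with EINVAL on T5 hardware (the helper name and wiring are illustrative, not from the patch):

#include <string.h>
#include <infiniband/verbs.h>

/* Hypothetical helper: ask the kernel to ring the SQ doorbell with an
 * index increment by passing it through sq_psn. On T5 devices the
 * kernel now rejects this with EINVAL. */
static int ring_sq_db(struct ibv_qp *qp, unsigned int idx_inc)
{
        struct ibv_qp_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.sq_psn = idx_inc;
        return ibv_modify_qp(qp, &attr, IBV_QP_SQ_PSN);
}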
@@ -836,4 +836,18 @@ struct ulptx_idata {
 #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
 #define F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)
 
+enum {                     /* TCP congestion control algorithms */
+        CONG_ALG_RENO,
+        CONG_ALG_TAHOE,
+        CONG_ALG_NEWRENO,
+        CONG_ALG_HIGHSPEED
+};
+
+#define S_CONG_CNTRL    14
+#define M_CONG_CNTRL    0x3
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+
+#define T5_OPT_2_VALID       (1 << 31)
+
 #endif /* _T4FW_RI_API_H_ */
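The new helpers follow the firmware header's S_/M_/V_/G_ convention: a shift, a field mask, a pack macro, and an unpack macro. A small stand-alone check of how they compose into opt2 (macros copied from the hunk above, except 1U is used to avoid the signed shift in T5_OPT_2_VALID):

#include <stdio.h>
#include <stdint.h>

enum { CONG_ALG_RENO, CONG_ALG_TAHOE, CONG_ALG_NEWRENO, CONG_ALG_HIGHSPEED };

#define S_CONG_CNTRL    14
#define M_CONG_CNTRL    0x3
#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
#define T5_OPT_2_VALID  (1U << 31)

int main(void)
{
        uint32_t opt2 = 0;

        opt2 |= T5_OPT_2_VALID;                 /* bit 31: extended opt2 fields valid */
        opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);   /* bits 15:14: algorithm 1 (Tahoe) */

        printf("opt2 = 0x%08x, alg = %u\n", (unsigned)opt2,
               (unsigned)G_CONG_CNTRL(opt2));
        /* prints: opt2 = 0x80004000, alg = 1 */
        return 0;
}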