Commit c6e522e5 authored by Roland Dreier, committed by Linus Torvalds

[PATCH] InfiniBand/mthca: support RDMA/atomic attributes in QP modify

Implement setting of RDMA/atomic enable bits, initiator resources, and
responder resources for modify QP in the low-level Mellanox HCA driver
(this should complete the RDMA/atomic implementation).

Signed-off-by: Roland Dreier <roland@topspin.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 0f0a4f00
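
The attribute bits wired up here arrive through the verbs midlayer's
ib_modify_qp(). As the mthca_qp.c hunks below show, IB_QP_MAX_DEST_RD_ATOMIC
feeds the initiator-depth field (SRA_MAX), while IB_QP_MAX_QP_RD_ATOMIC drives
the responder-resource path (RRA_MAX plus the RWE/RRE/RAE enable bits). A
sketch of consumer code exercising the new paths (illustration only, not part
of the patch; error handling elided):

	struct ib_qp_attr attr = {
		.qp_access_flags    = IB_ACCESS_REMOTE_WRITE |
				      IB_ACCESS_REMOTE_READ |
				      IB_ACCESS_REMOTE_ATOMIC,
		.max_rd_atomic      = 4,	/* responder resources in this driver */
		.max_dest_rd_atomic = 4,	/* initiator depth in this driver */
	};

	/* the mask selects which attributes the driver should apply */
	if (ib_modify_qp(qp, &attr, IB_QP_ACCESS_FLAGS |
			 IB_QP_MAX_QP_RD_ATOMIC |
			 IB_QP_MAX_DEST_RD_ATOMIC))
		/* handle failure */;
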
@@ -75,6 +75,7 @@ enum {
 	MTHCA_EQ_CONTEXT_SIZE =  0x40,
 	MTHCA_CQ_CONTEXT_SIZE =  0x40,
 	MTHCA_QP_CONTEXT_SIZE = 0x200,
+	MTHCA_RDB_ENTRY_SIZE  =  0x20,
 	MTHCA_AV_SIZE         =  0x20,
 	MTHCA_MGM_ENTRY_SIZE  =  0x40
 };
@@ -121,7 +122,6 @@ struct mthca_limits {
 	int      mtt_seg_size;
 	int      reserved_mtts;
 	int      reserved_mrws;
-	int      num_rdbs;
 	int      reserved_uars;
 	int      num_mgms;
 	int      num_amgms;
@@ -174,6 +174,8 @@ struct mthca_cq_table {
 struct mthca_qp_table {
 	struct mthca_alloc     alloc;
+	u32                    rdb_base;
+	int                    rdb_shift;
 	int                    sqp_start;
 	spinlock_t             lock;
 	struct mthca_array     qp;
...
@@ -50,7 +50,6 @@ static int default_profile[MTHCA_RES_NUM] = {
 };

 enum {
-	MTHCA_RDB_ENTRY_SIZE = 32,
 	MTHCA_MTT_SEG_SIZE   = 64
 };
@@ -181,8 +180,13 @@ int mthca_make_profile(struct mthca_dev *dev,
 			init_hca->log_num_eqs = profile[i].log_num;
 			break;
 		case MTHCA_RES_RDB:
-			dev->limits.num_rdbs = profile[i].num;
-			init_hca->rdb_base   = profile[i].start;
+			for (dev->qp_table.rdb_shift = 0;
+			     profile[MTHCA_RES_QP].num << dev->qp_table.rdb_shift <
+				     profile[i].num;
+			     ++dev->qp_table.rdb_shift)
+				; /* nothing */
+			dev->qp_table.rdb_base = (u32) profile[i].start;
+			init_hca->rdb_base     = profile[i].start;
 			break;
 		case MTHCA_RES_MCG:
 			dev->limits.num_mgms = profile[i].num >> 1;
...
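
The loop added to mthca_make_profile() computes rdb_shift as the smallest
shift for which the QP count, shifted left, covers the whole RDB table, i.e.
the base-2 log of the number of doorbell records reserved per QP (both counts
are powers of two). A minimal standalone sketch of the same computation, with
a hypothetical helper name:

	/*
	 * Sketch only, not part of the patch: smallest shift such that
	 * (num_qps << shift) >= num_rdbs, i.e. log2 of RDB entries per
	 * QP for power-of-two inputs.
	 */
	static int rdb_shift_for(int num_qps, int num_rdbs)
	{
		int shift = 0;

		while (num_qps << shift < num_rdbs)
			++shift;
		return shift;
	}

For example, with 64K QPs and 256K RDB entries the loop stops at shift = 2,
i.e. four doorbell records per QP.
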
@@ -162,9 +162,12 @@ struct mthca_qp {
 	spinlock_t             lock;
 	atomic_t               refcount;
 	u32                    qpn;
-	int                    transport;
-	enum ib_qp_state       state;
 	int                    is_direct;
+	u8                     transport;
+	u8                     state;
+	u8                     atomic_rd_en;
+	u8                     resp_depth;

 	struct mthca_mr        mr;
 	struct mthca_wq        rq;
...
@@ -146,7 +146,7 @@ enum {
 	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
 	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
 	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
-	MTHCA_QP_OPTPAR_REW               = 1 << 3,
+	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
 	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
 	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
 	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
@@ -697,13 +697,86 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
 	}

-	/* XXX initiator resources */
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+		qp_context->params1 |= cpu_to_be32(min(attr->max_dest_rd_atomic ?
+						       ffs(attr->max_dest_rd_atomic) - 1 : 0,
+						       7) << 21);
+		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
+	}

 	if (attr_mask & IB_QP_SQ_PSN)
 		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
 	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

-	/* XXX RDMA/atomic enable, responder resources */
+	if (attr_mask & IB_QP_ACCESS_FLAGS) {
+		/*
+		 * Only enable RDMA/atomics if we have responder
+		 * resources set to a non-zero value.
+		 */
+		if (qp->resp_depth) {
+			qp_context->params2 |=
+				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
+					    MTHCA_QP_BIT_RWE : 0);
+			qp_context->params2 |=
+				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
+					    MTHCA_QP_BIT_RRE : 0);
+			qp_context->params2 |=
+				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
+					    MTHCA_QP_BIT_RAE : 0);
+		}
+
+		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
+							MTHCA_QP_OPTPAR_RRE |
+							MTHCA_QP_OPTPAR_RAE);
+
+		qp->atomic_rd_en = attr->qp_access_flags;
+	}
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+		u8 rra_max;
+
+		if (qp->resp_depth && !attr->max_rd_atomic) {
+			/*
+			 * Lowering our responder resources to zero.
+			 * Turn off RDMA/atomics as responder.
+			 * (RWE/RRE/RAE in params2 already zero)
+			 */
+			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
+								MTHCA_QP_OPTPAR_RRE |
+								MTHCA_QP_OPTPAR_RAE);
+		}
+
+		if (!qp->resp_depth && attr->max_rd_atomic) {
+			/*
+			 * Increasing our responder resources from
+			 * zero. Turn on RDMA/atomics as appropriate.
+			 */
+			qp_context->params2 |=
+				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
+					    MTHCA_QP_BIT_RWE : 0);
+			qp_context->params2 |=
+				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
+					    MTHCA_QP_BIT_RRE : 0);
+			qp_context->params2 |=
+				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
+					    MTHCA_QP_BIT_RAE : 0);
+
+			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
+								MTHCA_QP_OPTPAR_RRE |
+								MTHCA_QP_OPTPAR_RAE);
+		}
+
+		for (rra_max = 0;
+		     1 << rra_max < attr->max_rd_atomic &&
+			     rra_max < dev->qp_table.rdb_shift;
+		     ++rra_max)
+			; /* nothing */
+		qp_context->params2      |= cpu_to_be32(rra_max << 21);
+		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
+
+		qp->resp_depth = attr->max_rd_atomic;
+	}

 	if (qp->rq.policy == IB_SIGNAL_ALL_WR)
 		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);
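
Both new branches encode a resource count into the 3-bit field at bits 23:21
of a context word: the IB_QP_MAX_DEST_RD_ATOMIC branch uses ffs() (the
position of the lowest set bit, so an exact log2 only for power-of-two
values) clamped to 7, while the rra_max loop finds the smallest exponent
whose power of two covers max_rd_atomic, capped at rdb_shift. A sketch of
the two encodings with hypothetical helper names:

	#include <strings.h>	/* userspace ffs(); the kernel has its own */

	/* ffs()-based encoding, as in the SRA_MAX branch above */
	static unsigned int log2_ffs_field(unsigned int depth)
	{
		unsigned int v = depth ? ffs(depth) - 1 : 0;

		if (v > 7)
			v = 7;
		return v << 21;
	}

	/* loop-based ceiling log2, as in the rra_max computation */
	static unsigned int log2_ceil_field(unsigned int depth, unsigned int cap)
	{
		unsigned int e;

		for (e = 0; (1u << e) < depth && e < cap; ++e)
			; /* nothing */
		return e << 21;
	}
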
@@ -714,7 +787,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 	if (attr_mask & IB_QP_RQ_PSN)
 		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

-	/* XXX ra_buff_indx */
+	qp_context->ra_buff_indx = dev->qp_table.rdb_base +
+		((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
+		 dev->qp_table.rdb_shift);

 	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);
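
The new ra_buff_indx value is plain address arithmetic: each QP owns
2^rdb_shift doorbell records of MTHCA_RDB_ENTRY_SIZE (0x20) bytes starting
at rdb_base, indexed by the QP number masked to the QP table size. A worked
sketch with made-up example values (hypothetical helper, not driver code):

	#include <stdint.h>	/* uint32_t stands in for the kernel's u32 */

	enum { RDB_ENTRY_SIZE = 0x20 };	/* mirrors MTHCA_RDB_ENTRY_SIZE */

	static uint32_t rdb_addr(uint32_t rdb_base, uint32_t qpn,
				 uint32_t num_qps, int rdb_shift)
	{
		/* ((qpn mod num_qps) * entry size) << entries-per-QP shift */
		return rdb_base +
			((qpn & (num_qps - 1)) * RDB_ENTRY_SIZE << rdb_shift);
	}

For instance, rdb_base = 0x100000, qpn = 5, num_qps = 64K, and rdb_shift = 2
give 0x100000 + (5 * 0x20 << 2) = 0x100280.
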
@@ -910,6 +985,8 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 	spin_lock_init(&qp->lock);
 	atomic_set(&qp->refcount, 1);
 	qp->state        = IB_QPS_RESET;
+	qp->atomic_rd_en = 0;
+	qp->resp_depth   = 0;
 	qp->sq.policy    = send_policy;
 	qp->rq.policy    = recv_policy;
 	qp->rq.cur       = 0;
...