Commit 00628182 authored by Chuck Lever, committed by J. Bruce Fields

rdma core: Add rdma_rw_mr_factor()

The amount of payload per MR depends on device capabilities and
the memory registration mode in use. The new rdma_rw API hides both,
making it difficult for ULPs to determine how large their transport
send queues need to be.

Expose the MR payload information via a new API.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Acked-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 5a25bfd2
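
The commit itself adds no caller; for illustration, here is a minimal sketch of how a ULP might use the new helper when sizing its Send Queue and Send Completion Queue during transport setup. Only rdma_rw_mr_factor() and the ib_qp_init_attr/ib_qp_cap fields are real kernel API; the helper name, its parameters, and the specific sizing policy below are hypothetical.

/* Illustrative sketch only -- not part of this patch. */
#include <rdma/ib_verbs.h>
#include <rdma/rw.h>

/*
 * Hypothetical ULP helper: reserve enough work-request space so that
 * each of @max_requests outstanding requests can post one Send WR plus
 * the RDMA Read/Write WRs needed to move up to @max_payload_pages
 * pages of payload.
 */
static void example_size_send_queue(struct ib_device *device, u8 port_num,
				    struct ib_qp_init_attr *qp_attr,
				    unsigned int max_requests,
				    unsigned int max_payload_pages,
				    unsigned int *send_cq_depth)
{
	unsigned int ctxts;

	/* rdma_rw contexts (MRs) needed to move one maximum-sized payload */
	ctxts = rdma_rw_mr_factor(device, port_num, max_payload_pages);

	/* One rdma_rw context per payload, per outstanding request */
	qp_attr->cap.max_rdma_ctxs = ctxts * max_requests;

	/*
	 * One Send WR per request; the rdma_rw core grows the Send Queue
	 * further, based on max_rdma_ctxs, when the QP is created.
	 */
	qp_attr->cap.max_send_wr = max_requests;

	/* Depth the ULP would use when allocating its Send Completion Queue */
	*send_cq_depth = max_requests + qp_attr->cap.max_rdma_ctxs;
}
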
@@ -643,6 +643,30 @@ void rdma_rw_ctx_destroy_signature(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy_signature);

/**
 * rdma_rw_mr_factor - return number of MRs required for a payload
 * @device:	device handling the connection
 * @port_num:	port num to which the connection is bound
 * @maxpages:	maximum payload pages per rdma_rw_ctx
 *
 * Returns the number of MRs the device requires to move @maxpages
 * pages of payload. The returned value is used during transport
 * creation to compute max_rdma_ctxs and the size of the transport's
 * Send and Send Completion Queues.
 */
unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
			       unsigned int maxpages)
{
	unsigned int mr_pages;

	if (rdma_rw_can_use_mr(device, port_num))
		mr_pages = rdma_rw_fr_page_list_len(device);
	else
		mr_pages = device->attrs.max_sge_rd;
	return DIV_ROUND_UP(maxpages, mr_pages);
}
EXPORT_SYMBOL(rdma_rw_mr_factor);

void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr)
{
	u32 factor;
...
@@ -81,6 +81,8 @@ struct ib_send_wr *rdma_rw_ctx_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
int rdma_rw_ctx_post(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
unsigned int rdma_rw_mr_factor(struct ib_device *device, u8 port_num,
			       unsigned int maxpages);
void rdma_rw_init_qp(struct ib_device *dev, struct ib_qp_init_attr *attr);
int rdma_rw_init_mrs(struct ib_qp *qp, struct ib_qp_init_attr *attr);
void rdma_rw_cleanup_mrs(struct ib_qp *qp);
...