Commit 84c9756c authored by Bart Van Assche, committed by Greg Kroah-Hartman

IB/core, RDMA RW API: Do not exceed QP SGE send limit

commit 632bc3f6 upstream.

Compute the SGE limit for RDMA READ and WRITE requests in
ib_create_qp(). Use that limit in the RDMA RW API implementation.
Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagi@grimberg.me>
Cc: Steve Wise <swise@opengridcomputing.com>
Cc: Parav Pandit <pandit.parav@gmail.com>
Cc: Nicholas Bellinger <nab@linux-iscsi.org>
Cc: Laurence Oberman <loberman@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 14065868
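The change is easiest to see from a consumer's point of view: a ULP that posts RDMA READ work requests can bound num_sge by the per-QP limit that ib_create_qp() now computes, instead of guessing from the device-wide attributes. The sketch below is illustrative only and not part of the patch; build_read_wr() is a hypothetical helper, while qp->max_read_sge, struct ib_rdma_wr and IB_WR_RDMA_READ are real kernel identifiers.

	#include <rdma/ib_verbs.h>

	/* Hypothetical ULP helper, for illustration only: fill in an RDMA
	 * READ work request while respecting the per-QP SGE limit that
	 * ib_create_qp() computes after this patch. */
	static void build_read_wr(struct ib_qp *qp, struct ib_rdma_wr *rdma_wr,
				  struct ib_sge *sge_list, u32 nr_sge,
				  u64 remote_addr, u32 rkey)
	{
		/* The caller must already have split its S/G list so that
		 * no single READ exceeds qp->max_read_sge. */
		WARN_ON_ONCE(nr_sge > qp->max_read_sge);

		rdma_wr->wr.opcode = IB_WR_RDMA_READ;
		rdma_wr->wr.sg_list = sge_list;
		rdma_wr->wr.num_sge = nr_sge;
		rdma_wr->remote_addr = remote_addr;
		rdma_wr->rkey = rkey;
	}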
drivers/infiniband/core/rw.c

@@ -58,13 +58,6 @@ static inline bool rdma_rw_io_needs_mr(struct ib_device *dev, u8 port_num,
 	return false;
 }
 
-static inline u32 rdma_rw_max_sge(struct ib_device *dev,
-		enum dma_data_direction dir)
-{
-	return dir == DMA_TO_DEVICE ?
-		dev->attrs.max_sge : dev->attrs.max_sge_rd;
-}
-
 static inline u32 rdma_rw_fr_page_list_len(struct ib_device *dev)
 {
 	/* arbitrary limit to avoid allocating gigantic resources */
@@ -186,7 +179,8 @@ static int rdma_rw_init_map_wrs(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
 		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
 {
 	struct ib_device *dev = qp->pd->device;
-	u32 max_sge = rdma_rw_max_sge(dev, dir);
+	u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
+		      qp->max_read_sge;
 	struct ib_sge *sge;
 	u32 total_len = 0, i, j;
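Here the device-wide lookup is replaced by the per-QP limit, keyed on the DMA direction: DMA_TO_DEVICE means the local buffer is the source of an RDMA WRITE, anything else maps to an RDMA READ. A minimal sketch of the resulting arithmetic, assuming a made-up helper name (the real splitting lives inside the RW API):

	#include <linux/kernel.h>
	#include <linux/dma-direction.h>
	#include <rdma/ib_verbs.h>

	/* Hypothetical helper: how many work requests are needed to cover
	 * sg_cnt scatter/gather entries given the per-QP SGE cap. */
	static u32 rdma_rw_nr_wrs_sketch(struct ib_qp *qp, u32 sg_cnt,
					 enum dma_data_direction dir)
	{
		u32 max_sge = dir == DMA_TO_DEVICE ? qp->max_write_sge :
			      qp->max_read_sge;

		return DIV_ROUND_UP(sg_cnt, max_sge);	/* one WR per chunk */
	}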
drivers/infiniband/core/verbs.c

@@ -814,6 +814,15 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
 		}
 	}
 
+	/*
+	 * Note: all hw drivers guarantee that max_send_sge is lower than
+	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
+	 * max_send_sge <= max_sge_rd.
+	 */
+	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
+	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
+				 device->attrs.max_sge_rd);
+
 	return qp;
 }
 EXPORT_SYMBOL(ib_create_qp);
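A worked example with made-up numbers shows why the min_t() clamp is needed: suppose a HCA reports device->attrs.max_sge_rd = 16 while the QP was created with cap.max_send_sge = 30. Then:

	qp->max_write_sge = 30;		/* max_send_sge taken as-is */
	qp->max_read_sge  = 16;		/* min_t(u32, 30, 16) */

Without the clamp, an RDMA READ sized from max_send_sge could carry more SGEs than the hardware accepts for READs.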
include/rdma/ib_verbs.h

@@ -1428,6 +1428,10 @@ struct ib_srq {
 	} ext;
 };
 
+/*
+ * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
+ * @max_read_sge: Maximum SGE elements per RDMA READ request.
+ */
 struct ib_qp {
 	struct ib_device	*device;
 	struct ib_pd		*pd;
@@ -1449,6 +1453,8 @@ struct ib_qp {
 	void		      (*event_handler)(struct ib_event *, void *);
 	void			*qp_context;
 	u32			qp_num;
+	u32			max_write_sge;
+	u32			max_read_sge;
 	enum ib_qp_type		qp_type;
 };