Commit b7e0b9a9 authored by Chuck Lever, committed by J. Bruce Fields

svcrdma: Replace GFP_KERNEL in a loop with GFP_NOFAIL

At the 2015 LSF/MM, it was requested that memory allocation
call sites that request GFP_KERNEL allocations in a loop should be
annotated with __GFP_NOFAIL.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent 30b7e246
@@ -211,7 +211,6 @@ extern int svc_rdma_sendto(struct svc_rqst *);
 extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
 extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
 				enum rpcrdma_errcode);
-struct page *svc_rdma_get_page(void);
 extern int svc_rdma_post_recv(struct svcxprt_rdma *);
 extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
 extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
......
@@ -517,7 +517,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	inline_bytes = rqstp->rq_res.len;
 	/* Create the RDMA response header */
-	res_page = svc_rdma_get_page();
+	res_page = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
 	rdma_resp = page_address(res_page);
 	reply_ary = svc_rdma_get_reply_array(rdma_argp);
 	if (reply_ary)
......
@@ -99,12 +99,8 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
 {
 	struct svc_rdma_op_ctxt *ctxt;
-	while (1) {
-		ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep, GFP_KERNEL);
-		if (ctxt)
-			break;
-		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-	}
+	ctxt = kmem_cache_alloc(svc_rdma_ctxt_cachep,
+				GFP_KERNEL | __GFP_NOFAIL);
 	ctxt->xprt = xprt;
 	INIT_LIST_HEAD(&ctxt->dto_q);
 	ctxt->count = 0;
@@ -156,12 +152,8 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
 struct svc_rdma_req_map *svc_rdma_get_req_map(void)
 {
 	struct svc_rdma_req_map *map;
-	while (1) {
-		map = kmem_cache_alloc(svc_rdma_map_cachep, GFP_KERNEL);
-		if (map)
-			break;
-		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
-	}
+	map = kmem_cache_alloc(svc_rdma_map_cachep,
+			       GFP_KERNEL | __GFP_NOFAIL);
 	map->count = 0;
 	return map;
 }
@@ -490,18 +482,6 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
 	return cma_xprt;
 }
struct page *svc_rdma_get_page(void)
{
struct page *page;
while ((page = alloc_page(GFP_KERNEL)) == NULL) {
/* If we can't get memory, wait a bit and try again */
printk(KERN_INFO "svcrdma: out of memory...retrying in 1s\n");
schedule_timeout_uninterruptible(msecs_to_jiffies(1000));
}
return page;
}
 int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 {
 	struct ib_recv_wr recv_wr, *bad_recv_wr;
@@ -520,7 +500,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 		pr_err("svcrdma: Too many sges (%d)\n", sge_no);
 		goto err_put_ctxt;
 	}
-	page = svc_rdma_get_page();
+	page = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
 	ctxt->pages[sge_no] = page;
 	pa = ib_dma_map_page(xprt->sc_cm_id->device,
 			     page, 0, PAGE_SIZE,
@@ -1323,7 +1303,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	int length;
 	int ret;
-	p = svc_rdma_get_page();
+	p = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
 	va = page_address(p);
 	/* XDR encode error */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment