Commit b33982c3 authored by Ursula Braun, committed by David S. Miller

net/smc: cleanup function __smc_buf_create()

Split function __smc_buf_create() for better readability.
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3e034725
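
Note: the split below leans on the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention. The new helper smc_new_buf_create() returns either a valid buffer descriptor or an errno encoded in the pointer itself, so the caller can tell a fatal -ENOMEM apart from a retryable failure without an extra output parameter. Here is a minimal userspace sketch of that idiom; the macros are simplified stand-ins for the kernel's versions in include/linux/err.h, and the buf_desc struct and size loop are illustrative only, not the kernel code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* simplified stand-ins for ERR_PTR/IS_ERR/PTR_ERR (include/linux/err.h):
 * errno values are encoded at the very top of the address space,
 * where no valid pointer can point
 */
#define MAX_ERRNO 4095
static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* illustrative descriptor, loosely modeled on struct smc_buf_desc */
struct buf_desc {
	void *cpu_addr;
};

/* allocate a descriptor or return an encoded error: -ENOMEM is fatal,
 * -EAGAIN asks the caller to retry with a smaller size
 */
static struct buf_desc *new_buf_create(size_t bufsize)
{
	struct buf_desc *desc = calloc(1, sizeof(*desc));

	if (!desc)
		return ERR_PTR(-ENOMEM);
	desc->cpu_addr = malloc(bufsize);
	if (!desc->cpu_addr) {
		free(desc);
		return ERR_PTR(-EAGAIN);
	}
	return desc;
}

int main(void)
{
	/* walk the candidate sizes downward, as __smc_buf_create() does */
	for (size_t bufsize = 1UL << 20; bufsize >= 1UL << 14; bufsize >>= 1) {
		struct buf_desc *desc = new_buf_create(bufsize);

		if (PTR_ERR(desc) == -ENOMEM)
			break;		/* out of memory: give up entirely */
		if (IS_ERR(desc))
			continue;	/* retry with the next smaller size */
		printf("allocated %zu bytes\n", bufsize);
		free(desc->cpu_addr);
		free(desc);
		return 0;
	}
	return 1;
}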
net/smc/smc_core.c

@@ -487,51 +487,17 @@ static inline int smc_rmb_wnd_update_limit(int rmbe_size)
 	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
 }

-static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
+static struct smc_buf_desc *smc_new_buf_create(struct smc_link_group *lgr,
+					       bool is_rmb, int bufsize)
 {
-	struct smc_connection *conn = &smc->conn;
-	struct smc_link_group *lgr = conn->lgr;
-	struct smc_buf_desc *buf_desc = NULL;
-	struct list_head *buf_list;
-	int bufsize, bufsize_short;
+	struct smc_buf_desc *buf_desc;
 	struct smc_link *lnk;
-	int sk_buf_size;
-	rwlock_t *lock;
 	int rc;

-	lnk = &lgr->lnk[SMC_SINGLE_LINK];
-	if (is_rmb)
-		/* use socket recv buffer size (w/o overhead) as start value */
-		sk_buf_size = smc->sk.sk_rcvbuf / 2;
-	else
-		/* use socket send buffer size (w/o overhead) as start value */
-		sk_buf_size = smc->sk.sk_sndbuf / 2;
-
-	for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2);
-	     bufsize_short >= 0; bufsize_short--) {
-		if (is_rmb) {
-			lock = &lgr->rmbs_lock;
-			buf_list = &lgr->rmbs[bufsize_short];
-		} else {
-			lock = &lgr->sndbufs_lock;
-			buf_list = &lgr->sndbufs[bufsize_short];
-		}
-		bufsize = smc_uncompress_bufsize(bufsize_short);
-		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
-			continue;
-
-		/* check for reusable slot in the link group */
-		buf_desc = smc_buf_get_slot(lgr, bufsize_short, lock, buf_list);
-		if (buf_desc) {
-			memset(buf_desc->cpu_addr, 0, bufsize);
-			break; /* found reusable slot */
-		}
-
-		/* try to allocate the determined number of pages */
+	/* try to alloc a new buffer */
 	buf_desc = kzalloc(sizeof(*buf_desc), GFP_KERNEL);
 	if (!buf_desc)
-		break; /* give up with -ENOMEM */
+		return ERR_PTR(-ENOMEM);
 	buf_desc->cpu_addr =
 		(void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
@@ -540,28 +506,28 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
 					 get_order(bufsize));
 	if (!buf_desc->cpu_addr) {
 		kfree(buf_desc);
-		buf_desc = NULL;
-		continue;
+		return ERR_PTR(-EAGAIN);
 	}
 	buf_desc->order = get_order(bufsize);

 	/* build the sg table from the pages */
+	lnk = &lgr->lnk[SMC_SINGLE_LINK];
 	rc = sg_alloc_table(&buf_desc->sgt[SMC_SINGLE_LINK], 1,
 			    GFP_KERNEL);
 	if (rc) {
 		smc_buf_free(buf_desc, lnk, is_rmb);
-		buf_desc = NULL;
-		continue;
+		return ERR_PTR(rc);
 	}
 	sg_set_buf(buf_desc->sgt[SMC_SINGLE_LINK].sgl,
 		   buf_desc->cpu_addr, bufsize);

 	/* map sg table to DMA address */
-	rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc, is_rmb ?
-			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
+	rc = smc_ib_buf_map_sg(lnk->smcibdev, buf_desc,
+			       is_rmb ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
 	/* SMC protocol depends on mapping to one DMA address only */
 	if (rc != 1) {
 		smc_buf_free(buf_desc, lnk, is_rmb);
-		buf_desc = NULL;
-		continue; /* if mapping failed, try smaller one */
+		return ERR_PTR(-EAGAIN);
 	}

 	/* create a new memory region for the RMB */
@@ -572,10 +538,56 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
 					      buf_desc);
 		if (rc) {
 			smc_buf_free(buf_desc, lnk, is_rmb);
-			buf_desc = NULL;
-			continue;
+			return ERR_PTR(rc);
 		}
 	}
+
+	return buf_desc;
+}
+
+static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
+{
+	struct smc_connection *conn = &smc->conn;
+	struct smc_link_group *lgr = conn->lgr;
+	struct smc_buf_desc *buf_desc = NULL;
+	struct list_head *buf_list;
+	int bufsize, bufsize_short;
+	int sk_buf_size;
+	rwlock_t *lock;
+
+	if (is_rmb)
+		/* use socket recv buffer size (w/o overhead) as start value */
+		sk_buf_size = smc->sk.sk_rcvbuf / 2;
+	else
+		/* use socket send buffer size (w/o overhead) as start value */
+		sk_buf_size = smc->sk.sk_sndbuf / 2;
+
+	for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2);
+	     bufsize_short >= 0; bufsize_short--) {
+		if (is_rmb) {
+			lock = &lgr->rmbs_lock;
+			buf_list = &lgr->rmbs[bufsize_short];
+		} else {
+			lock = &lgr->sndbufs_lock;
+			buf_list = &lgr->sndbufs[bufsize_short];
+		}
+		bufsize = smc_uncompress_bufsize(bufsize_short);
+		if ((1 << get_order(bufsize)) > SG_MAX_SINGLE_ALLOC)
+			continue;
+
+		/* check for reusable slot in the link group */
+		buf_desc = smc_buf_get_slot(lgr, bufsize_short, lock, buf_list);
+		if (buf_desc) {
+			memset(buf_desc->cpu_addr, 0, bufsize);
+			break; /* found reusable slot */
+		}
+
+		buf_desc = smc_new_buf_create(lgr, is_rmb, bufsize);
+		if (PTR_ERR(buf_desc) == -ENOMEM)
+			break;
+		if (IS_ERR(buf_desc))
+			continue;
+
 		buf_desc->used = 1;
 		write_lock_bh(lock);
@@ -584,7 +596,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb)
 		break; /* found */
 	}

-	if (!buf_desc || !buf_desc->cpu_addr)
+	if (IS_ERR(buf_desc))
 		return -ENOMEM;

 	if (is_rmb) {
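
A side note on the size loop retained in __smc_buf_create(): SMC advertises buffer sizes in a compressed short form, which is why the loop iterates over compressed values (bufsize_short) and expands each candidate with smc_uncompress_bufsize(). A small sketch of what such an encoding looks like, assuming a mapping of bufsize = 1 << (compressed + 14), i.e. a 16 KiB minimum that doubles per step; the helper name mirrors the kernel's, but the snippet is an illustration, not the kernel source:

#include <stdio.h>

/* assumed mapping for smc_uncompress_bufsize(): compressed value c
 * expands to (1 << (c + 14)) bytes, i.e. 16 KiB for c = 0
 */
static int uncompress_bufsize(unsigned char compressed)
{
	return 1 << (compressed + 14);
}

int main(void)
{
	/* walk downward from a compressed start value, as the for loop
	 * in __smc_buf_create() does, halving the candidate each step
	 */
	for (int bufsize_short = 4; bufsize_short >= 0; bufsize_short--)
		printf("compressed %d -> %d bytes\n",
		       bufsize_short, uncompress_bufsize(bufsize_short));
	return 0;
}

Under this assumption, each smc_new_buf_create() failure that returns -EAGAIN makes the loop retry with half the buffer size, down to the minimum, while -ENOMEM aborts the search immediately.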