Commit 1b6dc1df authored by Jeff Layton, committed by J. Bruce Fields

nfsd/sunrpc: factor svc_rqst allocation and freeing from sv_nrthreads refcounting

In later patches, we'll want to be able to allocate and free svc_rqst
structures without monkeying with the serv->sv_nrthreads refcount.

Factor those pieces out of their respective functions.
Signed-off-by: Shirley Ma <shirley.ma@oracle.com>
Acked-by: Jeff Layton <jlayton@primarydata.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
parent d70bc0c6
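
For orientation, here is a minimal, hypothetical sketch (not part of this patch) of the kind of caller the split enables: code that wants a svc_rqst for its own use, for example a hypothetical backchannel-style user, can pair svc_rqst_alloc() with svc_rqst_free() and never touch serv->sv_nrthreads or the pool's thread list. The helper signatures come from the diff below; the example_* wrappers and the NUMA_NO_NODE choice are illustrative assumptions.

#include <linux/numa.h>
#include <linux/sunrpc/svc.h>

/* Hypothetical caller: allocate a svc_rqst that is never registered as a
 * pooled server thread, so sv_nrthreads and sp_all_threads stay untouched. */
static struct svc_rqst *example_alloc_private_rqst(struct svc_serv *serv,
						   struct svc_pool *pool)
{
	/* svc_rqst_alloc() returns NULL on allocation failure */
	return svc_rqst_alloc(serv, pool, NUMA_NO_NODE);
}

static void example_free_private_rqst(struct svc_rqst *rqstp)
{
	/* Releases rq_argp, rq_resp, rq_auth_data, the page buffer and the
	 * rqst itself (via kfree_rcu); no sv_nrthreads accounting. */
	svc_rqst_free(rqstp);
}
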
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -458,8 +458,11 @@ void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
 int		   svc_bind(struct svc_serv *serv, struct net *net);
 struct svc_serv *svc_create(struct svc_program *, unsigned int,
			    struct svc_serv_ops *);
+struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
+					struct svc_pool *pool, int node);
 struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
					struct svc_pool *pool, int node);
+void		   svc_rqst_free(struct svc_rqst *);
 void		   svc_exit_thread(struct svc_rqst *);
 unsigned int	   svc_pool_map_get(void);
 void		   svc_pool_map_put(void);
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -583,40 +583,52 @@ svc_release_buffer(struct svc_rqst *rqstp)
 }

 struct svc_rqst *
-svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
+svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
 {
	struct svc_rqst	*rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
-		goto out_enomem;
+		return rqstp;

-	serv->sv_nrthreads++;
	__set_bit(RQ_BUSY, &rqstp->rq_flags);
	spin_lock_init(&rqstp->rq_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;
-	spin_lock_bh(&pool->sp_lock);
-	pool->sp_nrthreads++;
-	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
-	spin_unlock_bh(&pool->sp_lock);

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
-		goto out_thread;
+		goto out_enomem;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
-		goto out_thread;
+		goto out_enomem;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
-		goto out_thread;
+		goto out_enomem;

	return rqstp;
-out_thread:
-	svc_exit_thread(rqstp);
 out_enomem:
-	return ERR_PTR(-ENOMEM);
+	svc_rqst_free(rqstp);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(svc_rqst_alloc);
+
+struct svc_rqst *
+svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
+{
+	struct svc_rqst	*rqstp;
+
+	rqstp = svc_rqst_alloc(serv, pool, node);
+	if (!rqstp)
+		return ERR_PTR(-ENOMEM);
+
+	serv->sv_nrthreads++;
+	spin_lock_bh(&pool->sp_lock);
+	pool->sp_nrthreads++;
+	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
+	spin_unlock_bh(&pool->sp_lock);
+	return rqstp;
 }
 EXPORT_SYMBOL_GPL(svc_prepare_thread);
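
Note the deliberately different failure conventions in the hunk above: svc_rqst_alloc() reports failure with NULL, while svc_prepare_thread() wraps it and keeps returning ERR_PTR(-ENOMEM), so existing callers need no changes. A hedged sketch of the two calling patterns (the example_* functions are hypothetical):

#include <linux/err.h>
#include <linux/sunrpc/svc.h>

/* Existing-style caller: keeps the ERR_PTR convention of svc_prepare_thread(). */
static int example_spawn(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp = svc_prepare_thread(serv, pool, node);

	if (IS_ERR(rqstp))
		return PTR_ERR(rqstp);		/* -ENOMEM */
	/* ...normally a kthread would be started here to service rqstp... */
	return 0;
}

/* New-style caller: tests svc_rqst_alloc() against NULL instead. */
static int example_alloc_and_drop(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst *rqstp = svc_rqst_alloc(serv, pool, node);

	if (!rqstp)
		return -ENOMEM;
	svc_rqst_free(rqstp);	/* illustrative only: free it immediately */
	return 0;
}
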
@@ -751,15 +763,21 @@ EXPORT_SYMBOL_GPL(svc_set_num_threads);
  * mutex" for the service.
  */
 void
-svc_exit_thread(struct svc_rqst *rqstp)
+svc_rqst_free(struct svc_rqst *rqstp)
 {
-	struct svc_serv	*serv = rqstp->rq_server;
-	struct svc_pool	*pool = rqstp->rq_pool;
-
	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);
+	kfree_rcu(rqstp, rq_rcu_head);
+}
+EXPORT_SYMBOL_GPL(svc_rqst_free);
+
+void
+svc_exit_thread(struct svc_rqst *rqstp)
+{
+	struct svc_serv	*serv = rqstp->rq_server;
+	struct svc_pool	*pool = rqstp->rq_pool;

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
@@ -767,7 +785,7 @@ svc_exit_thread(struct svc_rqst *rqstp)
	list_del_rcu(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

-	kfree_rcu(rqstp, rq_rcu_head);
+	svc_rqst_free(rqstp);

	/* Release the server */
	if (serv)
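
A note on why svc_rqst_free() ends in kfree_rcu(): pool->sp_all_threads is walked under RCU elsewhere in sunrpc, so svc_exit_thread() unlinks the rqst with list_del_rcu() and the actual free is deferred past a grace period. A hypothetical walker of the sort this protects (example_count_pool_threads() is not real code) might look like:

#include <linux/rculist.h>
#include <linux/sunrpc/svc.h>

/* Illustrative only: count the threads in a pool without taking sp_lock.
 * Safe against a concurrent svc_exit_thread() because each svc_rqst is
 * unlinked with list_del_rcu() and freed via kfree_rcu(). */
static int example_count_pool_threads(struct svc_pool *pool)
{
	struct svc_rqst *rqstp;
	int n = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all)
		n++;
	rcu_read_unlock();
	return n;
}
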