Commit ccf08bed authored by Chuck Lever

SUNRPC: Replace pool stats with per-CPU variables

Eliminate the use of bus-locked operations in svc_xprt_enqueue(),
which is a hot path. Replace them with per-cpu variables to reduce
cross-CPU memory bus traffic.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent 65ba3d24
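The pattern the patch moves to is the kernel's percpu_counter API: hot paths update a CPU-local delta, and an exact total is computed only when statistics are reported. A minimal sketch of that lifecycle, not taken from this patch (the demo_* names are made up for illustration):

/*
 * Sketch of the percpu_counter lifecycle this patch adopts.
 * The demo_* names are illustrative only.
 */
#include <linux/percpu_counter.h>
#include <linux/gfp.h>

static struct percpu_counter demo_counter;

static int demo_setup(void)
{
	/* Allocates per-CPU storage; can fail under memory pressure. */
	return percpu_counter_init(&demo_counter, 0, GFP_KERNEL);
}

static void demo_hot_path(void)
{
	/* Bumps a CPU-local delta; no cross-CPU bus-locked RMW on most calls. */
	percpu_counter_inc(&demo_counter);
}

static s64 demo_report(void)
{
	/* Folds every CPU's delta into an exact total; slower, so slow paths only. */
	return percpu_counter_sum_positive(&demo_counter);
}

static void demo_teardown(void)
{
	percpu_counter_destroy(&demo_counter);
}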
@@ -21,14 +21,6 @@
 #include <linux/mm.h>
 #include <linux/pagevec.h>
 
-/* statistics for svc_pool structures */
-struct svc_pool_stats {
-	atomic_long_t	packets;
-	unsigned long	sockets_queued;
-	atomic_long_t	threads_woken;
-	atomic_long_t	threads_timedout;
-};
-
 /*
  *
  * RPC service thread pool.
@@ -45,7 +37,12 @@ struct svc_pool {
 	struct list_head	sp_sockets;	/* pending sockets */
 	unsigned int		sp_nrthreads;	/* # of threads in pool */
 	struct list_head	sp_all_threads;	/* all server threads */
-	struct svc_pool_stats	sp_stats;	/* statistics on pool operation */
+
+	/* statistics on pool operation */
+	struct percpu_counter	sp_sockets_queued;
+	struct percpu_counter	sp_threads_woken;
+	struct percpu_counter	sp_threads_timedout;
+
 #define	SP_TASK_PENDING		(0)		/* still work to do even if no
 						 * xprt is queued. */
 #define SP_CONGESTED		(1)
...
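The three new sp_* fields replace a single struct of shared atomic_long_t counters that every CPU incremented in place; note the old packets counter has no direct replacement (see the stats-file change at the end of this diff). Roughly, simplified and not the kernel's exact definition (see include/linux/percpu_counter.h), each percpu_counter looks like this:

/*
 * Rough model of struct percpu_counter, for illustration only.
 */
struct rough_percpu_counter {
	raw_spinlock_t	lock;		/* taken only when a delta is folded in */
	s64		count;		/* approximate shared total */
	s32 __percpu	*counters;	/* small per-CPU deltas, updated locklessly */
};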
@@ -512,6 +512,10 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
 		INIT_LIST_HEAD(&pool->sp_sockets);
 		INIT_LIST_HEAD(&pool->sp_all_threads);
 		spin_lock_init(&pool->sp_lock);
+
+		percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
+		percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL);
+		percpu_counter_init(&pool->sp_threads_timedout, 0, GFP_KERNEL);
 	}
 
 	return serv;
@@ -565,6 +569,7 @@ void
 svc_destroy(struct kref *ref)
 {
 	struct svc_serv *serv = container_of(ref, struct svc_serv, sv_refcnt);
+	unsigned int i;
 
 	dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
 	timer_shutdown_sync(&serv->sv_temptimer);
@@ -580,6 +585,13 @@ svc_destroy(struct kref *ref)
 
 	svc_pool_map_put(serv->sv_nrpools);
 
+	for (i = 0; i < serv->sv_nrpools; i++) {
+		struct svc_pool *pool = &serv->sv_pools[i];
+
+		percpu_counter_destroy(&pool->sp_sockets_queued);
+		percpu_counter_destroy(&pool->sp_threads_woken);
+		percpu_counter_destroy(&pool->sp_threads_timedout);
+	}
 	kfree(serv->sv_pools);
 	kfree(serv);
 }
...
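Unlike the old plain fields, percpu_counter_init() allocates per-CPU storage, which is why it takes GFP_KERNEL and returns an errno, and why svc_destroy() now needs the matching percpu_counter_destroy() loop. A sketch of what error-checked initialization could look like; the helper name and labels are hypothetical and not part of this patch, which does not check the return values:

/*
 * Hypothetical helper, for illustration only: initialize the three
 * pool counters and unwind if any allocation fails.
 */
static int demo_pool_stats_init(struct svc_pool *pool)
{
	int err;

	err = percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
	if (err)
		return err;
	err = percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL);
	if (err)
		goto out_queued;
	err = percpu_counter_init(&pool->sp_threads_timedout, 0, GFP_KERNEL);
	if (err)
		goto out_woken;
	return 0;

out_woken:
	percpu_counter_destroy(&pool->sp_threads_woken);
out_queued:
	percpu_counter_destroy(&pool->sp_sockets_queued);
	return err;
}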
@@ -462,11 +462,9 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 	pool = svc_pool_for_cpu(xprt->xpt_server);
 
-	atomic_long_inc(&pool->sp_stats.packets);
+	percpu_counter_inc(&pool->sp_sockets_queued);
 
 	spin_lock_bh(&pool->sp_lock);
 	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
-	pool->sp_stats.sockets_queued++;
 	spin_unlock_bh(&pool->sp_lock);
 
 	/* find a thread for this xprt */
@@ -474,7 +472,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
 		if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
 			continue;
-		atomic_long_inc(&pool->sp_stats.threads_woken);
+		percpu_counter_inc(&pool->sp_threads_woken);
 		rqstp->rq_qtime = ktime_get();
 		wake_up_process(rqstp->rq_task);
 		goto out_unlock;
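svc_xprt_enqueue() is the hot path named in the commit message. What makes percpu_counter_inc() cheaper than atomic_long_inc() here is that most calls never touch shared memory at all. A rough sketch of the underlying add, simplified from lib/percpu_counter.c and ignoring IRQ details, not verbatim kernel code:

/*
 * Rough sketch of the percpu_counter add path.
 */
static void rough_percpu_counter_add(struct percpu_counter *fbc,
				     s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (abs(count) >= batch) {
		/* Slow path: fold the CPU-local delta into the shared total. */
		raw_spin_lock(&fbc->lock);
		fbc->count += count;
		__this_cpu_write(*fbc->counters, 0);
		raw_spin_unlock(&fbc->lock);
	} else {
		/* Fast path: CPU-local store only, no bus-locked instruction. */
		__this_cpu_write(*fbc->counters, count);
	}
	preempt_enable();
}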
@@ -769,7 +767,7 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 		goto out_found;
 
 	if (!time_left)
-		atomic_long_inc(&pool->sp_stats.threads_timedout);
+		percpu_counter_inc(&pool->sp_threads_timedout);
 
 	if (signalled() || kthread_should_stop())
 		return ERR_PTR(-EINTR);
@@ -1440,12 +1438,12 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
 		return 0;
 	}
 
-	seq_printf(m, "%u %lu %lu %lu %lu\n",
+	seq_printf(m, "%u %llu %llu %llu %llu\n",
 		pool->sp_id,
-		(unsigned long)atomic_long_read(&pool->sp_stats.packets),
-		pool->sp_stats.sockets_queued,
-		(unsigned long)atomic_long_read(&pool->sp_stats.threads_woken),
-		(unsigned long)atomic_long_read(&pool->sp_stats.threads_timedout));
+		percpu_counter_sum_positive(&pool->sp_sockets_queued),
+		percpu_counter_sum_positive(&pool->sp_sockets_queued),
+		percpu_counter_sum_positive(&pool->sp_threads_woken),
+		percpu_counter_sum_positive(&pool->sp_threads_timedout));
 
 	return 0;
 }
...
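On the reporting side, percpu_counter_sum_positive() walks every CPU's delta to produce an exact, non-negative s64, which is why the format string switches from %lu to %llu. Since the separate packets counter is gone, the first two value columns of the pool stats file are now both derived from sp_sockets_queued, preserving the five-column format. A sketch of the two read-side helpers (generic illustration, not from this patch):

/*
 * Illustration only: approximate vs. exact reads of a percpu_counter.
 */
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>

static void demo_show_counter(struct seq_file *m, struct percpu_counter *fbc)
{
	/* Cheap: reads only the shared total, which may lag the per-CPU deltas. */
	s64 approx = percpu_counter_read_positive(fbc);

	/* Exact: sums every CPU's delta; fine for an occasional stats read. */
	s64 exact = percpu_counter_sum_positive(fbc);

	seq_printf(m, "approx %lld exact %lld\n", approx, exact);
}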