Commit 8148f0b5 authored by Pavel Begunkov's avatar Pavel Begunkov Committed by Jens Axboe

blk-stat: Optimise blk_stat_add()

blk_stat_add() calls {get,put}_cpu_ptr() in a loop, which entails
overhead of disabling/enabling preemption. The loop is under RCU
(i.e. short) anyway, so do get_cpu() in advance.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a2e80f6f
...@@ -53,7 +53,7 @@ void blk_stat_add(struct request *rq, u64 now) ...@@ -53,7 +53,7 @@ void blk_stat_add(struct request *rq, u64 now)
struct request_queue *q = rq->q; struct request_queue *q = rq->q;
struct blk_stat_callback *cb; struct blk_stat_callback *cb;
struct blk_rq_stat *stat; struct blk_rq_stat *stat;
int bucket; int bucket, cpu;
u64 value; u64 value;
value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0; value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;
...@@ -61,6 +61,7 @@ void blk_stat_add(struct request *rq, u64 now) ...@@ -61,6 +61,7 @@ void blk_stat_add(struct request *rq, u64 now)
blk_throtl_stat_add(rq, value); blk_throtl_stat_add(rq, value);
rcu_read_lock(); rcu_read_lock();
cpu = get_cpu();
list_for_each_entry_rcu(cb, &q->stats->callbacks, list) { list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
if (!blk_stat_is_active(cb)) if (!blk_stat_is_active(cb))
continue; continue;
...@@ -69,10 +70,10 @@ void blk_stat_add(struct request *rq, u64 now) ...@@ -69,10 +70,10 @@ void blk_stat_add(struct request *rq, u64 now)
if (bucket < 0) if (bucket < 0)
continue; continue;
stat = &get_cpu_ptr(cb->cpu_stat)[bucket]; stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
blk_rq_stat_add(stat, value); blk_rq_stat_add(stat, value);
put_cpu_ptr(cb->cpu_stat);
} }
put_cpu();
rcu_read_unlock(); rcu_read_unlock();
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment