Commit 90d3839b authored by Peter Zijlstra, committed by Ingo Molnar

block: Use u64_stats_init() to initialize seqcounts

Now that seqcounts are lockdep-enabled objects, we need to explicitly
initialize runtime-allocated seqcounts so that lockdep can track them.
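
For illustration, a minimal sketch of the pattern (the foo_stats
structure and alloc_foo_stats() helper are hypothetical stand-ins for
the affected block-layer structures, not code from this patch): a
statically defined seqcount gets its lockdep key at build time, but
anything carved out of alloc_percpu()/kmalloc() memory has to be
initialized at runtime.

  /* Hypothetical example: runtime-allocated stats embedding a
   * u64_stats_sync (which contains a seqcount on 32-bit SMP). */
  struct foo_stats {
  	u64 bytes;
  	struct u64_stats_sync syncp;
  };

  static struct foo_stats __percpu *alloc_foo_stats(void)
  {
  	struct foo_stats __percpu *stats;
  	int cpu;

  	stats = alloc_percpu(struct foo_stats);
  	if (!stats)
  		return NULL;

  	/* Without this, lockdep sees an uninitialized, non-static key. */
  	for_each_possible_cpu(cpu)
  		u64_stats_init(&per_cpu_ptr(stats, cpu)->syncp);

  	return stats;
  }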

Without this patch, Fengguang was seeing:

  [    4.127282] INFO: trying to register non-static key.
  [    4.128027] the code is fine but needs lockdep annotation.
  [    4.128027] turning off the locking correctness validator.
  [    4.128027] CPU: 0 PID: 96 Comm: kworker/u4:1 Not tainted 3.12.0-next-20131108-10601-gbad570d #2
  [    4.128027] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
  [    ...     ]
  [    4.128027] Call Trace:
  [    4.128027]  [<7908e744>] ? console_unlock+0x353/0x380
  [    4.128027]  [<79dc7cf2>] dump_stack+0x48/0x60
  [    4.128027]  [<7908953e>] __lock_acquire.isra.26+0x7e3/0xceb
  [    4.128027]  [<7908a1c5>] lock_acquire+0x71/0x9a
  [    4.128027]  [<794079aa>] ? blk_throtl_bio+0x1c3/0x485
  [    4.128027]  [<7940658b>] throtl_update_dispatch_stats+0x7c/0x153
  [    4.128027]  [<794079aa>] ? blk_throtl_bio+0x1c3/0x485
  [    4.128027]  [<794079aa>] blk_throtl_bio+0x1c3/0x485
  ...

Use u64_stats_init() for all affected data structures, which initializes
the seqcount.
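
For context, the syncp seqcount is what lets 32-bit kernels read these
64-bit counters tear-free; the generic writer/reader pattern it guards
looks roughly like this (again using the hypothetical foo_stats from
above, with the stock u64_stats_* helpers):

  static void foo_stats_add(struct foo_stats *stat, u64 val)
  {
  	/* Writer: bump the seqcount around the 64-bit update. */
  	u64_stats_update_begin(&stat->syncp);
  	stat->bytes += val;
  	u64_stats_update_end(&stat->syncp);
  }

  static u64 foo_stats_read(struct foo_stats *stat)
  {
  	unsigned int start;
  	u64 v;

  	/* Reader: retry if a writer raced with the fetch. */
  	do {
  		start = u64_stats_fetch_begin(&stat->syncp);
  		v = stat->bytes;
  	} while (u64_stats_fetch_retry(&stat->syncp, start));

  	return v;
  }
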
Reported-and-Tested-by: Fengguang Wu <fengguang.wu@intel.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
[ Folded in another fix from the mailing list as well as a fix to that fix. Tweaked commit message. ]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1384314134-6895-1-git-send-email-john.stultz@linaro.org
[ So I actually think that the two SOBs from PeterZ are the right depiction of the patch route. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5216d530
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -402,6 +402,11 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
 #define blk_queue_for_each_rl(rl, q)	\
 	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
 
+static inline void blkg_stat_init(struct blkg_stat *stat)
+{
+	u64_stats_init(&stat->syncp);
+}
+
 /**
  * blkg_stat_add - add a value to a blkg_stat
  * @stat: target blkg_stat
@@ -458,6 +463,11 @@ static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
 	blkg_stat_add(to, blkg_stat_read(from));
 }
 
+static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
+{
+	u64_stats_init(&rwstat->syncp);
+}
+
 /**
  * blkg_rwstat_add - add a value to a blkg_rwstat
  * @rwstat: target blkg_rwstat
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -256,6 +256,12 @@ static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
 	}						\
 } while (0)
 
+static void tg_stats_init(struct tg_stats_cpu *tg_stats)
+{
+	blkg_rwstat_init(&tg_stats->service_bytes);
+	blkg_rwstat_init(&tg_stats->serviced);
+}
+
 /*
  * Worker for allocating per cpu stat for tgs. This is scheduled on the
  * system_wq once there are some groups on the alloc_list waiting for
@@ -269,12 +275,16 @@ static void tg_stats_alloc_fn(struct work_struct *work)
 
 alloc_stats:
 	if (!stats_cpu) {
+		int cpu;
+
 		stats_cpu = alloc_percpu(struct tg_stats_cpu);
 		if (!stats_cpu) {
 			/* allocation failed, try again after some time */
 			schedule_delayed_work(dwork, msecs_to_jiffies(10));
 			return;
 		}
+		for_each_possible_cpu(cpu)
+			tg_stats_init(per_cpu_ptr(stats_cpu, cpu));
 	}
 
 	spin_lock_irq(&tg_stats_alloc_lock);
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1508,6 +1508,29 @@ static void cfq_init_cfqg_base(struct cfq_group *cfqg)
 }
 
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
+static void cfqg_stats_init(struct cfqg_stats *stats)
+{
+	blkg_rwstat_init(&stats->service_bytes);
+	blkg_rwstat_init(&stats->serviced);
+	blkg_rwstat_init(&stats->merged);
+	blkg_rwstat_init(&stats->service_time);
+	blkg_rwstat_init(&stats->wait_time);
+	blkg_rwstat_init(&stats->queued);
+
+	blkg_stat_init(&stats->sectors);
+	blkg_stat_init(&stats->time);
+#ifdef CONFIG_DEBUG_BLK_CGROUP
+	blkg_stat_init(&stats->unaccounted_time);
+	blkg_stat_init(&stats->avg_queue_size_sum);
+	blkg_stat_init(&stats->avg_queue_size_samples);
+	blkg_stat_init(&stats->dequeue);
+	blkg_stat_init(&stats->group_wait_time);
+	blkg_stat_init(&stats->idle_time);
+	blkg_stat_init(&stats->empty_time);
+#endif
+}
+
 static void cfq_pd_init(struct blkcg_gq *blkg)
 {
 	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
@@ -1515,6 +1538,8 @@ static void cfq_pd_init(struct blkcg_gq *blkg)
 	cfq_init_cfqg_base(cfqg);
 	cfqg->weight = blkg->blkcg->cfq_weight;
 	cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
+	cfqg_stats_init(&cfqg->stats);
+	cfqg_stats_init(&cfqg->dead_stats);
 }
 
 static void cfq_pd_offline(struct blkcg_gq *blkg)