Commit f689054a authored by Shakeel Butt, committed by Andrew Morton

percpu_counter: add percpu_counter_sum_all interface

The percpu_counter is used in scenarios where performance matters more
than accuracy.  For percpu_counter users who want more accurate
information in their slow path, percpu_counter_sum() is provided, which
traverses all online CPUs to accumulate the data.  It only needs to
traverse online CPUs because percpu_counter implements a CPU offline
callback which syncs the local data of the offlined CPU.
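
As a refresher, typical usage looks roughly like the following.  This
is a minimal sketch; the nr_widgets counter and the helpers around it
are hypothetical, for illustration only:

	#include <linux/percpu_counter.h>

	static struct percpu_counter nr_widgets;

	static int widgets_init(void)
	{
		/* Second argument is the initial value. */
		return percpu_counter_init(&nr_widgets, 0, GFP_KERNEL);
	}

	static void widget_alloc_one(void)
	{
		/* Fast path: bumps a CPU-local s32, rarely takes the lock. */
		percpu_counter_inc(&nr_widgets);
	}

	static s64 widgets_exact_count(void)
	{
		/* Slow path: folds every online CPU's delta into the count. */
		return percpu_counter_sum(&nr_widgets);
	}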

However, there is a small race window between the online-CPU traversal
of percpu_counter_sum() and the CPU offline callback.  The offline
callback has to traverse all the percpu_counters on the system to flush
the CPU-local data, which can take a while.  During that time, the CPU
going offline has already been published as offline to all readers.
So, while the offline callback is running, percpu_counter_sum() can be
called for a counter which still has state on the CPU going offline;
since percpu_counter_sum() only traverses online CPUs, it will skip
that CPU, even though the offline callback might not yet have flushed
that counter's state on it.
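
For reference, the offline callback in lib/percpu_counter.c looks
roughly like the following.  This is a simplified sketch, not part of
this patch; the list walk over every counter on the system is what
opens the window described above:

	/* Roughly what lib/percpu_counter.c runs when a CPU dies. */
	static int percpu_counter_cpu_dead(unsigned int cpu)
	{
		struct percpu_counter *fbc;

		spin_lock_irq(&percpu_counters_lock);
		list_for_each_entry(fbc, &percpu_counters, list) {
			s32 *pcount;

			raw_spin_lock(&fbc->lock);
			pcount = per_cpu_ptr(fbc->counters, cpu);
			/* Fold the dead CPU's delta into the global count. */
			fbc->count += *pcount;
			*pcount = 0;
			raw_spin_unlock(&fbc->lock);
		}
		spin_unlock_irq(&percpu_counters_lock);
		return 0;
	}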

Normally this is not an issue, because percpu_counter users can
tolerate some inaccuracy for a small window of time.  However, a new
user, i.e. mm_struct on the cleanup path, wants to check the exact
state of the percpu_counter through check_mm().  For such users, this
patch introduces percpu_counter_sum_all(), which traverses all possible
CPUs; it is used in fork.c:check_mm() to avoid the potential race.
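
The resulting pattern, applied concretely in the check_mm() hunk below,
is to do the cheap online-CPU sum first and fall back to the all-CPU
sum only when the first sum reports a leak.  A sketch, reusing the
hypothetical nr_widgets counter from above:

	long val = percpu_counter_sum(&nr_widgets);	/* online CPUs */
	if (unlikely(val)) {
		/* Rule out the CPU-offlining race before reporting. */
		val = percpu_counter_sum_all(&nr_widgets);
		if (val)
			pr_alert("widget counter leaked: %ld\n", val);
	}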

This issue is exposed by the later patch "mm: convert mm's rss stats into
percpu_counter".

Link: https://lkml.kernel.org/r/20221109012011.881058-1-shakeelb@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f1a79412
include/linux/percpu_counter.h
@@ -45,6 +45,7 @@ void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
 			      s32 batch);
 s64 __percpu_counter_sum(struct percpu_counter *fbc);
+s64 percpu_counter_sum_all(struct percpu_counter *fbc);
 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
 void percpu_counter_sync(struct percpu_counter *fbc);
@@ -193,6 +194,11 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 	return percpu_counter_read(fbc);
 }
 
+static inline s64 percpu_counter_sum_all(struct percpu_counter *fbc)
+{
+	return percpu_counter_read(fbc);
+}
+
 static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
 {
 	return true;
kernel/fork.c
@@ -755,6 +755,11 @@ static void check_mm(struct mm_struct *mm)
 	for (i = 0; i < NR_MM_COUNTERS; i++) {
 		long x = percpu_counter_sum(&mm->rss_stat[i]);
 
+		if (likely(!x))
+			continue;
+
+		/* Make sure this is not due to a race with CPU offlining. */
+		x = percpu_counter_sum_all(&mm->rss_stat[i]);
 		if (unlikely(x))
 			pr_alert("BUG: Bad rss-counter state mm:%p type:%s val:%ld\n",
 				 mm, resident_page_types[i], x);
lib/percpu_counter.c
@@ -117,11 +117,8 @@ void percpu_counter_sync(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(percpu_counter_sync);
 
-/*
- * Add up all the per-cpu counts, return the result.  This is a more accurate
- * but much slower version of percpu_counter_read_positive()
- */
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
+static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
+				     const struct cpumask *cpu_mask)
 {
 	s64 ret;
 	int cpu;
@@ -129,15 +126,35 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 
 	raw_spin_lock_irqsave(&fbc->lock, flags);
 	ret = fbc->count;
-	for_each_online_cpu(cpu) {
+	for_each_cpu(cpu, cpu_mask) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}
 	raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	return ret;
 }
+
+/*
+ * Add up all the per-cpu counts, return the result.  This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
+{
+	return __percpu_counter_sum_mask(fbc, cpu_online_mask);
+}
 EXPORT_SYMBOL(__percpu_counter_sum);
 
+/*
+ * This is a slower version of percpu_counter_sum(), as it traverses all
+ * possible CPUs.  Use it only where accurate data is needed in the
+ * presence of CPUs getting offlined.
+ */
+s64 percpu_counter_sum_all(struct percpu_counter *fbc)
+{
+	return __percpu_counter_sum_mask(fbc, cpu_possible_mask);
+}
+EXPORT_SYMBOL(percpu_counter_sum_all);
+
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
 			  struct lock_class_key *key)
 {