Commit 7d9ab9b6 authored by Roman Gushchin, committed by Dennis Zhou

percpu_ref: release percpu memory early without PERCPU_REF_ALLOW_REINIT

Release the percpu memory after finishing the switch to atomic mode,
but only if PERCPU_REF_ALLOW_REINIT isn't set.
Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
parent ddde2af7
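For context, a minimal caller-side sketch (not part of this commit; the my_* names are illustrative) of a percpu_ref that is never reinitialized. Because PERCPU_REF_ALLOW_REINIT is not passed to percpu_ref_init(), the percpu counters can now be freed as soon as the switch to atomic mode completes, instead of lingering until percpu_ref_exit().

#include <linux/percpu-refcount.h>
#include <linux/gfp.h>

static struct percpu_ref my_ref;	/* illustrative name */

/* Runs once the last reference is dropped after percpu_ref_kill(). */
static void my_ref_release(struct percpu_ref *ref)
{
	/* free the object that owns the ref */
}

static int my_obj_init(void)
{
	/* flags == 0: PERCPU_REF_ALLOW_REINIT is not requested */
	return percpu_ref_init(&my_ref, my_ref_release, 0, GFP_KERNEL);
}

static void my_obj_teardown(void)
{
	/*
	 * Kills the ref and switches it to atomic mode; with this patch
	 * the percpu memory is released right after the switch finishes,
	 * because allow_reinit is false.
	 */
	percpu_ref_kill(&my_ref);
}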
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -102,6 +102,7 @@ struct percpu_ref {
 	percpu_ref_func_t	*release;
 	percpu_ref_func_t	*confirm_switch;
 	bool			force_atomic:1;
+	bool			allow_reinit:1;
 	struct rcu_head		rcu;
 };
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -69,11 +69,14 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
 		return -ENOMEM;
 
 	ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
+	ref->allow_reinit = flags & PERCPU_REF_ALLOW_REINIT;
 
-	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
+	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD)) {
 		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
-	else
+		ref->allow_reinit = true;
+	} else {
 		start_count += PERCPU_COUNT_BIAS;
+	}
 
 	if (flags & PERCPU_REF_INIT_DEAD)
 		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
@@ -119,6 +122,9 @@ static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
 	ref->confirm_switch = NULL;
 	wake_up_all(&percpu_ref_switch_waitq);
 
+	if (!ref->allow_reinit)
+		percpu_ref_exit(ref);
+
 	/* drop ref from percpu_ref_switch_to_atomic() */
 	percpu_ref_put(ref);
 }
@@ -194,6 +200,9 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
 	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
 		return;
 
+	if (WARN_ON_ONCE(!ref->allow_reinit))
+		return;
+
 	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);
 
 	/*
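Conversely, a sketch (illustrative, not from this commit) of a ref that is killed and later revived: such callers must pass PERCPU_REF_ALLOW_REINIT at init time, or initialize with PERCPU_REF_INIT_ATOMIC/PERCPU_REF_INIT_DEAD, which now set allow_reinit implicitly. Otherwise the percpu counters are already freed by the time percpu_ref_reinit() runs, and __percpu_ref_switch_to_percpu() hits the new WARN_ON_ONCE() and keeps the ref in atomic mode.

#include <linux/percpu-refcount.h>
#include <linux/gfp.h>

static struct percpu_ref reusable_ref;	/* illustrative name */

static void reusable_ref_release(struct percpu_ref *ref)
{
	/* e.g. signal that the ref has drained to zero */
}

static int reusable_init(void)
{
	/* keep the percpu counters around so the ref can be revived */
	return percpu_ref_init(&reusable_ref, reusable_ref_release,
			       PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
}

static void reusable_freeze_thaw(void)
{
	percpu_ref_kill(&reusable_ref);	/* switch to atomic, stop new users */
	/* wait here until the release callback reports the ref hit zero */
	percpu_ref_reinit(&reusable_ref);	/* valid only with ALLOW_REINIT */
}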