Commit 06efa5f3 authored by Kent Overstreet

closures: closure_get_not_zero(), closure_return_sync()

Provide new primitives for solving a lifetime issue with bcachefs
btree_trans objects.

closure_return_sync(): like closure_sync(), wait synchronously for any
outstanding gets; like closure_return(), the closure is considered
"finished" and the ref is left at 0.

closure_get_not_zero(): get a ref on a closure if it's alive, i.e. if its
ref is nonzero.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 18e92841
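
The intended usage pattern, roughly, is the sketch below. The struct my_obj
type and the my_obj_*() helpers are hypothetical stand-ins for the bcachefs
btree_trans code this targets; only the closure_*() calls are the actual
primitives this commit adds or builds on.

	#include <linux/closure.h>

	/* Hypothetical object whose lifetime is controlled by an
	 * embedded closure (stands in for bcachefs's btree_trans): */
	struct my_obj {
		struct closure	ref;
	};

	static void my_obj_init(struct my_obj *obj)
	{
		/* ref starts at 1; the release here pairs with the
		 * acquire in closure_get_not_zero(): */
		closure_init_stack_release(&obj->ref);
	}

	/* Other threads may only use obj if they can take a ref, i.e.
	 * if teardown hasn't already dropped the last one: */
	static bool my_obj_tryget(struct my_obj *obj)
	{
		return closure_get_not_zero(&obj->ref);
	}

	static void my_obj_put(struct my_obj *obj)
	{
		closure_put(&obj->ref);
	}

	static void my_obj_exit(struct my_obj *obj)
	{
		/* Drop the initial ref and wait for outstanding gets.
		 * Unlike closure_sync(), the ref is left at 0, so
		 * concurrent my_obj_tryget() calls now fail instead of
		 * resurrecting the object: */
		closure_return_sync(&obj->ref);
		/* obj can now be freed or reused */
	}

Note the ordering guarantee this buys: atomic_set_release() in
closure_init_stack_release() pairs with atomic_try_cmpxchg_acquire() in
closure_get_not_zero(), so any thread that successfully takes a ref also
observes the object fully initialized.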
include/linux/closure.h

@@ -284,6 +284,21 @@ static inline void closure_get(struct closure *cl)
 #endif
 }
 
+/**
+ * closure_get_not_zero
+ */
+static inline bool closure_get_not_zero(struct closure *cl)
+{
+	unsigned old = atomic_read(&cl->remaining);
+	do {
+		if (!(old & CLOSURE_REMAINING_MASK))
+			return false;
+
+	} while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));
+
+	return true;
+}
+
 /**
  * closure_init - Initialize a closure, setting the refcount to 1
  * @cl: closure to initialize
@@ -310,6 +325,12 @@ static inline void closure_init_stack(struct closure *cl)
 	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
 }
 
+static inline void closure_init_stack_release(struct closure *cl)
+{
+	memset(cl, 0, sizeof(struct closure));
+	atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+}
+
 /**
  * closure_wake_up - wake up all closures on a wait list,
  * with memory barrier
@@ -355,6 +376,8 @@ do {								\
  */
 #define closure_return(_cl)	continue_at((_cl), NULL, NULL)
 
+void closure_return_sync(struct closure *cl);
+
 /**
  * continue_at_nobarrier - jump to another function without barrier
  *
lib/closure.c

@@ -13,7 +13,7 @@
 #include <linux/seq_file.h>
 #include <linux/sched/debug.h>
 
-static inline void closure_put_after_sub(struct closure *cl, int flags)
+static inline void closure_put_after_sub_checks(int flags)
 {
 	int r = flags & CLOSURE_REMAINING_MASK;
 
@@ -22,12 +22,17 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
 		 flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
 		r &= ~CLOSURE_GUARD_MASK;
 
-	if (!r) {
-		smp_acquire__after_ctrl_dep();
+	WARN(!r && (flags & ~CLOSURE_DESTRUCTOR),
+	     "closure ref hit 0 with incorrect flags set: %x (%u)",
+	     flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+}
+
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+	closure_put_after_sub_checks(flags);
 
-		WARN(flags & ~CLOSURE_DESTRUCTOR,
-		     "closure ref hit 0 with incorrect flags set: %x (%u)",
-		     flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+	if (!(flags & CLOSURE_REMAINING_MASK)) {
+		smp_acquire__after_ctrl_dep();
 
 		cl->closure_get_happened = false;
@@ -145,6 +150,41 @@ void __sched __closure_sync(struct closure *cl)
 }
 EXPORT_SYMBOL(__closure_sync);
 
+/*
+ * closure_return_sync - finish running a closure, synchronously (i.e. waiting
+ * for outstanding get()s to finish) and returning once closure refcount is 0.
+ *
+ * Unlike closure_sync() this doesn't reinit the ref to 1; subsequent
+ * closure_get_not_zero() calls will fail.
+ */
+void __sched closure_return_sync(struct closure *cl)
+{
+	struct closure_syncer s = { .task = current };
+
+	cl->s = &s;
+	set_closure_fn(cl, closure_sync_fn, NULL);
+
+	unsigned flags = atomic_sub_return_release(1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR,
+						   &cl->remaining);
+
+	closure_put_after_sub_checks(flags);
+
+	if (unlikely(flags & CLOSURE_REMAINING_MASK)) {
+		while (1) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (s.done)
+				break;
+			schedule();
+		}
+
+		__set_current_state(TASK_RUNNING);
+	}
+
+	if (cl->parent)
+		closure_put(cl->parent);
+}
+EXPORT_SYMBOL(closure_return_sync);
+
 int __sched __closure_sync_timeout(struct closure *cl, unsigned long timeout)
 {
 	struct closure_syncer s = { .task = current };