Commit ee2be7d7 authored by Andreas Gruenbacher

gfs2: Replace gfs2_glock_queue_put with gfs2_glock_put_async

Function gfs2_glock_queue_put() puts a glock reference by enqueuing
glock work instead of putting the reference directly.  This ensures that
the operation won't sleep, but it is costly and really only necessary
when putting the final glock reference.  Replace it with a new
gfs2_glock_put_async() function that only queues glock work when putting
the last glock reference.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
parent f80d882e
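For context, the put-without-sleeping pattern the new helper follows can be sketched outside the kernel. The sketch below is illustrative only: obj_put_async(), the pthread mutex, and the detached worker thread are hypothetical stand-ins for gl_lockref, gl_lockref.lock, and the glock work queue; this is not gfs2 or lockref code. It shows the same decision as the patch: drop a reference directly when it is not the last one, and defer only the final put to another context.

/* deferred_put.c - minimal userspace sketch, build with: cc -pthread deferred_put.c */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;	/* stands in for gl->gl_lockref.lock */
	int count;		/* stands in for the lockref count */
};

/* The "final put" that is not safe to run in a non-sleepable context. */
static void obj_free(struct obj *o)
{
	printf("final reference dropped, freeing object\n");
	free(o);
}

/* Worker that performs the deferred final put (the "glock work" role). */
static void *put_worker(void *arg)
{
	struct obj *o = arg;
	int last;

	pthread_mutex_lock(&o->lock);
	last = (--o->count == 0);
	pthread_mutex_unlock(&o->lock);
	if (last)
		obj_free(o);
	return NULL;
}

/*
 * Drop a reference without sleeping: decrement directly unless this is
 * the last reference; defer only the last put to the worker thread.
 */
static void obj_put_async(struct obj *o)
{
	pthread_t worker;

	pthread_mutex_lock(&o->lock);
	if (o->count > 1) {
		o->count--;	/* fast path: not the last reference */
		pthread_mutex_unlock(&o->lock);
		return;
	}
	/* Last reference: hand the put off while still holding the lock,
	 * as the patch does with __gfs2_glock_queue_work(). */
	pthread_create(&worker, NULL, put_worker, o);
	pthread_detach(worker);
	pthread_mutex_unlock(&o->lock);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	pthread_mutex_init(&o->lock, NULL);
	o->count = 2;

	obj_put_async(o);	/* decremented directly: 2 -> 1 */
	obj_put_async(o);	/* last reference: handed to the worker */

	/* Exit only the main thread so the detached worker can finish. */
	pthread_exit(NULL);
}

In the real patch, lockref_put_or_lock() handles the fast path: it decrements the count unless doing so would reach the last reference, in which case it takes gl_lockref.lock and returns with it held, and __gfs2_glock_queue_work() then defers the final put to the glock workqueue before the lock is released.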
@@ -285,14 +285,6 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
 	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 }
 
-/*
- * Cause the glock to be put in work queue context.
- */
-void gfs2_glock_queue_put(struct gfs2_glock *gl)
-{
-	gfs2_glock_queue_work(gl, 0);
-}
-
 /**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
@@ -307,6 +299,22 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 	__gfs2_glock_put(gl);
 }
 
+/*
+ * gfs2_glock_put_async - Decrement reference count without sleeping
+ * @gl: The glock to put
+ *
+ * Decrement the reference count on glock immediately unless it is the last
+ * reference.  Defer putting the last reference to work queue context.
+ */
+void gfs2_glock_put_async(struct gfs2_glock *gl)
+{
+	if (lockref_put_or_lock(&gl->gl_lockref))
+		return;
+
+	__gfs2_glock_queue_work(gl, 0);
+	spin_unlock(&gl->gl_lockref.lock);
+}
+
 /**
  * may_grant - check if it's ok to grant a new lock
  * @gl: The glock
@@ -2529,8 +2537,7 @@ static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
 	if (gl) {
 		if (n == 0)
 			return;
-		if (!lockref_put_not_zero(&gl->gl_lockref))
-			gfs2_glock_queue_put(gl);
+		gfs2_glock_put_async(gl);
 	}
 	for (;;) {
 		gl = rhashtable_walk_next(&gi->hti);
......
@@ -172,7 +172,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 		   int create, struct gfs2_glock **glp);
 struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
 void gfs2_glock_put(struct gfs2_glock *gl);
-void gfs2_glock_queue_put(struct gfs2_glock *gl);
+void gfs2_glock_put_async(struct gfs2_glock *gl);
 void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
 			u16 flags, struct gfs2_holder *gh,
......
@@ -786,7 +786,7 @@ void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
 {
 	if (atomic_dec_return(&gl->gl_revokes) == 0) {
 		clear_bit(GLF_LFLUSH, &gl->gl_flags);
-		gfs2_glock_queue_put(gl);
+		gfs2_glock_put_async(gl);
 	}
 }
......
@@ -1049,7 +1049,7 @@ static int gfs2_drop_inode(struct inode *inode)
 		gfs2_glock_hold(gl);
 		if (!gfs2_queue_try_to_evict(gl))
-			gfs2_glock_queue_put(gl);
+			gfs2_glock_put_async(gl);
 		return 0;
 	}
@@ -1255,7 +1255,7 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
 static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
 {
 	if (current->flags & PF_MEMALLOC)
-		gfs2_glock_queue_put(gl);
+		gfs2_glock_put_async(gl);
 	else
 		gfs2_glock_put(gl);
 }
......