Commit 7286b31e authored by Steven Whitehouse

GFS2: Take glock reference in examine_bucket()

We need to check the glock ref counter in a race-free way
in order to ensure that the gfs2_glock_hold() call will
succeed. The easiest way to do that is simply to take the
reference early, in the common code of examine_bucket(),
skipping any glocks with a zero ref count.

That means that each examiner function must put its
reference on the glock once it has done its work.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Reported-by: David Teigland <teigland@redhat.com>
Tested-by: David Teigland <teigland@redhat.com>
parent 7c0ef28a
@@ -1488,7 +1488,7 @@ static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
 	rcu_read_lock();
 	hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
-		if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
+		if ((gl->gl_sbd == sdp) && atomic_inc_not_zero(&gl->gl_ref))
 			examiner(gl);
 	}
 	rcu_read_unlock();
@@ -1508,18 +1508,17 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
  * thaw_glock - thaw out a glock which has an unprocessed reply waiting
  * @gl: The glock to thaw
  *
+ * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
+ * so this has to result in the ref count being dropped by one.
  */
 static void thaw_glock(struct gfs2_glock *gl)
 {
 	if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
-		return;
+		goto out;
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-	gfs2_glock_hold(gl);
-	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) {
+out:
 		gfs2_glock_put(gl);
+	}
 }

 /**
@@ -1536,7 +1535,6 @@ static void clear_glock(struct gfs2_glock *gl)
 	if (gl->gl_state != LM_ST_UNLOCKED)
 		handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 	spin_unlock(&gl->gl_spin);
-	gfs2_glock_hold(gl);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 		gfs2_glock_put(gl);
 }
...
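For readers unfamiliar with the idiom in the first hunk, here is a minimal standalone sketch of the pattern, not the GFS2 code itself. Under RCU the walker can observe an object whose last reference is concurrently being dropped, so the old atomic_read() check followed by a later gfs2_glock_hold() could resurrect a zero refcount; atomic_inc_not_zero() takes the reference only while the count is still non-zero, and each examiner then drops that reference when it finishes. All names below (struct obj, obj_put(), bucket_walk()) are hypothetical, invented for the sketch.

#include <linux/atomic.h>
#include <linux/rculist_bl.h>
#include <linux/slab.h>

struct obj {
	atomic_t ref;
	struct hlist_bl_node list;
	struct rcu_head rcu;
};

typedef void (*examiner_fn)(struct obj *o);

/* Drop one reference; free the object via RCU on the final put. */
static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->ref))
		kfree_rcu(o, rcu);
}

static void bucket_walk(examiner_fn examiner, struct hlist_bl_head *head)
{
	struct obj *o;
	struct hlist_bl_node *pos;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(o, pos, head, list) {
		/*
		 * atomic_inc_not_zero() fails once the count has hit
		 * zero, so an object already on its way to being freed
		 * is skipped rather than resurrected.
		 */
		if (atomic_inc_not_zero(&o->ref))
			examiner(o);	/* examiner must call obj_put() */
	}
	rcu_read_unlock();
}

The same reasoning explains the thaw_glock() rewrite above: the walker now holds the reference, so the extra gfs2_glock_hold() is gone, and the goto into the queueing-failure branch makes every exit path drop exactly one reference, matching the new comment about frozen glocks keeping a ref outstanding.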