Commit 26bb7505 authored by Steven Whitehouse

GFS2: Fix glock refcount issues

This patch fixes some ref counting issues. Firstly, by moving
the point at which we drop the ref count to after a dlm lock
operation has completed, we ensure that we never call
gfs2_glock_hold() on a lock with a zero ref count.
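
For illustration only, here is a minimal userspace C sketch (hypothetical names, not the GFS2 code) of the pattern the first change establishes: the reference taken when the asynchronous lock request was queued is released by the worker only after the reply has been fully processed, so any hold() performed while handling the reply can never run against a count that has already reached zero.

/* Sketch of the "drop the request ref after the reply is processed"
 * pattern; plain C11 userspace code with hypothetical names. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct lock_obj {
	atomic_int ref;
	int reply;                      /* result delivered by the lock manager */
};

static void obj_hold(struct lock_obj *l)
{
	atomic_fetch_add(&l->ref, 1);
}

static void obj_put(struct lock_obj *l)
{
	if (atomic_fetch_sub(&l->ref, 1) == 1)
		free(l);                /* last reference gone */
}

/* Handle the completion; it may take and drop extra references, but it
 * never drops the reference that belongs to the request itself. */
static void handle_reply(struct lock_obj *l)
{
	obj_hold(l);                    /* e.g. queue follow-up work against l */
	printf("reply %d handled\n", l->reply);
	obj_put(l);
}

/* Worker: owns the request reference for the whole completion and drops it
 * only once everything that might call obj_hold() has finished. */
static void lock_work(struct lock_obj *l)
{
	int had_reply = 1;              /* stands in for GLF_REPLY_PENDING */

	if (had_reply)
		handle_reply(l);
	/* ... remaining state-machine work would run here ... */
	if (had_reply)
		obj_put(l);             /* the deferred drop of the request ref */
}

int main(void)
{
	struct lock_obj *l = calloc(1, sizeof(*l));

	atomic_init(&l->ref, 1);        /* ref taken when the request was queued */
	l->reply = 0;
	lock_work(l);                   /* frees l when the last ref is dropped */
	return 0;
}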

Secondly, by using atomic_dec_and_lock() in gfs2_glock_put()
we ensure that at no time will a glock with zero ref count
appear on the lru_list. That means that we can remove the
check for this in our shrinker (which was racy).
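
Below is a rough userspace analogue of the dec-and-lock idea (a sketch with simplified, hypothetical types, not the kernel's atomic_dec_and_lock() implementation): the final decrement happens with the list lock already held, so anyone walking the LRU list under that lock can never observe an object whose count has reached zero.

/* Userspace sketch of the dec-and-lock pattern with hypothetical names;
 * the kernel's atomic_dec_and_lock() provides the same guarantee. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int ref;
	bool on_lru;                    /* stands in for LRU list membership */
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return true with lru_lock held iff the ref count dropped to zero. */
static bool dec_and_lock(struct obj *o)
{
	int old = atomic_load(&o->ref);

	/* Fast path: the count cannot reach zero, so skip the lock. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(&o->ref, &old, old - 1))
			return false;
	}
	/* Slow path: take the lock first, then do the final decrement, so a
	 * zero count is only ever reached while lru_lock is held. */
	pthread_mutex_lock(&lru_lock);
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		return true;            /* caller frees; lock is still held */
	pthread_mutex_unlock(&lru_lock);
	return false;
}

static void obj_put(struct obj *o)
{
	if (dec_and_lock(o)) {
		if (o->on_lru)
			o->on_lru = false;  /* the real code does list_del_init() */
		pthread_mutex_unlock(&lru_lock);
		free(o);
	}
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	atomic_init(&o->ref, 2);
	o->on_lru = true;
	obj_put(o);                     /* 2 -> 1: lock never taken */
	obj_put(o);                     /* 1 -> 0: unlinked under the lock, freed */
	puts("done");
	return 0;
}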
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
parent c29cd900
@@ -241,15 +241,14 @@ int gfs2_glock_put(struct gfs2_glock *gl)
 	int rv = 0;
 
 	write_lock(gl_lock_addr(gl->gl_hash));
-	if (atomic_dec_and_test(&gl->gl_ref)) {
+	if (atomic_dec_and_lock(&gl->gl_ref, &lru_lock)) {
 		hlist_del(&gl->gl_list);
-		write_unlock(gl_lock_addr(gl->gl_hash));
-		spin_lock(&lru_lock);
 		if (!list_empty(&gl->gl_lru)) {
 			list_del_init(&gl->gl_lru);
 			atomic_dec(&lru_count);
 		}
 		spin_unlock(&lru_lock);
+		write_unlock(gl_lock_addr(gl->gl_hash));
 		GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
 		glock_free(gl);
 		rv = 1;
@@ -513,7 +512,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 			GLOCK_BUG_ON(gl, 1);
 		}
 		spin_unlock(&gl->gl_spin);
-		gfs2_glock_put(gl);
 		return;
 	}
 
@@ -524,8 +522,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 		if (glops->go_xmote_bh) {
 			spin_unlock(&gl->gl_spin);
 			rv = glops->go_xmote_bh(gl, gh);
-			if (rv == -EAGAIN)
-				return;
 			spin_lock(&gl->gl_spin);
 			if (rv) {
 				do_error(gl, rv);
@@ -540,7 +536,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 out_locked:
 	spin_unlock(&gl->gl_spin);
-	gfs2_glock_put(gl);
 }
 
 static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
@@ -600,7 +595,6 @@ __acquires(&gl->gl_spin)
 	if (!(ret & LM_OUT_ASYNC)) {
 		finish_xmote(gl, ret);
-		gfs2_glock_hold(gl);
 		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 			gfs2_glock_put(gl);
 	} else {
@@ -712,9 +706,12 @@ static void glock_work_func(struct work_struct *work)
 {
 	unsigned long delay = 0;
 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
+	int drop_ref = 0;
 
-	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
+	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
 		finish_xmote(gl, gl->gl_reply);
+		drop_ref = 1;
+	}
 	down_read(&gfs2_umount_flush_sem);
 	spin_lock(&gl->gl_spin);
 	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -732,6 +729,8 @@ static void glock_work_func(struct work_struct *work)
 	if (!delay ||
 	    queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
 		gfs2_glock_put(gl);
+	if (drop_ref)
+		gfs2_glock_put(gl);
 }
 
 /**
@@ -1366,10 +1365,6 @@ static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask)
 		list_del_init(&gl->gl_lru);
 		atomic_dec(&lru_count);
 
-		/* Check if glock is about to be freed */
-		if (atomic_read(&gl->gl_ref) == 0)
-			continue;
-
 		/* Test for being demotable */
 		if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
 			gfs2_glock_hold(gl);