Commit d98779e6 authored by Andreas Gruenbacher

gfs2: Fix potential glock use-after-free on unmount

When a DLM lockspace is released and there are still locks in that
lockspace, DLM will unlock those locks automatically.  Commit
fb6791d1 started exploiting this behavior to speed up filesystem
unmount: gfs2 would simply free glocks it didn't want to unlock and then
release the lockspace.  This didn't take the bast callbacks for
asynchronous lock contention notifications into account, which remain
active until a lock is unlocked or its lockspace is released.

To prevent those callbacks from accessing deallocated objects, put the
glocks that should not be unlocked on the sd_dead_glocks list, release
the lockspace, and only then free those glocks.

As an additional measure, ignore unexpected ast and bast callbacks if
the receiving glock is dead.

Fixes: fb6791d1 ("GFS2: skip dlm_unlock calls in unmount")
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Cc: David Teigland <teigland@redhat.com>
parent 59f60005
...@@ -166,18 +166,45 @@ static bool glock_blocked_by_withdraw(struct gfs2_glock *gl) ...@@ -166,18 +166,45 @@ static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
return true; return true;
} }
void gfs2_glock_free(struct gfs2_glock *gl) static void __gfs2_glock_free(struct gfs2_glock *gl)
{ {
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms); rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
smp_mb(); smp_mb();
wake_up_glock(gl); wake_up_glock(gl);
call_rcu(&gl->gl_rcu, gfs2_glock_dealloc); call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
}
/*
 * gfs2_glock_free - free a glock immediately
 * @gl: the glock to free
 *
 * Unhashes and RCU-frees the glock, then drops the superblock's glock
 * disposal count, waking the unmount path (sd_kill_wait) when the last
 * outstanding glock has been disposed of.
 */
void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	__gfs2_glock_free(gl);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_kill_wait);
}
/*
 * gfs2_glock_free_later - defer freeing a glock until after lockspace release
 * @gl: the glock to queue for deferred freeing
 *
 * Instead of freeing the glock now, put it on sd_dead_glocks so it can be
 * freed by gfs2_free_dead_glocks() once the DLM lockspace has been
 * released.  This keeps the glock alive while DLM bast callbacks for its
 * (still locked) DLM lock may still fire.  The gl_lru list head is reused
 * for the dead list; the glock is no longer on the LRU at this point.
 * The disposal count is still dropped here so unmount can proceed.
 */
void gfs2_glock_free_later(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	spin_lock(&lru_lock);
	list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
	spin_unlock(&lru_lock);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_kill_wait);
}
/*
 * gfs2_free_dead_glocks - free all glocks queued by gfs2_glock_free_later()
 * @sdp: the superblock being unmounted
 *
 * Called after the DLM lockspace has been released (gfs2_lm_unmount), at
 * which point no more ast/bast callbacks can arrive, so it is finally safe
 * to free the deferred glocks.  Runs single-threaded during unmount, hence
 * no lru_lock is taken here.
 */
static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp)
{
	struct list_head *list = &sdp->sd_dead_glocks;

	while (!list_empty(list)) {
		struct gfs2_glock *gl;

		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
		list_del_init(&gl->gl_lru);
		__gfs2_glock_free(gl);
	}
}
/** /**
* gfs2_glock_hold() - increment reference count on glock * gfs2_glock_hold() - increment reference count on glock
* @gl: The glock to hold * @gl: The glock to hold
...@@ -2233,6 +2260,8 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) ...@@ -2233,6 +2260,8 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
wait_event_timeout(sdp->sd_kill_wait, wait_event_timeout(sdp->sd_kill_wait,
atomic_read(&sdp->sd_glock_disposal) == 0, atomic_read(&sdp->sd_glock_disposal) == 0,
HZ * 600); HZ * 600);
gfs2_lm_unmount(sdp);
gfs2_free_dead_glocks(sdp);
glock_hash_walk(dump_glock_func, sdp); glock_hash_walk(dump_glock_func, sdp);
} }
......
...@@ -252,6 +252,7 @@ void gfs2_gl_dq_holders(struct gfs2_sbd *sdp); ...@@ -252,6 +252,7 @@ void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
void gfs2_glock_thaw(struct gfs2_sbd *sdp); void gfs2_glock_thaw(struct gfs2_sbd *sdp);
void gfs2_glock_add_to_lru(struct gfs2_glock *gl); void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
void gfs2_glock_free(struct gfs2_glock *gl); void gfs2_glock_free(struct gfs2_glock *gl);
void gfs2_glock_free_later(struct gfs2_glock *gl);
int __init gfs2_glock_init(void); int __init gfs2_glock_init(void);
void gfs2_glock_exit(void); void gfs2_glock_exit(void);
......
...@@ -838,6 +838,7 @@ struct gfs2_sbd { ...@@ -838,6 +838,7 @@ struct gfs2_sbd {
/* For quiescing the filesystem */ /* For quiescing the filesystem */
struct gfs2_holder sd_freeze_gh; struct gfs2_holder sd_freeze_gh;
struct mutex sd_freeze_mutex; struct mutex sd_freeze_mutex;
struct list_head sd_dead_glocks;
char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2]; char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
char sd_table_name[GFS2_FSNAME_LEN]; char sd_table_name[GFS2_FSNAME_LEN];
......
...@@ -121,6 +121,11 @@ static void gdlm_ast(void *arg) ...@@ -121,6 +121,11 @@ static void gdlm_ast(void *arg)
struct gfs2_glock *gl = arg; struct gfs2_glock *gl = arg;
unsigned ret = gl->gl_state; unsigned ret = gl->gl_state;
/* If the glock is dead, we only react to a dlm_unlock() reply. */
if (__lockref_is_dead(&gl->gl_lockref) &&
gl->gl_lksb.sb_status != -DLM_EUNLOCK)
return;
gfs2_update_reply_times(gl); gfs2_update_reply_times(gl);
BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED); BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
...@@ -171,6 +176,9 @@ static void gdlm_bast(void *arg, int mode) ...@@ -171,6 +176,9 @@ static void gdlm_bast(void *arg, int mode)
{ {
struct gfs2_glock *gl = arg; struct gfs2_glock *gl = arg;
if (__lockref_is_dead(&gl->gl_lockref))
return;
switch (mode) { switch (mode) {
case DLM_LOCK_EX: case DLM_LOCK_EX:
gfs2_glock_cb(gl, LM_ST_UNLOCKED); gfs2_glock_cb(gl, LM_ST_UNLOCKED);
...@@ -291,8 +299,12 @@ static void gdlm_put_lock(struct gfs2_glock *gl) ...@@ -291,8 +299,12 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
struct lm_lockstruct *ls = &sdp->sd_lockstruct; struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int error; int error;
if (gl->gl_lksb.sb_lkid == 0) BUG_ON(!__lockref_is_dead(&gl->gl_lockref));
goto out_free;
if (gl->gl_lksb.sb_lkid == 0) {
gfs2_glock_free(gl);
return;
}
clear_bit(GLF_BLOCKING, &gl->gl_flags); clear_bit(GLF_BLOCKING, &gl->gl_flags);
gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT); gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
...@@ -300,13 +312,17 @@ static void gdlm_put_lock(struct gfs2_glock *gl) ...@@ -300,13 +312,17 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
gfs2_update_request_times(gl); gfs2_update_request_times(gl);
/* don't want to call dlm if we've unmounted the lock protocol */ /* don't want to call dlm if we've unmounted the lock protocol */
if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
goto out_free; gfs2_glock_free(gl);
return;
}
/* don't want to skip dlm_unlock writing the lvb when lock has one */ /* don't want to skip dlm_unlock writing the lvb when lock has one */
if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) && if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
!gl->gl_lksb.sb_lvbptr) !gl->gl_lksb.sb_lvbptr) {
goto out_free; gfs2_glock_free_later(gl);
return;
}
again: again:
error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK, error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
...@@ -321,10 +337,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl) ...@@ -321,10 +337,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
gl->gl_name.ln_type, gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number, error); (unsigned long long)gl->gl_name.ln_number, error);
} }
return;
out_free:
gfs2_glock_free(gl);
} }
static void gdlm_cancel(struct gfs2_glock *gl) static void gdlm_cancel(struct gfs2_glock *gl)
......
...@@ -136,6 +136,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) ...@@ -136,6 +136,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
atomic_set(&sdp->sd_log_in_flight, 0); atomic_set(&sdp->sd_log_in_flight, 0);
init_waitqueue_head(&sdp->sd_log_flush_wait); init_waitqueue_head(&sdp->sd_log_flush_wait);
mutex_init(&sdp->sd_freeze_mutex); mutex_init(&sdp->sd_freeze_mutex);
INIT_LIST_HEAD(&sdp->sd_dead_glocks);
return sdp; return sdp;
......
...@@ -650,10 +650,7 @@ static void gfs2_put_super(struct super_block *sb) ...@@ -650,10 +650,7 @@ static void gfs2_put_super(struct super_block *sb)
gfs2_gl_hash_clear(sdp); gfs2_gl_hash_clear(sdp);
truncate_inode_pages_final(&sdp->sd_aspace); truncate_inode_pages_final(&sdp->sd_aspace);
gfs2_delete_debugfs_file(sdp); gfs2_delete_debugfs_file(sdp);
/* Unmount the locking protocol */
gfs2_lm_unmount(sdp);
/* At this point, we're through participating in the lockspace */
gfs2_sys_fs_del(sdp); gfs2_sys_fs_del(sdp);
free_sbd(sdp); free_sbd(sdp);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment