Commit 71c1b213 authored by Andreas Gruenbacher, committed by Bob Peterson

gfs2: gfs2_evict_inode: Put glocks asynchronously

gfs2_evict_inode is called to free inodes under memory pressure.  The
function calls into DLM when an inode's last cluster-wide reference goes
away (remote unlink), and again to release the glock and the associated
DLM lock before finally destroying the inode.  However, if DLM is itself
blocked waiting for memory to become available, calling back into DLM
will deadlock.

Avoid that by decoupling the release of glocks from the destruction of
inodes in that case: with gfs2_glock_queue_put, glocks are instead put
asynchronously from work queue context, by which time the associated
inodes have likely already been destroyed.

With this change, an inode can end up unlinked, the remote unlink can be
triggered, and the inode can then be reallocated before all
remote-unlink callbacks have been processed.  To detect that, revalidate
the link count in gfs2_evict_inode to make sure we are not deleting an
allocated, referenced inode.
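
The revalidation amounts to the following early-out in the eviction
path; this is the same check as in the gfs2_evict_inode hunk below, with
the comment expanded to spell out the race:

            /*
             * The dinode may have been reallocated and linked again
             * before the deferred remote-unlink work ran; in that case
             * it must not be deallocated here.
             */
            if (inode->i_nlink)
                    goto out_truncate;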
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
parent eebd2e81
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -171,7 +171,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
  *
  */
 
-static void gfs2_glock_hold(struct gfs2_glock *gl)
+void gfs2_glock_hold(struct gfs2_glock *gl)
 {
         GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
         lockref_get(&gl->gl_lockref);
@@ -264,6 +264,14 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
         sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 }
 
+/*
+ * Cause the glock to be put in work queue context.
+ */
+void gfs2_glock_queue_put(struct gfs2_glock *gl)
+{
+        gfs2_glock_queue_work(gl, 0);
+}
+
 /**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -182,7 +182,9 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
 extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                           const struct gfs2_glock_operations *glops,
                           int create, struct gfs2_glock **glp);
+extern void gfs2_glock_hold(struct gfs2_glock *gl);
 extern void gfs2_glock_put(struct gfs2_glock *gl);
+extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
 extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
                              u16 flags, struct gfs2_holder *gh);
 extern void gfs2_holder_reinit(unsigned int state, u16 flags,
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1501,6 +1501,22 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
         return error;
 }
 
+/**
+ * gfs2_glock_put_eventually
+ * @gl: The glock to put
+ *
+ * When under memory pressure, trigger a deferred glock put to make sure we
+ * won't call into DLM and deadlock.  Otherwise, put the glock directly.
+ */
+
+static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
+{
+        if (current->flags & PF_MEMALLOC)
+                gfs2_glock_queue_put(gl);
+        else
+                gfs2_glock_put(gl);
+}
+
 /**
  * gfs2_evict_inode - Remove an inode from cache
  * @inode: The inode to evict
@@ -1564,6 +1580,12 @@ static void gfs2_evict_inode(struct inode *inode)
                         goto out_truncate;
         }
 
+        /*
+         * The inode may have been recreated in the meantime.
+         */
+        if (inode->i_nlink)
+                goto out_truncate;
+
 alloc_failed:
         if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
             test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
@@ -1653,12 +1675,16 @@ static void gfs2_evict_inode(struct inode *inode)
                 glock_clear_object(ip->i_gl, ip);
                 wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
                 gfs2_glock_add_to_lru(ip->i_gl);
-                gfs2_glock_put(ip->i_gl);
+                gfs2_glock_put_eventually(ip->i_gl);
                 ip->i_gl = NULL;
                 if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-                        glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
+                        struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
+
+                        glock_clear_object(gl, ip);
                         ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+                        gfs2_glock_hold(gl);
                         gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+                        gfs2_glock_put_eventually(gl);
                 }
         }
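
One detail of the last hunk is worth spelling out (an observation on the
code, assuming the usual gfs2 holder semantics, in which
gfs2_holder_uninit drops the reference the holder took on its glock):
without the extra gfs2_glock_hold(), gfs2_glock_dq_uninit() could drop
the last reference to the iopen glock and put it synchronously, calling
into DLM from the eviction path.  Taking an extra reference first
ensures the final put goes through the new helper instead:

    gfs2_glock_hold(gl);                    /* extra ref: gl outlives its holder */
    gfs2_glock_dq_uninit(&ip->i_iopen_gh);  /* drops the holder's ref, now not the last */
    gfs2_glock_put_eventually(gl);          /* final put: deferred under memory pressure */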