Commit 3042a2cc authored by Steven Whitehouse

[GFS2] Reorder writeback for glock sync

Previously we were doing (write data, wait for data, write metadata, wait
for metadata). After this patch we do (write metadata, write data, wait for
data, wait for metadata), which should be more efficient since the data and
metadata writeback can then be in flight at the same time.

Also I noticed that the drop_th and xmote_th functions were almost
identical. In fact the only difference was a single test, and that
test is such that in the drop_th case it would always evaluate to
the correct result anyway. As such we can use the xmote_th functions
in all the places where we were using the drop_th functions and
remove the drop_th functions.
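
For reference, the two inode routines being merged (taken from the removed
code in the diff below) differ only in the state test before gfs2_pte_inval();
a glock being dropped is not already in LM_ST_UNLOCKED, so the test evaluates
the same way there:

	static void inode_go_xmote_th(struct gfs2_glock *gl)
	{
		if (gl->gl_state != LM_ST_UNLOCKED)	/* the single differing test */
			gfs2_pte_inval(gl);
		if (gl->gl_state == LM_ST_EXCLUSIVE)
			inode_go_sync(gl);
	}

	static void inode_go_drop_th(struct gfs2_glock *gl)
	{
		gfs2_pte_inval(gl);			/* unconditional in the drop path */
		if (gl->gl_state == LM_ST_EXCLUSIVE)
			inode_go_sync(gl);
	}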
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
parent 52d4c74b
@@ -947,8 +947,8 @@ static void gfs2_glock_drop_th(struct gfs2_glock *gl)
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	unsigned int ret;
 
-	if (glops->go_drop_th)
-		glops->go_drop_th(gl);
+	if (glops->go_xmote_th)
+		glops->go_xmote_th(gl);
 
 	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
 	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
@@ -1252,12 +1252,11 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
 
 	list_del_init(&gh->gh_list);
 	if (list_empty(&gl->gl_holders)) {
-		if (glops->go_unlock) {
-			spin_unlock(&gl->gl_spin);
-			glops->go_unlock(gh);
-			spin_lock(&gl->gl_spin);
-		}
+		spin_unlock(&gl->gl_spin);
+		if (glops->go_unlock)
+			glops->go_unlock(gh);
+		spin_lock(&gl->gl_spin);
 
 		gl->gl_stamp = jiffies;
 	}
 
@@ -138,43 +138,33 @@ static void meta_go_inval(struct gfs2_glock *gl, int flags)
 static void inode_go_sync(struct gfs2_glock *gl)
 {
 	struct gfs2_inode *ip = gl->gl_object;
+	struct address_space *metamapping = gl->gl_aspace->i_mapping;
+	int error;
+
+	if (gl->gl_state != LM_ST_UNLOCKED)
+		gfs2_pte_inval(gl);
+	if (gl->gl_state != LM_ST_EXCLUSIVE)
+		return;
 
 	if (ip && !S_ISREG(ip->i_inode.i_mode))
 		ip = NULL;
 
 	if (test_bit(GLF_DIRTY, &gl->gl_flags)) {
-		if (ip && !gfs2_is_jdata(ip))
-			filemap_fdatawrite(ip->i_inode.i_mapping);
 		gfs2_log_flush(gl->gl_sbd, gl);
-		if (ip && gfs2_is_jdata(ip))
-			filemap_fdatawrite(ip->i_inode.i_mapping);
-		gfs2_meta_sync(gl);
+		filemap_fdatawrite(metamapping);
 		if (ip) {
 			struct address_space *mapping = ip->i_inode.i_mapping;
-			int error = filemap_fdatawait(mapping);
+			filemap_fdatawrite(mapping);
+			error = filemap_fdatawait(mapping);
 			mapping_set_error(mapping, error);
 		}
+		error = filemap_fdatawait(metamapping);
+		mapping_set_error(metamapping, error);
 		clear_bit(GLF_DIRTY, &gl->gl_flags);
 		gfs2_ail_empty_gl(gl);
 	}
 }
 
-/**
- * inode_go_xmote_th - promote/demote a glock
- * @gl: the glock
- * @state: the requested state
- * @flags:
- *
- */
-
-static void inode_go_xmote_th(struct gfs2_glock *gl)
-{
-	if (gl->gl_state != LM_ST_UNLOCKED)
-		gfs2_pte_inval(gl);
-	if (gl->gl_state == LM_ST_EXCLUSIVE)
-		inode_go_sync(gl);
-}
-
 /**
  * inode_go_xmote_bh - After promoting/demoting a glock
  * @gl: the glock
@@ -195,22 +185,6 @@ static void inode_go_xmote_bh(struct gfs2_glock *gl)
 	}
 }
 
-/**
- * inode_go_drop_th - unlock a glock
- * @gl: the glock
- *
- * Invoked from rq_demote().
- * Another node needs the lock in EXCLUSIVE mode, or lock (unused for too long)
- * is being purged from our node's glock cache; we're dropping lock.
- */
-
-static void inode_go_drop_th(struct gfs2_glock *gl)
-{
-	gfs2_pte_inval(gl);
-	if (gl->gl_state == LM_ST_EXCLUSIVE)
-		inode_go_sync(gl);
-}
-
 /**
  * inode_go_inval - prepare a inode glock to be released
  * @gl: the glock
@@ -326,14 +300,14 @@ static void rgrp_go_unlock(struct gfs2_holder *gh)
 }
 
 /**
- * trans_go_xmote_th - promote/demote the transaction glock
+ * trans_go_sync - promote/demote the transaction glock
  * @gl: the glock
  * @state: the requested state
  * @flags:
  *
  */
 
-static void trans_go_xmote_th(struct gfs2_glock *gl)
+static void trans_go_sync(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 
@@ -376,24 +350,6 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
 	}
 }
 
-/**
- * trans_go_drop_th - unlock the transaction glock
- * @gl: the glock
- *
- * We want to sync the device even with localcaching. Remember
- * that localcaching journal replay only marks buffers dirty.
- */
-
-static void trans_go_drop_th(struct gfs2_glock *gl)
-{
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-
-	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
-		gfs2_meta_syncfs(sdp);
-		gfs2_log_shutdown(sdp);
-	}
-}
-
 /**
  * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
  * @gl: the glock
@@ -408,14 +364,12 @@ static int quota_go_demote_ok(struct gfs2_glock *gl)
 
 const struct gfs2_glock_operations gfs2_meta_glops = {
 	.go_xmote_th = meta_go_sync,
-	.go_drop_th = meta_go_sync,
 	.go_type = LM_TYPE_META,
 };
 
 const struct gfs2_glock_operations gfs2_inode_glops = {
-	.go_xmote_th = inode_go_xmote_th,
+	.go_xmote_th = inode_go_sync,
 	.go_xmote_bh = inode_go_xmote_bh,
-	.go_drop_th = inode_go_drop_th,
 	.go_inval = inode_go_inval,
 	.go_demote_ok = inode_go_demote_ok,
 	.go_lock = inode_go_lock,
@@ -425,7 +379,6 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
 
 const struct gfs2_glock_operations gfs2_rgrp_glops = {
 	.go_xmote_th = meta_go_sync,
-	.go_drop_th = meta_go_sync,
 	.go_inval = meta_go_inval,
 	.go_demote_ok = rgrp_go_demote_ok,
 	.go_lock = rgrp_go_lock,
@@ -435,9 +388,8 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
 };
 
 const struct gfs2_glock_operations gfs2_trans_glops = {
-	.go_xmote_th = trans_go_xmote_th,
+	.go_xmote_th = trans_go_sync,
 	.go_xmote_bh = trans_go_xmote_bh,
-	.go_drop_th = trans_go_drop_th,
 	.go_type = LM_TYPE_NONDISK,
 };