Commit 1e860444 authored by Andreas Gruenbacher

gfs2: Remove and replace gfs2_glock_queue_work

There are no more callers of gfs2_glock_queue_work() left, so remove
that helper.  With that, we can now rename __gfs2_glock_queue_work()
back to gfs2_glock_queue_work() to get rid of some unnecessary clutter.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
parent 9947a06d
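
The practical effect is on the calling convention: with the locked wrapper gone, gfs2_glock_queue_work() must be called with gl->gl_lockref.lock already held, as every caller in the hunks below does. A minimal sketch of that pattern, modeled on gfs2_glock_complete() and clear_glock() further down (the function name here is hypothetical, for illustration only):

	/*
	 * Hypothetical caller, illustrating the convention after this change:
	 * the lockref spinlock is held around gfs2_glock_queue_work(), and the
	 * reference taken here is passed on to the queued work.
	 */
	static void example_queue_glock_work(struct gfs2_glock *gl)
	{
		spin_lock(&gl->gl_lockref.lock);
		gl->gl_lockref.count++;		/* reference handed to glock_work_func */
		gfs2_glock_queue_work(gl, 0);	/* queue immediate work, lock held */
		spin_unlock(&gl->gl_lockref.lock);
	}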
@@ -274,7 +274,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
  * Enqueue the glock on the work queue. Passes one glock reference on to the
  * work queue.
  */
-static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
+static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
 	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
 		/*
 		 * We are holding the lockref spinlock, and the work was still
@@ -287,12 +287,6 @@ static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
 	}
 }
-static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
-	spin_lock(&gl->gl_lockref.lock);
-	__gfs2_glock_queue_work(gl, delay);
-	spin_unlock(&gl->gl_lockref.lock);
-}
 static void __gfs2_glock_put(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -337,7 +331,8 @@ void gfs2_glock_put_async(struct gfs2_glock *gl)
 	if (lockref_put_or_lock(&gl->gl_lockref))
 		return;
-	__gfs2_glock_queue_work(gl, 0);
+	GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
+	gfs2_glock_queue_work(gl, 0);
 	spin_unlock(&gl->gl_lockref.lock);
 }
@@ -831,7 +826,7 @@ __acquires(&gl->gl_lockref.lock)
 		 */
 		clear_bit(GLF_LOCK, &gl->gl_flags);
 		clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
-		__gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
+		gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
 		return;
 	} else {
 		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
@@ -861,7 +856,7 @@ __acquires(&gl->gl_lockref.lock)
 	/* Complete the operation now. */
 	finish_xmote(gl, target);
-	__gfs2_glock_queue_work(gl, 0);
+	gfs2_glock_queue_work(gl, 0);
 }
 /**
@@ -909,7 +904,7 @@ __acquires(&gl->gl_lockref.lock)
 	clear_bit(GLF_LOCK, &gl->gl_flags);
 	smp_mb__after_atomic();
 	gl->gl_lockref.count++;
-	__gfs2_glock_queue_work(gl, 0);
+	gfs2_glock_queue_work(gl, 0);
 	return;
 out_unlock:
@@ -1141,12 +1136,12 @@ static void glock_work_func(struct work_struct *work)
 		drop_refs--;
 		if (gl->gl_name.ln_type != LM_TYPE_INODE)
 			delay = 0;
-		__gfs2_glock_queue_work(gl, delay);
+		gfs2_glock_queue_work(gl, delay);
 	}
 	/*
 	 * Drop the remaining glock references manually here. (Mind that
-	 * __gfs2_glock_queue_work depends on the lockref spinlock begin held
+	 * gfs2_glock_queue_work depends on the lockref spinlock begin held
 	 * here as well.)
 	 */
 	gl->gl_lockref.count -= drop_refs;
@@ -1651,7 +1646,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
 		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
 		gl->gl_lockref.count++;
-		__gfs2_glock_queue_work(gl, 0);
+		gfs2_glock_queue_work(gl, 0);
 	}
 	run_queue(gl, 1);
 	spin_unlock(&gl->gl_lockref.lock);
@@ -1717,7 +1712,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
 		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
 		    gl->gl_name.ln_type == LM_TYPE_INODE)
 			delay = gl->gl_hold_time;
-		__gfs2_glock_queue_work(gl, delay);
+		gfs2_glock_queue_work(gl, delay);
 	}
 }
@@ -1941,7 +1936,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 			delay = gl->gl_hold_time;
 	}
 	handle_callback(gl, state, delay, true);
-	__gfs2_glock_queue_work(gl, delay);
+	gfs2_glock_queue_work(gl, delay);
 	spin_unlock(&gl->gl_lockref.lock);
 }
@@ -2001,7 +1996,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 	gl->gl_lockref.count++;
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-	__gfs2_glock_queue_work(gl, 0);
+	gfs2_glock_queue_work(gl, 0);
 	spin_unlock(&gl->gl_lockref.lock);
 }
@@ -2070,7 +2065,7 @@ __acquires(&lru_lock)
 		gl->gl_lockref.count++;
 		if (demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
-		__gfs2_glock_queue_work(gl, 0);
+		gfs2_glock_queue_work(gl, 0);
 		spin_unlock(&gl->gl_lockref.lock);
 		cond_resched_lock(&lru_lock);
 	}
@@ -2194,7 +2189,7 @@ static void thaw_glock(struct gfs2_glock *gl)
 	spin_lock(&gl->gl_lockref.lock);
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-	__gfs2_glock_queue_work(gl, 0);
+	gfs2_glock_queue_work(gl, 0);
 	spin_unlock(&gl->gl_lockref.lock);
 }
@@ -2213,7 +2208,7 @@ static void clear_glock(struct gfs2_glock *gl)
 		gl->gl_lockref.count++;
 		if (gl->gl_state != LM_ST_UNLOCKED)
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
-		__gfs2_glock_queue_work(gl, 0);
+		gfs2_glock_queue_work(gl, 0);
 	}
 	spin_unlock(&gl->gl_lockref.lock);
 }