Commit 8ad151c2 authored by Steven Whitehouse

GFS2: Only run logd and quota when mounted read/write

While investigating a rather strange bit of code in the quota
clean up function, I spotted that the reason for its existence
was that, when remounting read only, we were not stopping the
quotad thread, so it could still be holding a reference to some
of the quotas at that point.

This patch moves the logd and quotad thread start and stop into
the make_fs_rw/ro functions, so that those threads are now
stopped whenever the filesystem is mounted read only.

This means that quotad will always be stopped before we call
the quota clean up function, and we can thus dispose of the
(rather hackish) code that waits for it to give up its
reference on the quotas.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Abhijith Das <adas@redhat.com>
parent c754fbbb
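
For orientation before the hunks below, here is a condensed sketch of the
lifecycle the patch establishes, pieced together from the diff (error
reporting and unrelated work trimmed, so read it as an outline rather than
the verbatim kernel code): logd and quotad now start in gfs2_make_fs_rw()
and stop in gfs2_make_fs_ro(), which is what makes the quota clean up
workaround removable.

/* Sketch only: condensed from the diff below, not the exact kernel source. */
static int init_threads(struct gfs2_sbd *sdp)
{
	struct task_struct *p;

	/* Both daemons are started together; if quotad fails to start,
	 * logd is stopped again so nothing is left half-running. */
	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
	if (IS_ERR(p))
		return PTR_ERR(p);
	sdp->sd_logd_process = p;

	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
	if (IS_ERR(p)) {
		kthread_stop(sdp->sd_logd_process);
		return PTR_ERR(p);
	}
	sdp->sd_quotad_process = p;
	return 0;
}

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	int error = init_threads(sdp);	/* threads only run while read/write */
	if (error)
		return error;
	/* ... journal head check, transaction glock, etc. (see diff) ... */
	return 0;
}

static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	/* Stopping quotad here guarantees it has dropped its quota
	 * references before gfs2_quota_cleanup() runs, so the old
	 * wait-and-reschedule loop in that function can be deleted. */
	kthread_stop(sdp->sd_quotad_process);
	kthread_stop(sdp->sd_logd_process);
	/* ... quota/statfs sync, log flush, etc. (see diff) ... */
	return 0;
}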
@@ -969,40 +969,6 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)
 	return error;
 }
 
-static int init_threads(struct gfs2_sbd *sdp, int undo)
-{
-	struct task_struct *p;
-	int error = 0;
-
-	if (undo)
-		goto fail_quotad;
-
-	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
-	if (IS_ERR(p)) {
-		error = PTR_ERR(p);
-		fs_err(sdp, "can't start logd thread: %d\n", error);
-		return error;
-	}
-	sdp->sd_logd_process = p;
-
-	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
-	if (IS_ERR(p)) {
-		error = PTR_ERR(p);
-		fs_err(sdp, "can't start quotad thread: %d\n", error);
-		goto fail;
-	}
-	sdp->sd_quotad_process = p;
-	return 0;
-
-fail_quotad:
-	kthread_stop(sdp->sd_quotad_process);
-fail:
-	kthread_stop(sdp->sd_logd_process);
-	return error;
-}
-
 static const match_table_t nolock_tokens = {
 	{ Opt_jid, "jid=%d\n", },
 	{ Opt_err, NULL },
@@ -1267,15 +1233,11 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
 		goto fail_per_node;
 	}
 
-	error = init_threads(sdp, DO);
-	if (error)
-		goto fail_per_node;
-
 	if (!(sb->s_flags & MS_RDONLY)) {
 		error = gfs2_make_fs_rw(sdp);
 		if (error) {
 			fs_err(sdp, "can't make FS RW: %d\n", error);
-			goto fail_threads;
+			goto fail_per_node;
 		}
 	}
 
@@ -1283,8 +1245,6 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
 	gfs2_online_uevent(sdp);
 	return 0;
 
-fail_threads:
-	init_threads(sdp, UNDO);
 fail_per_node:
 	init_per_node(sdp, UNDO);
 fail_inodes:
......
@@ -1376,23 +1376,6 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 	while (!list_empty(head)) {
 		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
 
-		/*
-		 * To be removed in due course... we should be able to
-		 * ensure that all refs to the qd have done by this point
-		 * so that this rather odd test is not required
-		 */
-		spin_lock(&qd->qd_lockref.lock);
-		if (qd->qd_lockref.count > 1 ||
-		    (qd->qd_lockref.count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
-			spin_unlock(&qd->qd_lockref.lock);
-			list_move(&qd->qd_list, head);
-			spin_unlock(&qd_lock);
-			schedule();
-			spin_lock(&qd_lock);
-			continue;
-		}
-		spin_unlock(&qd->qd_lockref.lock);
-
 		list_del(&qd->qd_list);
 
 		/* Also remove if this qd exists in the reclaim list */
@@ -1404,11 +1387,8 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
 		hlist_bl_del_rcu(&qd->qd_hlist);
 		spin_unlock_bucket(qd->qd_hash);
 
-		if (!qd->qd_lockref.count) {
-			gfs2_assert_warn(sdp, !qd->qd_change);
-			gfs2_assert_warn(sdp, !qd->qd_slot_count);
-		} else
-			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
+		gfs2_assert_warn(sdp, !qd->qd_change);
+		gfs2_assert_warn(sdp, !qd->qd_slot_count);
 		gfs2_assert_warn(sdp, !qd->qd_bh_count);
 
 		gfs2_glock_put(qd->qd_gl);
......
@@ -369,6 +369,33 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd)
 	return 0;
 }
 
+static int init_threads(struct gfs2_sbd *sdp)
+{
+	struct task_struct *p;
+	int error = 0;
+
+	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
+	if (IS_ERR(p)) {
+		error = PTR_ERR(p);
+		fs_err(sdp, "can't start logd thread: %d\n", error);
+		return error;
+	}
+	sdp->sd_logd_process = p;
+
+	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
+	if (IS_ERR(p)) {
+		error = PTR_ERR(p);
+		fs_err(sdp, "can't start quotad thread: %d\n", error);
+		goto fail;
+	}
+	sdp->sd_quotad_process = p;
+	return 0;
+
+fail:
+	kthread_stop(sdp->sd_logd_process);
+	return error;
+}
+
 /**
  * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
  * @sdp: the filesystem
@@ -384,10 +411,14 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
 	struct gfs2_log_header_host head;
 	int error;
 
-	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
+	error = init_threads(sdp);
 	if (error)
 		return error;
 
+	error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED, 0, &t_gh);
+	if (error)
+		goto fail_threads;
+
 	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
 
 	error = gfs2_find_jhead(sdp->sd_jdesc, &head);
@@ -417,7 +448,9 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
 fail:
 	t_gh.gh_flags |= GL_NOCACHE;
 	gfs2_glock_dq_uninit(&t_gh);
-
+fail_threads:
+	kthread_stop(sdp->sd_quotad_process);
+	kthread_stop(sdp->sd_logd_process);
 	return error;
 }
 
@@ -800,6 +833,9 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
 	struct gfs2_holder t_gh;
 	int error;
 
+	kthread_stop(sdp->sd_quotad_process);
+	kthread_stop(sdp->sd_logd_process);
+
 	flush_workqueue(gfs2_delete_workqueue);
 	gfs2_quota_sync(sdp->sd_vfs, 0);
 	gfs2_statfs_sync(sdp->sd_vfs, 0);
@@ -857,9 +893,6 @@ static void gfs2_put_super(struct super_block *sb)
 	}
 	spin_unlock(&sdp->sd_jindex_spin);
 
-	kthread_stop(sdp->sd_quotad_process);
-	kthread_stop(sdp->sd_logd_process);
-
 	if (!(sb->s_flags & MS_RDONLY)) {
 		error = gfs2_make_fs_ro(sdp);
 		if (error)
......