Commit 018d21f5 authored by Linus Torvalds

Merge tag 'gfs2-for-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Bob Peterson:
 "We've got a lot of patches (39) for this merge window. Most of these
  patches are related to corruption that occurs when journals are
  replayed. For example:

   1. A node fails while writing to the file system.
   2. Other nodes use the metadata that was once used by the failed
      node.
   3. When the node returns to the cluster, its journal is replayed, but
      the older metadata blocks overwrite the changes from step 2.

  Summary:

   - Fixed the recovery sequence to prevent corruption during journal
     replay.

   - Many bug fixes found during recovery testing.

   - New improved file system withdraw sequence.

   - Fixed how resource group buffers are managed.

   - Fixed how metadata revokes are tracked and written.

   - Improved processing of IO errors hit by daemons like logd and
     quotad.

   - Improved error checking in metadata writes.

   - Fixed how qadata quota data structures are managed"

* tag 'gfs2-for-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (39 commits)
  gfs2: Fix oversight in gfs2_ail1_flush
  gfs2: change from write to read lock for sd_log_flush_lock in journal replay
  gfs2: instrumentation wrt ail1 stuck
  gfs2: don't lock sd_log_flush_lock in try_rgrp_unlink
  gfs2: Remove unnecessary gfs2_qa_{get,put} pairs
  gfs2: Split gfs2_rsqa_delete into gfs2_rs_delete and gfs2_qa_put
  gfs2: Change inode qa_data to allow multiple users
  gfs2: eliminate gfs2_rsqa_alloc in favor of gfs2_qa_alloc
  gfs2: Switch to list_{first,last}_entry
  gfs2: Clean up inode initialization and teardown
  gfs2: Additional information when gfs2_ail1_flush withdraws
  gfs2: leaf_dealloc needs to allocate one more revoke
  gfs2: allow journal replay to hold sd_log_flush_lock
  gfs2: don't allow releasepage to free bd still used for revokes
  gfs2: flesh out delayed withdraw for gfs2_log_flush
  gfs2: Do proper error checking for go_sync family of glops functions
  gfs2: Don't demote a glock until its revokes are written
  gfs2: drain the ail2 list after io errors
  gfs2: Withdraw in gfs2_ail1_flush if write_cache_pages fails
  gfs2: Do log_flush in gfs2_ail_empty_gl even if ail list is empty
  ...
parents 15c981d1 75b46c43
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "glock.h" #include "glock.h"
#include "inode.h" #include "inode.h"
#include "meta_io.h" #include "meta_io.h"
#include "quota.h"
#include "rgrp.h" #include "rgrp.h"
#include "trans.h" #include "trans.h"
#include "util.h" #include "util.h"
...@@ -116,14 +117,14 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) ...@@ -116,14 +117,14 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode))) if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode)))
return -E2BIG; return -E2BIG;
ret = gfs2_rsqa_alloc(ip); ret = gfs2_qa_get(ip);
if (ret) if (ret)
return ret; return ret;
if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (ret) if (ret)
return ret; goto out;
need_unlock = true; need_unlock = true;
} }
...@@ -143,5 +144,7 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) ...@@ -143,5 +144,7 @@ int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
unlock: unlock:
if (need_unlock) if (need_unlock)
gfs2_glock_dq_uninit(&gh); gfs2_glock_dq_uninit(&gh);
out:
gfs2_qa_put(ip);
return ret; return ret;
} }
...@@ -805,11 +805,16 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask) ...@@ -805,11 +805,16 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
bd = bh->b_private; bd = bh->b_private;
if (bd) { if (bd) {
gfs2_assert_warn(sdp, bd->bd_bh == bh); gfs2_assert_warn(sdp, bd->bd_bh == bh);
if (!list_empty(&bd->bd_list))
list_del_init(&bd->bd_list);
bd->bd_bh = NULL; bd->bd_bh = NULL;
bh->b_private = NULL; bh->b_private = NULL;
kmem_cache_free(gfs2_bufdata_cachep, bd); /*
* The bd may still be queued as a revoke, in which
* case we must not dequeue nor free it.
*/
if (!bd->bd_blkno && !list_empty(&bd->bd_list))
list_del_init(&bd->bd_list);
if (list_empty(&bd->bd_list))
kmem_cache_free(gfs2_bufdata_cachep, bd);
} }
bh = bh->b_this_page; bh = bh->b_this_page;
......
...@@ -2183,7 +2183,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize) ...@@ -2183,7 +2183,7 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
inode_dio_wait(inode); inode_dio_wait(inode);
ret = gfs2_rsqa_alloc(ip); ret = gfs2_qa_get(ip);
if (ret) if (ret)
goto out; goto out;
...@@ -2194,7 +2194,8 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize) ...@@ -2194,7 +2194,8 @@ int gfs2_setattr_size(struct inode *inode, u64 newsize)
ret = do_shrink(inode, newsize); ret = do_shrink(inode, newsize);
out: out:
gfs2_rsqa_delete(ip, NULL); gfs2_rs_delete(ip, NULL);
gfs2_qa_put(ip);
return ret; return ret;
} }
...@@ -2223,7 +2224,7 @@ void gfs2_free_journal_extents(struct gfs2_jdesc *jd) ...@@ -2223,7 +2224,7 @@ void gfs2_free_journal_extents(struct gfs2_jdesc *jd)
struct gfs2_journal_extent *jext; struct gfs2_journal_extent *jext;
while(!list_empty(&jd->extent_list)) { while(!list_empty(&jd->extent_list)) {
jext = list_entry(jd->extent_list.next, struct gfs2_journal_extent, list); jext = list_first_entry(&jd->extent_list, struct gfs2_journal_extent, list);
list_del(&jext->list); list_del(&jext->list);
kfree(jext); kfree(jext);
} }
...@@ -2244,7 +2245,7 @@ static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 b ...@@ -2244,7 +2245,7 @@ static int gfs2_add_jextent(struct gfs2_jdesc *jd, u64 lblock, u64 dblock, u64 b
struct gfs2_journal_extent *jext; struct gfs2_journal_extent *jext;
if (!list_empty(&jd->extent_list)) { if (!list_empty(&jd->extent_list)) {
jext = list_entry(jd->extent_list.prev, struct gfs2_journal_extent, list); jext = list_last_entry(&jd->extent_list, struct gfs2_journal_extent, list);
if ((jext->dblock + jext->blocks) == dblock) { if ((jext->dblock + jext->blocks) == dblock) {
jext->blocks += blocks; jext->blocks += blocks;
return 0; return 0;
......
...@@ -2028,7 +2028,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len, ...@@ -2028,7 +2028,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
error = gfs2_trans_begin(sdp, error = gfs2_trans_begin(sdp,
rg_blocks + (DIV_ROUND_UP(size, sdp->sd_jbsize) + 1) + rg_blocks + (DIV_ROUND_UP(size, sdp->sd_jbsize) + 1) +
RES_DINODE + RES_STATFS + RES_QUOTA, l_blocks); RES_DINODE + RES_STATFS + RES_QUOTA, RES_DINODE +
l_blocks);
if (error) if (error)
goto out_rg_gunlock; goto out_rg_gunlock;
......
...@@ -458,10 +458,6 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf) ...@@ -458,10 +458,6 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
sb_start_pagefault(inode->i_sb); sb_start_pagefault(inode->i_sb);
ret = gfs2_rsqa_alloc(ip);
if (ret)
goto out;
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
ret = gfs2_glock_nq(&gh); ret = gfs2_glock_nq(&gh);
if (ret) if (ret)
...@@ -558,7 +554,6 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf) ...@@ -558,7 +554,6 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
set_page_dirty(page); set_page_dirty(page);
wait_for_stable_page(page); wait_for_stable_page(page);
} }
out:
sb_end_pagefault(inode->i_sb); sb_end_pagefault(inode->i_sb);
return block_page_mkwrite_return(ret); return block_page_mkwrite_return(ret);
} }
...@@ -635,7 +630,17 @@ int gfs2_open_common(struct inode *inode, struct file *file) ...@@ -635,7 +630,17 @@ int gfs2_open_common(struct inode *inode, struct file *file)
gfs2_assert_warn(GFS2_SB(inode), !file->private_data); gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
file->private_data = fp; file->private_data = fp;
if (file->f_mode & FMODE_WRITE) {
ret = gfs2_qa_get(GFS2_I(inode));
if (ret)
goto fail;
}
return 0; return 0;
fail:
kfree(file->private_data);
file->private_data = NULL;
return ret;
} }
/** /**
...@@ -690,10 +695,10 @@ static int gfs2_release(struct inode *inode, struct file *file) ...@@ -690,10 +695,10 @@ static int gfs2_release(struct inode *inode, struct file *file)
kfree(file->private_data); kfree(file->private_data);
file->private_data = NULL; file->private_data = NULL;
if (!(file->f_mode & FMODE_WRITE)) if (file->f_mode & FMODE_WRITE) {
return 0; gfs2_rs_delete(ip, &inode->i_writecount);
gfs2_qa_put(ip);
gfs2_rsqa_delete(ip, &inode->i_writecount); }
return 0; return 0;
} }
...@@ -849,10 +854,6 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) ...@@ -849,10 +854,6 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_inode *ip = GFS2_I(inode);
ssize_t ret; ssize_t ret;
ret = gfs2_rsqa_alloc(ip);
if (ret)
return ret;
gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from)); gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from));
if (iocb->ki_flags & IOCB_APPEND) { if (iocb->ki_flags & IOCB_APPEND) {
...@@ -1149,17 +1150,11 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t le ...@@ -1149,17 +1150,11 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t le
if (mode & FALLOC_FL_PUNCH_HOLE) { if (mode & FALLOC_FL_PUNCH_HOLE) {
ret = __gfs2_punch_hole(file, offset, len); ret = __gfs2_punch_hole(file, offset, len);
} else { } else {
ret = gfs2_rsqa_alloc(ip);
if (ret)
goto out_putw;
ret = __gfs2_fallocate(file, mode, offset, len); ret = __gfs2_fallocate(file, mode, offset, len);
if (ret) if (ret)
gfs2_rs_deltree(&ip->i_res); gfs2_rs_deltree(&ip->i_res);
} }
out_putw:
put_write_access(inode); put_write_access(inode);
out_unlock: out_unlock:
gfs2_glock_dq(&gh); gfs2_glock_dq(&gh);
...@@ -1173,16 +1168,12 @@ static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe, ...@@ -1173,16 +1168,12 @@ static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
struct file *out, loff_t *ppos, struct file *out, loff_t *ppos,
size_t len, unsigned int flags) size_t len, unsigned int flags)
{ {
int error; ssize_t ret;
struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);
error = gfs2_rsqa_alloc(ip);
if (error)
return (ssize_t)error;
gfs2_size_hint(out, *ppos, len); gfs2_size_hint(out, *ppos, len);
return iter_file_splice_write(pipe, out, ppos, len, flags); ret = iter_file_splice_write(pipe, out, ppos, len, flags);
return ret;
} }
#ifdef CONFIG_GFS2_FS_LOCKING_DLM #ifdef CONFIG_GFS2_FS_LOCKING_DLM
......
...@@ -133,6 +133,33 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu) ...@@ -133,6 +133,33 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
} }
} }
/**
* glock_blocked_by_withdraw - determine if we can still use a glock
* @gl: the glock
*
* We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
* when we're withdrawn. For example, to maintain metadata integrity, we should
* disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like
* iopen or the transaction glocks may be safely used because none of their
* metadata goes through the journal. So in general, we should disallow all
* glocks that are journaled, and allow all the others. One exception is:
* we need to allow our active journal to be promoted and demoted so others
* may recover it and we can reacquire it when they're done.
*/
static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
if (likely(!gfs2_withdrawn(sdp)))
return false;
if (gl->gl_ops->go_flags & GLOF_NONDISK)
return false;
if (!sdp->sd_jdesc ||
gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
return false;
return true;
}
void gfs2_glock_free(struct gfs2_glock *gl) void gfs2_glock_free(struct gfs2_glock *gl)
{ {
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
...@@ -244,7 +271,7 @@ static void __gfs2_glock_put(struct gfs2_glock *gl) ...@@ -244,7 +271,7 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
gfs2_glock_remove_from_lru(gl); gfs2_glock_remove_from_lru(gl);
spin_unlock(&gl->gl_lockref.lock); spin_unlock(&gl->gl_lockref.lock);
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
GLOCK_BUG_ON(gl, mapping && mapping->nrpages); GLOCK_BUG_ON(gl, mapping && mapping->nrpages && !gfs2_withdrawn(sdp));
trace_gfs2_glock_put(gl); trace_gfs2_glock_put(gl);
sdp->sd_lockstruct.ls_ops->lm_put_lock(gl); sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
} }
...@@ -281,7 +308,7 @@ void gfs2_glock_put(struct gfs2_glock *gl) ...@@ -281,7 +308,7 @@ void gfs2_glock_put(struct gfs2_glock *gl)
static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh) static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{ {
const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list); const struct gfs2_holder *gh_head = list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list);
if ((gh->gh_state == LM_ST_EXCLUSIVE || if ((gh->gh_state == LM_ST_EXCLUSIVE ||
gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head) gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
return 0; return 0;
...@@ -549,8 +576,8 @@ __acquires(&gl->gl_lockref.lock) ...@@ -549,8 +576,8 @@ __acquires(&gl->gl_lockref.lock)
unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0); unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
int ret; int ret;
if (unlikely(gfs2_withdrawn(sdp)) && if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl) &&
target != LM_ST_UNLOCKED) gh && !(gh->gh_flags & LM_FLAG_NOEXP))
return; return;
lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP | lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
LM_FLAG_PRIORITY); LM_FLAG_PRIORITY);
...@@ -575,13 +602,64 @@ __acquires(&gl->gl_lockref.lock) ...@@ -575,13 +602,64 @@ __acquires(&gl->gl_lockref.lock)
(lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB))) (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
clear_bit(GLF_BLOCKING, &gl->gl_flags); clear_bit(GLF_BLOCKING, &gl->gl_flags);
spin_unlock(&gl->gl_lockref.lock); spin_unlock(&gl->gl_lockref.lock);
if (glops->go_sync) if (glops->go_sync) {
glops->go_sync(gl); ret = glops->go_sync(gl);
if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) /* If we had a problem syncing (due to io errors or whatever,
* we should not invalidate the metadata or tell dlm to
* release the glock to other nodes.
*/
if (ret) {
if (cmpxchg(&sdp->sd_log_error, 0, ret)) {
fs_err(sdp, "Error %d syncing glock \n", ret);
gfs2_dump_glock(NULL, gl, true);
}
return;
}
}
if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
/*
* The call to go_sync should have cleared out the ail list.
* If there are still items, we have a problem. We ought to
* withdraw, but we can't because the withdraw code also uses
* glocks. Warn about the error, dump the glock, then fall
* through and wait for logd to do the withdraw for us.
*/
if ((atomic_read(&gl->gl_ail_count) != 0) &&
(!cmpxchg(&sdp->sd_log_error, 0, -EIO))) {
gfs2_assert_warn(sdp, !atomic_read(&gl->gl_ail_count));
gfs2_dump_glock(NULL, gl, true);
}
glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA); glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags); clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
}
gfs2_glock_hold(gl); gfs2_glock_hold(gl);
/*
* Check for an error encountered since we called go_sync and go_inval.
* If so, we can't withdraw from the glock code because the withdraw
* code itself uses glocks (see function signal_our_withdraw) to
* change the mount to read-only. Most importantly, we must not call
* dlm to unlock the glock until the journal is in a known good state
* (after journal replay) otherwise other nodes may use the object
* (rgrp or dinode) and then later, journal replay will corrupt the
* file system. The best we can do here is wait for the logd daemon
* to see sd_log_error and withdraw, and in the meantime, requeue the
* work for later.
*
* However, if we're just unlocking the lock (say, for unmount, when
* gfs2_gl_hash_clear calls clear_glock) and recovery is complete
* then it's okay to tell dlm to unlock it.
*/
if (unlikely(sdp->sd_log_error && !gfs2_withdrawn(sdp)))
gfs2_withdraw_delayed(sdp);
if (glock_blocked_by_withdraw(gl)) {
if (target != LM_ST_UNLOCKED ||
test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags)) {
gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
goto out;
}
}
if (sdp->sd_lockstruct.ls_ops->lm_lock) { if (sdp->sd_lockstruct.ls_ops->lm_lock) {
/* lock_dlm */ /* lock_dlm */
ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
...@@ -590,8 +668,7 @@ __acquires(&gl->gl_lockref.lock) ...@@ -590,8 +668,7 @@ __acquires(&gl->gl_lockref.lock)
test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) { test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
finish_xmote(gl, target); finish_xmote(gl, target);
gfs2_glock_queue_work(gl, 0); gfs2_glock_queue_work(gl, 0);
} } else if (ret) {
else if (ret) {
fs_err(sdp, "lm_lock ret %d\n", ret); fs_err(sdp, "lm_lock ret %d\n", ret);
GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp)); GLOCK_BUG_ON(gl, !gfs2_withdrawn(sdp));
} }
...@@ -599,7 +676,7 @@ __acquires(&gl->gl_lockref.lock) ...@@ -599,7 +676,7 @@ __acquires(&gl->gl_lockref.lock)
finish_xmote(gl, target); finish_xmote(gl, target);
gfs2_glock_queue_work(gl, 0); gfs2_glock_queue_work(gl, 0);
} }
out:
spin_lock(&gl->gl_lockref.lock); spin_lock(&gl->gl_lockref.lock);
} }
...@@ -613,7 +690,7 @@ static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl) ...@@ -613,7 +690,7 @@ static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
struct gfs2_holder *gh; struct gfs2_holder *gh;
if (!list_empty(&gl->gl_holders)) { if (!list_empty(&gl->gl_holders)) {
gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
if (test_bit(HIF_HOLDER, &gh->gh_iflags)) if (test_bit(HIF_HOLDER, &gh->gh_iflags))
return gh; return gh;
} }
...@@ -645,6 +722,9 @@ __acquires(&gl->gl_lockref.lock) ...@@ -645,6 +722,9 @@ __acquires(&gl->gl_lockref.lock)
goto out_unlock; goto out_unlock;
if (nonblock) if (nonblock)
goto out_sched; goto out_sched;
smp_mb();
if (atomic_read(&gl->gl_revokes) != 0)
goto out_sched;
set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags); set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE); GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
gl->gl_target = gl->gl_demote_state; gl->gl_target = gl->gl_demote_state;
...@@ -1160,7 +1240,7 @@ __acquires(&gl->gl_lockref.lock) ...@@ -1160,7 +1240,7 @@ __acquires(&gl->gl_lockref.lock)
} }
list_add_tail(&gh->gh_list, insert_pt); list_add_tail(&gh->gh_list, insert_pt);
do_cancel: do_cancel:
gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
if (!(gh->gh_flags & LM_FLAG_PRIORITY)) { if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
spin_unlock(&gl->gl_lockref.lock); spin_unlock(&gl->gl_lockref.lock);
if (sdp->sd_lockstruct.ls_ops->lm_cancel) if (sdp->sd_lockstruct.ls_ops->lm_cancel)
...@@ -1194,10 +1274,9 @@ __acquires(&gl->gl_lockref.lock) ...@@ -1194,10 +1274,9 @@ __acquires(&gl->gl_lockref.lock)
int gfs2_glock_nq(struct gfs2_holder *gh) int gfs2_glock_nq(struct gfs2_holder *gh)
{ {
struct gfs2_glock *gl = gh->gh_gl; struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
int error = 0; int error = 0;
if (unlikely(gfs2_withdrawn(sdp))) if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
return -EIO; return -EIO;
if (test_bit(GLF_LRU, &gl->gl_flags)) if (test_bit(GLF_LRU, &gl->gl_flags))
...@@ -1241,24 +1320,32 @@ int gfs2_glock_poll(struct gfs2_holder *gh) ...@@ -1241,24 +1320,32 @@ int gfs2_glock_poll(struct gfs2_holder *gh)
void gfs2_glock_dq(struct gfs2_holder *gh) void gfs2_glock_dq(struct gfs2_holder *gh)
{ {
struct gfs2_glock *gl = gh->gh_gl; struct gfs2_glock *gl = gh->gh_gl;
const struct gfs2_glock_operations *glops = gl->gl_ops; struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
unsigned delay = 0; unsigned delay = 0;
int fast_path = 0; int fast_path = 0;
spin_lock(&gl->gl_lockref.lock); spin_lock(&gl->gl_lockref.lock);
/*
* If we're in the process of file system withdraw, we cannot just
* dequeue any glocks until our journal is recovered, lest we
* introduce file system corruption. We need two exceptions to this
* rule: We need to allow unlocking of nondisk glocks and the glock
* for our own journal that needs recovery.
*/
if (test_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags) &&
glock_blocked_by_withdraw(gl) &&
gh->gh_gl != sdp->sd_jinode_gl) {
sdp->sd_glock_dqs_held++;
might_sleep();
wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY,
TASK_UNINTERRUPTIBLE);
}
if (gh->gh_flags & GL_NOCACHE) if (gh->gh_flags & GL_NOCACHE)
handle_callback(gl, LM_ST_UNLOCKED, 0, false); handle_callback(gl, LM_ST_UNLOCKED, 0, false);
list_del_init(&gh->gh_list); list_del_init(&gh->gh_list);
clear_bit(HIF_HOLDER, &gh->gh_iflags); clear_bit(HIF_HOLDER, &gh->gh_iflags);
if (find_first_holder(gl) == NULL) { if (find_first_holder(gl) == NULL) {
if (glops->go_unlock) {
GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
spin_unlock(&gl->gl_lockref.lock);
glops->go_unlock(gh);
spin_lock(&gl->gl_lockref.lock);
clear_bit(GLF_LOCK, &gl->gl_flags);
}
if (list_empty(&gl->gl_holders) && if (list_empty(&gl->gl_holders) &&
!test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
!test_bit(GLF_DEMOTE, &gl->gl_flags)) !test_bit(GLF_DEMOTE, &gl->gl_flags))
...@@ -1555,7 +1642,7 @@ __acquires(&lru_lock) ...@@ -1555,7 +1642,7 @@ __acquires(&lru_lock)
list_sort(NULL, list, glock_cmp); list_sort(NULL, list, glock_cmp);
while(!list_empty(list)) { while(!list_empty(list)) {
gl = list_entry(list->next, struct gfs2_glock, gl_lru); gl = list_first_entry(list, struct gfs2_glock, gl_lru);
list_del_init(&gl->gl_lru); list_del_init(&gl->gl_lru);
if (!spin_trylock(&gl->gl_lockref.lock)) { if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru: add_back_to_lru:
...@@ -1596,7 +1683,7 @@ static long gfs2_scan_glock_lru(int nr) ...@@ -1596,7 +1683,7 @@ static long gfs2_scan_glock_lru(int nr)
spin_lock(&lru_lock); spin_lock(&lru_lock);
while ((nr-- >= 0) && !list_empty(&lru_list)) { while ((nr-- >= 0) && !list_empty(&lru_list)) {
gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); gl = list_first_entry(&lru_list, struct gfs2_glock, gl_lru);
/* Test for being demotable */ /* Test for being demotable */
if (!test_bit(GLF_LOCK, &gl->gl_flags)) { if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
......
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
struct workqueue_struct *gfs2_freeze_wq; struct workqueue_struct *gfs2_freeze_wq;
extern struct workqueue_struct *gfs2_control_wq;
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{ {
fs_err(gl->gl_name.ln_sbd, fs_err(gl->gl_name.ln_sbd,
...@@ -39,7 +41,8 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) ...@@ -39,7 +41,8 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n", fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
gl->gl_name.ln_type, gl->gl_name.ln_number, gl->gl_name.ln_type, gl->gl_name.ln_number,
gfs2_glock2aspace(gl)); gfs2_glock2aspace(gl));
gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n"); gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
gfs2_withdraw(gl->gl_name.ln_sbd);
} }
/** /**
...@@ -79,34 +82,62 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync, ...@@ -79,34 +82,62 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
} }
static void gfs2_ail_empty_gl(struct gfs2_glock *gl) static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{ {
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct gfs2_trans tr; struct gfs2_trans tr;
int ret;
memset(&tr, 0, sizeof(tr)); memset(&tr, 0, sizeof(tr));
INIT_LIST_HEAD(&tr.tr_buf); INIT_LIST_HEAD(&tr.tr_buf);
INIT_LIST_HEAD(&tr.tr_databuf); INIT_LIST_HEAD(&tr.tr_databuf);
tr.tr_revokes = atomic_read(&gl->gl_ail_count); tr.tr_revokes = atomic_read(&gl->gl_ail_count);
if (!tr.tr_revokes) if (!tr.tr_revokes) {
return; bool have_revokes;
bool log_in_flight;
/*
* We have nothing on the ail, but there could be revokes on
* the sdp revoke queue, in which case, we still want to flush
* the log and wait for it to finish.
*
* If the sdp revoke list is empty too, we might still have an
* io outstanding for writing revokes, so we should wait for
* it before returning.
*
* If none of these conditions are true, our revokes are all
* flushed and we can return.
*/
gfs2_log_lock(sdp);
have_revokes = !list_empty(&sdp->sd_log_revokes);
log_in_flight = atomic_read(&sdp->sd_log_in_flight);
gfs2_log_unlock(sdp);
if (have_revokes)
goto flush;
if (log_in_flight)
log_flush_wait(sdp);
return 0;
}
/* A shortened, inline version of gfs2_trans_begin() /* A shortened, inline version of gfs2_trans_begin()
* tr->alloced is not set since the transaction structure is * tr->alloced is not set since the transaction structure is
* on the stack */ * on the stack */
tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes); tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
tr.tr_ip = _RET_IP_; tr.tr_ip = _RET_IP_;
if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0) ret = gfs2_log_reserve(sdp, tr.tr_reserved);
return; if (ret < 0)
return ret;
WARN_ON_ONCE(current->journal_info); WARN_ON_ONCE(current->journal_info);
current->journal_info = &tr; current->journal_info = &tr;
__gfs2_ail_flush(gl, 0, tr.tr_revokes); __gfs2_ail_flush(gl, 0, tr.tr_revokes);
gfs2_trans_end(sdp); gfs2_trans_end(sdp);
flush:
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
GFS2_LFC_AIL_EMPTY_GL); GFS2_LFC_AIL_EMPTY_GL);
return 0;
} }
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
...@@ -140,35 +171,32 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) ...@@ -140,35 +171,32 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
* return to caller to demote/unlock the glock until I/O is complete. * return to caller to demote/unlock the glock until I/O is complete.
*/ */
static void rgrp_go_sync(struct gfs2_glock *gl) static int rgrp_go_sync(struct gfs2_glock *gl)
{ {
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct address_space *mapping = &sdp->sd_aspace; struct address_space *mapping = &sdp->sd_aspace;
struct gfs2_rgrpd *rgd; struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
int error; int error;
spin_lock(&gl->gl_lockref.lock);
rgd = gl->gl_object;
if (rgd)
gfs2_rgrp_brelse(rgd);
spin_unlock(&gl->gl_lockref.lock);
if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
return; return 0;
GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE); GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL | gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
GFS2_LFC_RGRP_GO_SYNC); GFS2_LFC_RGRP_GO_SYNC);
filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end); filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end); error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
WARN_ON_ONCE(error);
mapping_set_error(mapping, error); mapping_set_error(mapping, error);
gfs2_ail_empty_gl(gl); if (!error)
error = gfs2_ail_empty_gl(gl);
spin_lock(&gl->gl_lockref.lock); spin_lock(&gl->gl_lockref.lock);
rgd = gl->gl_object; rgd = gl->gl_object;
if (rgd) if (rgd)
gfs2_free_clones(rgd); gfs2_free_clones(rgd);
spin_unlock(&gl->gl_lockref.lock); spin_unlock(&gl->gl_lockref.lock);
return error;
} }
/** /**
...@@ -191,7 +219,6 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags) ...@@ -191,7 +219,6 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
gfs2_rgrp_brelse(rgd); gfs2_rgrp_brelse(rgd);
WARN_ON_ONCE(!(flags & DIO_METADATA)); WARN_ON_ONCE(!(flags & DIO_METADATA));
gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end); truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
if (rgd) if (rgd)
...@@ -236,12 +263,12 @@ static void gfs2_clear_glop_pending(struct gfs2_inode *ip) ...@@ -236,12 +263,12 @@ static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
* *
*/ */
static void inode_go_sync(struct gfs2_glock *gl) static int inode_go_sync(struct gfs2_glock *gl)
{ {
struct gfs2_inode *ip = gfs2_glock2inode(gl); struct gfs2_inode *ip = gfs2_glock2inode(gl);
int isreg = ip && S_ISREG(ip->i_inode.i_mode); int isreg = ip && S_ISREG(ip->i_inode.i_mode);
struct address_space *metamapping = gfs2_glock2aspace(gl); struct address_space *metamapping = gfs2_glock2aspace(gl);
int error; int error = 0;
if (isreg) { if (isreg) {
if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags)) if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
...@@ -274,6 +301,7 @@ static void inode_go_sync(struct gfs2_glock *gl) ...@@ -274,6 +301,7 @@ static void inode_go_sync(struct gfs2_glock *gl)
out: out:
gfs2_clear_glop_pending(ip); gfs2_clear_glop_pending(ip);
return error;
} }
/** /**
...@@ -291,8 +319,6 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags) ...@@ -291,8 +319,6 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
{ {
struct gfs2_inode *ip = gfs2_glock2inode(gl); struct gfs2_inode *ip = gfs2_glock2inode(gl);
gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
if (flags & DIO_METADATA) { if (flags & DIO_METADATA) {
struct address_space *mapping = gfs2_glock2aspace(gl); struct address_space *mapping = gfs2_glock2aspace(gl);
truncate_inode_pages(mapping, 0); truncate_inode_pages(mapping, 0);
...@@ -496,24 +522,29 @@ static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl, ...@@ -496,24 +522,29 @@ static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
* *
*/ */
static void freeze_go_sync(struct gfs2_glock *gl) static int freeze_go_sync(struct gfs2_glock *gl)
{ {
int error = 0; int error = 0;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
if (gl->gl_state == LM_ST_SHARED && if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) { test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE); atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
error = freeze_super(sdp->sd_vfs); error = freeze_super(sdp->sd_vfs);
if (error) { if (error) {
fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n", fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
error); error);
if (gfs2_withdrawn(sdp)) {
atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
return 0;
}
gfs2_assert_withdraw(sdp, 0); gfs2_assert_withdraw(sdp, 0);
} }
queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work); queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE | gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
GFS2_LFC_FREEZE_GO_SYNC); GFS2_LFC_FREEZE_GO_SYNC);
} }
return 0;
} }
/** /**
...@@ -582,8 +613,76 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote) ...@@ -582,8 +613,76 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
} }
} }
/**
* inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
* @gl: glock being freed
*
* For now, this is only used for the journal inode glock. In withdraw
* situations, we need to wait for the glock to be freed so that we know
* other nodes may proceed with recovery / journal replay.
*/
static void inode_go_free(struct gfs2_glock *gl)
{
/* Note that we cannot reference gl_object because it's already set
* to NULL by this point in its lifecycle. */
if (!test_bit(GLF_FREEING, &gl->gl_flags))
return;
clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
wake_up_bit(&gl->gl_flags, GLF_FREEING);
}
/**
* nondisk_go_callback - used to signal when a node did a withdraw
* @gl: the nondisk glock
* @remote: true if this came from a different cluster node
*
*/
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
/* Ignore the callback unless it's from another node, and it's the
live lock. */
if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
return;
/* First order of business is to cancel the demote request. We don't
* really want to demote a nondisk glock. At best it's just to inform
* us of another node's withdraw. We'll keep it in SH mode. */
clear_bit(GLF_DEMOTE, &gl->gl_flags);
clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
return;
/* We only care when a node wants us to unlock, because that means
* they want a journal recovered. */
if (gl->gl_demote_state != LM_ST_UNLOCKED)
return;
if (sdp->sd_args.ar_spectator) {
fs_warn(sdp, "Spectator node cannot recover journals.\n");
return;
}
fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
/*
* We can't call remote_withdraw directly here or gfs2_recover_journal
* because this is called from the glock unlock function and the
* remote_withdraw needs to enqueue and dequeue the same "live" glock
* we were called from. So we queue it to the control work queue in
* lock_dlm.
*/
queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}
const struct gfs2_glock_operations gfs2_meta_glops = { const struct gfs2_glock_operations gfs2_meta_glops = {
.go_type = LM_TYPE_META, .go_type = LM_TYPE_META,
.go_flags = GLOF_NONDISK,
}; };
const struct gfs2_glock_operations gfs2_inode_glops = { const struct gfs2_glock_operations gfs2_inode_glops = {
...@@ -594,13 +693,13 @@ const struct gfs2_glock_operations gfs2_inode_glops = { ...@@ -594,13 +693,13 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
.go_dump = inode_go_dump, .go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE, .go_type = LM_TYPE_INODE,
.go_flags = GLOF_ASPACE | GLOF_LRU, .go_flags = GLOF_ASPACE | GLOF_LRU,
.go_free = inode_go_free,
}; };
const struct gfs2_glock_operations gfs2_rgrp_glops = { const struct gfs2_glock_operations gfs2_rgrp_glops = {
.go_sync = rgrp_go_sync, .go_sync = rgrp_go_sync,
.go_inval = rgrp_go_inval, .go_inval = rgrp_go_inval,
.go_lock = gfs2_rgrp_go_lock, .go_lock = gfs2_rgrp_go_lock,
.go_unlock = gfs2_rgrp_go_unlock,
.go_dump = gfs2_rgrp_dump, .go_dump = gfs2_rgrp_dump,
.go_type = LM_TYPE_RGRP, .go_type = LM_TYPE_RGRP,
.go_flags = GLOF_LVB, .go_flags = GLOF_LVB,
...@@ -611,30 +710,34 @@ const struct gfs2_glock_operations gfs2_freeze_glops = { ...@@ -611,30 +710,34 @@ const struct gfs2_glock_operations gfs2_freeze_glops = {
.go_xmote_bh = freeze_go_xmote_bh, .go_xmote_bh = freeze_go_xmote_bh,
.go_demote_ok = freeze_go_demote_ok, .go_demote_ok = freeze_go_demote_ok,
.go_type = LM_TYPE_NONDISK, .go_type = LM_TYPE_NONDISK,
.go_flags = GLOF_NONDISK,
}; };
const struct gfs2_glock_operations gfs2_iopen_glops = { const struct gfs2_glock_operations gfs2_iopen_glops = {
.go_type = LM_TYPE_IOPEN, .go_type = LM_TYPE_IOPEN,
.go_callback = iopen_go_callback, .go_callback = iopen_go_callback,
.go_flags = GLOF_LRU, .go_flags = GLOF_LRU | GLOF_NONDISK,
}; };
const struct gfs2_glock_operations gfs2_flock_glops = { const struct gfs2_glock_operations gfs2_flock_glops = {
.go_type = LM_TYPE_FLOCK, .go_type = LM_TYPE_FLOCK,
.go_flags = GLOF_LRU, .go_flags = GLOF_LRU | GLOF_NONDISK,
}; };
const struct gfs2_glock_operations gfs2_nondisk_glops = { const struct gfs2_glock_operations gfs2_nondisk_glops = {
.go_type = LM_TYPE_NONDISK, .go_type = LM_TYPE_NONDISK,
.go_flags = GLOF_NONDISK,
.go_callback = nondisk_go_callback,
}; };
const struct gfs2_glock_operations gfs2_quota_glops = { const struct gfs2_glock_operations gfs2_quota_glops = {
.go_type = LM_TYPE_QUOTA, .go_type = LM_TYPE_QUOTA,
.go_flags = GLOF_LVB | GLOF_LRU, .go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
}; };
const struct gfs2_glock_operations gfs2_journal_glops = { const struct gfs2_glock_operations gfs2_journal_glops = {
.go_type = LM_TYPE_JOURNAL, .go_type = LM_TYPE_JOURNAL,
.go_flags = GLOF_NONDISK,
}; };
const struct gfs2_glock_operations *gfs2_glops_list[] = { const struct gfs2_glock_operations *gfs2_glops_list[] = {
......
...@@ -234,20 +234,21 @@ struct lm_lockname { ...@@ -234,20 +234,21 @@ struct lm_lockname {
struct gfs2_glock_operations { struct gfs2_glock_operations {
void (*go_sync) (struct gfs2_glock *gl); int (*go_sync) (struct gfs2_glock *gl);
int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh); int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
void (*go_inval) (struct gfs2_glock *gl, int flags); void (*go_inval) (struct gfs2_glock *gl, int flags);
int (*go_demote_ok) (const struct gfs2_glock *gl); int (*go_demote_ok) (const struct gfs2_glock *gl);
int (*go_lock) (struct gfs2_holder *gh); int (*go_lock) (struct gfs2_holder *gh);
void (*go_unlock) (struct gfs2_holder *gh);
void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl, void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl,
const char *fs_id_buf); const char *fs_id_buf);
void (*go_callback)(struct gfs2_glock *gl, bool remote); void (*go_callback)(struct gfs2_glock *gl, bool remote);
void (*go_free)(struct gfs2_glock *gl);
const int go_type; const int go_type;
const unsigned long go_flags; const unsigned long go_flags;
#define GLOF_ASPACE 1 #define GLOF_ASPACE 1 /* address space attached */
#define GLOF_LVB 2 #define GLOF_LVB 2 /* Lock Value Block attached */
#define GLOF_LRU 4 #define GLOF_LRU 4 /* LRU managed */
#define GLOF_NONDISK 8 /* not I/O related */
}; };
enum { enum {
...@@ -294,6 +295,7 @@ struct gfs2_qadata { /* quota allocation data */ ...@@ -294,6 +295,7 @@ struct gfs2_qadata { /* quota allocation data */
struct gfs2_quota_data *qa_qd[2 * GFS2_MAXQUOTAS]; struct gfs2_quota_data *qa_qd[2 * GFS2_MAXQUOTAS];
struct gfs2_holder qa_qd_ghs[2 * GFS2_MAXQUOTAS]; struct gfs2_holder qa_qd_ghs[2 * GFS2_MAXQUOTAS];
unsigned int qa_qd_num; unsigned int qa_qd_num;
int qa_ref;
}; };
/* Resource group multi-block reservation, in order of appearance: /* Resource group multi-block reservation, in order of appearance:
...@@ -343,6 +345,7 @@ enum { ...@@ -343,6 +345,7 @@ enum {
GLF_OBJECT = 14, /* Used only for tracing */ GLF_OBJECT = 14, /* Used only for tracing */
GLF_BLOCKING = 15, GLF_BLOCKING = 15,
GLF_INODE_CREATING = 16, /* Inode creation occurring */ GLF_INODE_CREATING = 16, /* Inode creation occurring */
GLF_FREEING = 18, /* Wait for glock to be freed */
}; };
struct gfs2_glock { struct gfs2_glock {
...@@ -542,6 +545,7 @@ struct gfs2_jdesc { ...@@ -542,6 +545,7 @@ struct gfs2_jdesc {
struct list_head jd_revoke_list; struct list_head jd_revoke_list;
unsigned int jd_replay_tail; unsigned int jd_replay_tail;
u64 jd_no_addr;
}; };
struct gfs2_statfs_change_host { struct gfs2_statfs_change_host {
...@@ -616,8 +620,12 @@ enum { ...@@ -616,8 +620,12 @@ enum {
SDF_RORECOVERY = 7, /* read only recovery */ SDF_RORECOVERY = 7, /* read only recovery */
SDF_SKIP_DLM_UNLOCK = 8, SDF_SKIP_DLM_UNLOCK = 8,
SDF_FORCE_AIL_FLUSH = 9, SDF_FORCE_AIL_FLUSH = 9,
SDF_AIL1_IO_ERROR = 10, SDF_FS_FROZEN = 10,
SDF_FS_FROZEN = 11, SDF_WITHDRAWING = 11, /* Will withdraw eventually */
SDF_WITHDRAW_IN_PROG = 12, /* Withdraw is in progress */
SDF_REMOTE_WITHDRAW = 13, /* Performing remote recovery */
SDF_WITHDRAW_RECOVERY = 14, /* Wait for journal recovery when we are
withdrawing */
}; };
enum gfs2_freeze_state { enum gfs2_freeze_state {
...@@ -768,6 +776,7 @@ struct gfs2_sbd { ...@@ -768,6 +776,7 @@ struct gfs2_sbd {
struct gfs2_jdesc *sd_jdesc; struct gfs2_jdesc *sd_jdesc;
struct gfs2_holder sd_journal_gh; struct gfs2_holder sd_journal_gh;
struct gfs2_holder sd_jinode_gh; struct gfs2_holder sd_jinode_gh;
struct gfs2_glock *sd_jinode_gl;
struct gfs2_holder sd_sc_gh; struct gfs2_holder sd_sc_gh;
struct gfs2_holder sd_qc_gh; struct gfs2_holder sd_qc_gh;
...@@ -828,7 +837,8 @@ struct gfs2_sbd { ...@@ -828,7 +837,8 @@ struct gfs2_sbd {
atomic_t sd_log_in_flight; atomic_t sd_log_in_flight;
struct bio *sd_log_bio; struct bio *sd_log_bio;
wait_queue_head_t sd_log_flush_wait; wait_queue_head_t sd_log_flush_wait;
int sd_log_error; int sd_log_error; /* First log error */
wait_queue_head_t sd_withdraw_wait;
atomic_t sd_reserving_log; atomic_t sd_reserving_log;
wait_queue_head_t sd_reserving_log_wait; wait_queue_head_t sd_reserving_log_wait;
...@@ -852,6 +862,7 @@ struct gfs2_sbd { ...@@ -852,6 +862,7 @@ struct gfs2_sbd {
unsigned long sd_last_warning; unsigned long sd_last_warning;
struct dentry *debugfs_dir; /* debugfs directory */ struct dentry *debugfs_dir; /* debugfs directory */
unsigned long sd_glock_dqs_held;
}; };
static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which) static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
......
...@@ -144,7 +144,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, ...@@ -144,7 +144,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl); error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
if (unlikely(error)) if (unlikely(error))
goto fail_put; goto fail;
if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) { if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
/* /*
...@@ -155,13 +155,13 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, ...@@ -155,13 +155,13 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
GL_SKIP, &i_gh); GL_SKIP, &i_gh);
if (error) if (error)
goto fail_put; goto fail;
if (blktype != GFS2_BLKST_FREE) { if (blktype != GFS2_BLKST_FREE) {
error = gfs2_check_blk_type(sdp, no_addr, error = gfs2_check_blk_type(sdp, no_addr,
blktype); blktype);
if (error) if (error)
goto fail_put; goto fail;
} }
} }
...@@ -169,7 +169,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, ...@@ -169,7 +169,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
set_bit(GIF_INVALID, &ip->i_flags); set_bit(GIF_INVALID, &ip->i_flags);
error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
if (unlikely(error)) if (unlikely(error))
goto fail_put; goto fail;
glock_set_object(ip->i_iopen_gh.gh_gl, ip); glock_set_object(ip->i_iopen_gh.gh_gl, ip);
gfs2_glock_put(io_gl); gfs2_glock_put(io_gl);
io_gl = NULL; io_gl = NULL;
...@@ -182,7 +182,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, ...@@ -182,7 +182,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
/* Inode glock must be locked already */ /* Inode glock must be locked already */
error = gfs2_inode_refresh(GFS2_I(inode)); error = gfs2_inode_refresh(GFS2_I(inode));
if (error) if (error)
goto fail_refresh; goto fail;
} else { } else {
ip->i_no_formal_ino = no_formal_ino; ip->i_no_formal_ino = no_formal_ino;
inode->i_mode = DT2IF(type); inode->i_mode = DT2IF(type);
...@@ -197,17 +197,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, ...@@ -197,17 +197,11 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
gfs2_glock_dq_uninit(&i_gh); gfs2_glock_dq_uninit(&i_gh);
return inode; return inode;
fail_refresh: fail:
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
fail_put:
if (io_gl) if (io_gl)
gfs2_glock_put(io_gl); gfs2_glock_put(io_gl);
glock_clear_object(ip->i_gl, ip);
if (gfs2_holder_initialized(&i_gh)) if (gfs2_holder_initialized(&i_gh))
gfs2_glock_dq_uninit(&i_gh); gfs2_glock_dq_uninit(&i_gh);
fail:
iget_failed(inode); iget_failed(inode);
return ERR_PTR(error); return ERR_PTR(error);
} }
...@@ -594,13 +588,13 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, ...@@ -594,13 +588,13 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (!name->len || name->len > GFS2_FNAMESIZE) if (!name->len || name->len > GFS2_FNAMESIZE)
return -ENAMETOOLONG; return -ENAMETOOLONG;
error = gfs2_rsqa_alloc(dip); error = gfs2_qa_get(dip);
if (error) if (error)
return error; return error;
error = gfs2_rindex_update(sdp); error = gfs2_rindex_update(sdp);
if (error) if (error)
return error; goto fail;
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); error = gfs2_glock_nq_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
if (error) if (error)
...@@ -647,7 +641,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, ...@@ -647,7 +641,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
goto fail_gunlock; goto fail_gunlock;
ip = GFS2_I(inode); ip = GFS2_I(inode);
error = gfs2_rsqa_alloc(ip); error = gfs2_qa_get(ip);
if (error) if (error)
goto fail_free_acls; goto fail_free_acls;
...@@ -782,11 +776,13 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, ...@@ -782,11 +776,13 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags); clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
gfs2_glock_put(io_gl); gfs2_glock_put(io_gl);
fail_free_inode: fail_free_inode:
gfs2_qa_put(ip);
if (ip->i_gl) { if (ip->i_gl) {
glock_clear_object(ip->i_gl, ip); glock_clear_object(ip->i_gl, ip);
gfs2_glock_put(ip->i_gl); gfs2_glock_put(ip->i_gl);
} }
gfs2_rsqa_delete(ip, NULL); gfs2_rs_delete(ip, NULL);
gfs2_qa_put(ip);
fail_free_acls: fail_free_acls:
posix_acl_release(default_acl); posix_acl_release(default_acl);
posix_acl_release(acl); posix_acl_release(acl);
...@@ -804,6 +800,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, ...@@ -804,6 +800,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (gfs2_holder_initialized(ghs + 1)) if (gfs2_holder_initialized(ghs + 1))
gfs2_glock_dq_uninit(ghs + 1); gfs2_glock_dq_uninit(ghs + 1);
fail: fail:
gfs2_qa_put(dip);
return error; return error;
} }
...@@ -905,7 +902,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, ...@@ -905,7 +902,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
if (S_ISDIR(inode->i_mode)) if (S_ISDIR(inode->i_mode))
return -EPERM; return -EPERM;
error = gfs2_rsqa_alloc(dip); error = gfs2_qa_get(dip);
if (error) if (error)
return error; return error;
...@@ -1008,6 +1005,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, ...@@ -1008,6 +1005,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
out_child: out_child:
gfs2_glock_dq(ghs); gfs2_glock_dq(ghs);
out_parent: out_parent:
gfs2_qa_put(ip);
gfs2_holder_uninit(ghs); gfs2_holder_uninit(ghs);
gfs2_holder_uninit(ghs + 1); gfs2_holder_uninit(ghs + 1);
return error; return error;
...@@ -1368,7 +1366,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, ...@@ -1368,7 +1366,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
if (error) if (error)
return error; return error;
error = gfs2_rsqa_alloc(ndip); error = gfs2_qa_get(ndip);
if (error) if (error)
return error; return error;
...@@ -1568,6 +1566,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, ...@@ -1568,6 +1566,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
if (gfs2_holder_initialized(&r_gh)) if (gfs2_holder_initialized(&r_gh))
gfs2_glock_dq_uninit(&r_gh); gfs2_glock_dq_uninit(&r_gh);
out: out:
gfs2_qa_put(ndip);
return error; return error;
} }
...@@ -1879,10 +1878,9 @@ static int setattr_chown(struct inode *inode, struct iattr *attr) ...@@ -1879,10 +1878,9 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
ouid = nuid = NO_UID_QUOTA_CHANGE; ouid = nuid = NO_UID_QUOTA_CHANGE;
if (!(attr->ia_valid & ATTR_GID) || gid_eq(ogid, ngid)) if (!(attr->ia_valid & ATTR_GID) || gid_eq(ogid, ngid))
ogid = ngid = NO_GID_QUOTA_CHANGE; ogid = ngid = NO_GID_QUOTA_CHANGE;
error = gfs2_qa_get(ip);
error = gfs2_rsqa_alloc(ip);
if (error) if (error)
goto out; return error;
error = gfs2_rindex_update(sdp); error = gfs2_rindex_update(sdp);
if (error) if (error)
...@@ -1920,6 +1918,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr) ...@@ -1920,6 +1918,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
out_gunlock_q: out_gunlock_q:
gfs2_quota_unlock(ip); gfs2_quota_unlock(ip);
out: out:
gfs2_qa_put(ip);
return error; return error;
} }
...@@ -1941,21 +1940,21 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr) ...@@ -1941,21 +1940,21 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr)
struct gfs2_holder i_gh; struct gfs2_holder i_gh;
int error; int error;
error = gfs2_rsqa_alloc(ip); error = gfs2_qa_get(ip);
if (error) if (error)
return error; return error;
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
if (error) if (error)
return error; goto out;
error = -EPERM; error = -EPERM;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
goto out; goto error;
error = setattr_prepare(dentry, attr); error = setattr_prepare(dentry, attr);
if (error) if (error)
goto out; goto error;
if (attr->ia_valid & ATTR_SIZE) if (attr->ia_valid & ATTR_SIZE)
error = gfs2_setattr_size(inode, attr->ia_size); error = gfs2_setattr_size(inode, attr->ia_size);
...@@ -1967,10 +1966,12 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr) ...@@ -1967,10 +1966,12 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr)
error = posix_acl_chmod(inode, inode->i_mode); error = posix_acl_chmod(inode, inode->i_mode);
} }
out: error:
if (!error) if (!error)
mark_inode_dirty(inode); mark_inode_dirty(inode);
gfs2_glock_dq_uninit(&i_gh); gfs2_glock_dq_uninit(&i_gh);
out:
gfs2_qa_put(ip);
return error; return error;
} }
......
...@@ -16,6 +16,8 @@ ...@@ -16,6 +16,8 @@
#include "incore.h" #include "incore.h"
#include "glock.h" #include "glock.h"
#include "glops.h"
#include "recovery.h"
#include "util.h" #include "util.h"
#include "sys.h" #include "sys.h"
#include "trace_gfs2.h" #include "trace_gfs2.h"
...@@ -124,6 +126,8 @@ static void gdlm_ast(void *arg) ...@@ -124,6 +126,8 @@ static void gdlm_ast(void *arg)
switch (gl->gl_lksb.sb_status) { switch (gl->gl_lksb.sb_status) {
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */ case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
if (gl->gl_ops->go_free)
gl->gl_ops->go_free(gl);
gfs2_glock_free(gl); gfs2_glock_free(gl);
return; return;
case -DLM_ECANCEL: /* Cancel while getting lock */ case -DLM_ECANCEL: /* Cancel while getting lock */
...@@ -323,6 +327,7 @@ static void gdlm_cancel(struct gfs2_glock *gl) ...@@ -323,6 +327,7 @@ static void gdlm_cancel(struct gfs2_glock *gl)
/* /*
* dlm/gfs2 recovery coordination using dlm_recover callbacks * dlm/gfs2 recovery coordination using dlm_recover callbacks
* *
* 0. gfs2 checks for another cluster node withdraw, needing journal replay
* 1. dlm_controld sees lockspace members change * 1. dlm_controld sees lockspace members change
* 2. dlm_controld blocks dlm-kernel locking activity * 2. dlm_controld blocks dlm-kernel locking activity
* 3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep) * 3. dlm_controld within dlm-kernel notifies gfs2 (recover_prep)
...@@ -571,6 +576,28 @@ static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags) ...@@ -571,6 +576,28 @@ static int control_lock(struct gfs2_sbd *sdp, int mode, uint32_t flags)
&ls->ls_control_lksb, "control_lock"); &ls->ls_control_lksb, "control_lock");
} }
/**
* remote_withdraw - react to a node withdrawing from the file system
* @sdp: The superblock
*/
static void remote_withdraw(struct gfs2_sbd *sdp)
{
struct gfs2_jdesc *jd;
int ret = 0, count = 0;
list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
if (jd->jd_jid == sdp->sd_lockstruct.ls_jid)
continue;
ret = gfs2_recover_journal(jd, true);
if (ret)
break;
count++;
}
/* Now drop the additional reference we acquired */
fs_err(sdp, "Journals checked: %d, ret = %d.\n", count, ret);
}
static void gfs2_control_func(struct work_struct *work) static void gfs2_control_func(struct work_struct *work)
{ {
struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work); struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_control_work.work);
...@@ -581,6 +608,13 @@ static void gfs2_control_func(struct work_struct *work) ...@@ -581,6 +608,13 @@ static void gfs2_control_func(struct work_struct *work)
int recover_size; int recover_size;
int i, error; int i, error;
/* First check for other nodes that may have done a withdraw. */
if (test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags)) {
remote_withdraw(sdp);
clear_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
return;
}
spin_lock(&ls->ls_recover_spin); spin_lock(&ls->ls_recover_spin);
/* /*
* No MOUNT_DONE means we're still mounting; control_mount() * No MOUNT_DONE means we're still mounting; control_mount()
...@@ -1079,6 +1113,10 @@ static void gdlm_recover_prep(void *arg) ...@@ -1079,6 +1113,10 @@ static void gdlm_recover_prep(void *arg)
struct gfs2_sbd *sdp = arg; struct gfs2_sbd *sdp = arg;
struct lm_lockstruct *ls = &sdp->sd_lockstruct; struct lm_lockstruct *ls = &sdp->sd_lockstruct;
if (gfs2_withdrawn(sdp)) {
fs_err(sdp, "recover_prep ignored due to withdraw.\n");
return;
}
spin_lock(&ls->ls_recover_spin); spin_lock(&ls->ls_recover_spin);
ls->ls_recover_block = ls->ls_recover_start; ls->ls_recover_block = ls->ls_recover_start;
set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags); set_bit(DFL_DLM_RECOVERY, &ls->ls_recover_flags);
...@@ -1101,6 +1139,11 @@ static void gdlm_recover_slot(void *arg, struct dlm_slot *slot) ...@@ -1101,6 +1139,11 @@ static void gdlm_recover_slot(void *arg, struct dlm_slot *slot)
struct lm_lockstruct *ls = &sdp->sd_lockstruct; struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int jid = slot->slot - 1; int jid = slot->slot - 1;
if (gfs2_withdrawn(sdp)) {
fs_err(sdp, "recover_slot jid %d ignored due to withdraw.\n",
jid);
return;
}
spin_lock(&ls->ls_recover_spin); spin_lock(&ls->ls_recover_spin);
if (ls->ls_recover_size < jid + 1) { if (ls->ls_recover_size < jid + 1) {
fs_err(sdp, "recover_slot jid %d gen %u short size %d\n", fs_err(sdp, "recover_slot jid %d gen %u short size %d\n",
...@@ -1125,6 +1168,10 @@ static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots, ...@@ -1125,6 +1168,10 @@ static void gdlm_recover_done(void *arg, struct dlm_slot *slots, int num_slots,
struct gfs2_sbd *sdp = arg; struct gfs2_sbd *sdp = arg;
struct lm_lockstruct *ls = &sdp->sd_lockstruct; struct lm_lockstruct *ls = &sdp->sd_lockstruct;
if (gfs2_withdrawn(sdp)) {
fs_err(sdp, "recover_done ignored due to withdraw.\n");
return;
}
/* ensure the ls jid arrays are large enough */ /* ensure the ls jid arrays are large enough */
set_recover_size(sdp, slots, num_slots); set_recover_size(sdp, slots, num_slots);
...@@ -1152,6 +1199,11 @@ static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid, ...@@ -1152,6 +1199,11 @@ static void gdlm_recovery_result(struct gfs2_sbd *sdp, unsigned int jid,
{ {
struct lm_lockstruct *ls = &sdp->sd_lockstruct; struct lm_lockstruct *ls = &sdp->sd_lockstruct;
if (gfs2_withdrawn(sdp)) {
fs_err(sdp, "recovery_result jid %d ignored due to withdraw.\n",
jid);
return;
}
if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags)) if (test_bit(DFL_NO_DLM_OPS, &ls->ls_recover_flags))
return; return;
......
This diff is collapsed.
...@@ -73,6 +73,7 @@ extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, ...@@ -73,6 +73,7 @@ extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
u32 type); u32 type);
extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans); extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc); extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
extern void log_flush_wait(struct gfs2_sbd *sdp);
extern int gfs2_logd(void *data); extern int gfs2_logd(void *data);
extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
......
...@@ -203,8 +203,12 @@ static void gfs2_end_log_write(struct bio *bio) ...@@ -203,8 +203,12 @@ static void gfs2_end_log_write(struct bio *bio)
struct bvec_iter_all iter_all; struct bvec_iter_all iter_all;
if (bio->bi_status) { if (bio->bi_status) {
fs_err(sdp, "Error %d writing to journal, jid=%u\n", if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
bio->bi_status, sdp->sd_jdesc->jd_jid); fs_err(sdp, "Error %d writing to journal, jid=%u\n",
bio->bi_status, sdp->sd_jdesc->jd_jid);
gfs2_withdraw_delayed(sdp);
/* prevent more writes to the journal */
clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
wake_up(&sdp->sd_logd_waitq); wake_up(&sdp->sd_logd_waitq);
} }
...@@ -730,7 +734,7 @@ static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) ...@@ -730,7 +734,7 @@ static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
head = &tr->tr_buf; head = &tr->tr_buf;
while (!list_empty(head)) { while (!list_empty(head)) {
bd = list_entry(head->next, struct gfs2_bufdata, bd_list); bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
list_del_init(&bd->bd_list); list_del_init(&bd->bd_list);
gfs2_unpin(sdp, bd->bd_bh, tr); gfs2_unpin(sdp, bd->bd_bh, tr);
} }
...@@ -900,7 +904,7 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) ...@@ -900,7 +904,7 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
struct gfs2_glock *gl; struct gfs2_glock *gl;
while (!list_empty(head)) { while (!list_empty(head)) {
bd = list_entry(head->next, struct gfs2_bufdata, bd_list); bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
list_del_init(&bd->bd_list); list_del_init(&bd->bd_list);
gl = bd->bd_gl; gl = bd->bd_gl;
gfs2_glock_remove_revoke(gl); gfs2_glock_remove_revoke(gl);
...@@ -1079,7 +1083,7 @@ static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) ...@@ -1079,7 +1083,7 @@ static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
head = &tr->tr_databuf; head = &tr->tr_databuf;
while (!list_empty(head)) { while (!list_empty(head)) {
bd = list_entry(head->next, struct gfs2_bufdata, bd_list); bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
list_del_init(&bd->bd_list); list_del_init(&bd->bd_list);
gfs2_unpin(sdp, bd->bd_bh, tr); gfs2_unpin(sdp, bd->bd_bh, tr);
} }
......
...@@ -251,7 +251,8 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, ...@@ -251,7 +251,8 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
struct buffer_head *bh, *bhs[2]; struct buffer_head *bh, *bhs[2];
int num = 0; int num = 0;
if (unlikely(gfs2_withdrawn(sdp))) { if (unlikely(gfs2_withdrawn(sdp)) &&
(!sdp->sd_jdesc || (blkno != sdp->sd_jdesc->jd_no_addr))) {
*bhp = NULL; *bhp = NULL;
return -EIO; return -EIO;
} }
......
...@@ -552,6 +552,8 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh) ...@@ -552,6 +552,8 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
mutex_lock(&sdp->sd_jindex_mutex); mutex_lock(&sdp->sd_jindex_mutex);
for (;;) { for (;;) {
struct gfs2_inode *jip;
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh); error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
if (error) if (error)
break; break;
...@@ -591,6 +593,8 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh) ...@@ -591,6 +593,8 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
spin_lock(&sdp->sd_jindex_spin); spin_lock(&sdp->sd_jindex_spin);
jd->jd_jid = sdp->sd_journals++; jd->jd_jid = sdp->sd_journals++;
jip = GFS2_I(jd->jd_inode);
jd->jd_no_addr = jip->i_no_addr;
list_add_tail(&jd->jd_list, &sdp->sd_jindex_list); list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
spin_unlock(&sdp->sd_jindex_spin); spin_unlock(&sdp->sd_jindex_spin);
} }
...@@ -600,48 +604,6 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh) ...@@ -600,48 +604,6 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
return error; return error;
} }
/**
* check_journal_clean - Make sure a journal is clean for a spectator mount
* @sdp: The GFS2 superblock
* @jd: The journal descriptor
*
* Returns: 0 if the journal is clean or locked, else an error
*/
static int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd)
{
int error;
struct gfs2_holder j_gh;
struct gfs2_log_header_host head;
struct gfs2_inode *ip;
ip = GFS2_I(jd->jd_inode);
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_NOEXP |
GL_EXACT | GL_NOCACHE, &j_gh);
if (error) {
fs_err(sdp, "Error locking journal for spectator mount.\n");
return -EPERM;
}
error = gfs2_jdesc_check(jd);
if (error) {
fs_err(sdp, "Error checking journal for spectator mount.\n");
goto out_unlock;
}
error = gfs2_find_jhead(jd, &head, false);
if (error) {
fs_err(sdp, "Error parsing journal for spectator mount.\n");
goto out_unlock;
}
if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
error = -EPERM;
fs_err(sdp, "jid=%u: Journal is dirty, so the first mounter "
"must not be a spectator.\n", jd->jd_jid);
}
out_unlock:
gfs2_glock_dq_uninit(&j_gh);
return error;
}
static int init_journal(struct gfs2_sbd *sdp, int undo) static int init_journal(struct gfs2_sbd *sdp, int undo)
{ {
struct inode *master = d_inode(sdp->sd_master_dir); struct inode *master = d_inode(sdp->sd_master_dir);
...@@ -694,7 +656,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) ...@@ -694,7 +656,8 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid, error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
&gfs2_journal_glops, &gfs2_journal_glops,
LM_ST_EXCLUSIVE, LM_FLAG_NOEXP, LM_ST_EXCLUSIVE,
LM_FLAG_NOEXP | GL_NOCACHE,
&sdp->sd_journal_gh); &sdp->sd_journal_gh);
if (error) { if (error) {
fs_err(sdp, "can't acquire journal glock: %d\n", error); fs_err(sdp, "can't acquire journal glock: %d\n", error);
...@@ -702,6 +665,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) ...@@ -702,6 +665,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
} }
ip = GFS2_I(sdp->sd_jdesc->jd_inode); ip = GFS2_I(sdp->sd_jdesc->jd_inode);
sdp->sd_jinode_gl = ip->i_gl;
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE, LM_FLAG_NOEXP | GL_EXACT | GL_NOCACHE,
&sdp->sd_jinode_gh); &sdp->sd_jinode_gh);
...@@ -732,7 +696,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) ...@@ -732,7 +696,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
struct gfs2_jdesc *jd = gfs2_jdesc_find(sdp, x); struct gfs2_jdesc *jd = gfs2_jdesc_find(sdp, x);
if (sdp->sd_args.ar_spectator) { if (sdp->sd_args.ar_spectator) {
error = check_journal_clean(sdp, jd); error = check_journal_clean(sdp, jd, true);
if (error) if (error)
goto fail_jinode_gh; goto fail_jinode_gh;
continue; continue;
...@@ -762,10 +726,13 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) ...@@ -762,10 +726,13 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
return 0; return 0;
fail_jinode_gh: fail_jinode_gh:
if (!sdp->sd_args.ar_spectator) /* A withdraw may have done dq/uninit so now we need to check it */
if (!sdp->sd_args.ar_spectator &&
gfs2_holder_initialized(&sdp->sd_jinode_gh))
gfs2_glock_dq_uninit(&sdp->sd_jinode_gh); gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
fail_journal_gh: fail_journal_gh:
if (!sdp->sd_args.ar_spectator) if (!sdp->sd_args.ar_spectator &&
gfs2_holder_initialized(&sdp->sd_journal_gh))
gfs2_glock_dq_uninit(&sdp->sd_journal_gh); gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
fail_jindex: fail_jindex:
gfs2_jindex_free(sdp); gfs2_jindex_free(sdp);
......
...@@ -115,7 +115,7 @@ static void gfs2_qd_dispose(struct list_head *list) ...@@ -115,7 +115,7 @@ static void gfs2_qd_dispose(struct list_head *list)
struct gfs2_sbd *sdp; struct gfs2_sbd *sdp;
while (!list_empty(list)) { while (!list_empty(list)) {
qd = list_entry(list->next, struct gfs2_quota_data, qd_lru); qd = list_first_entry(list, struct gfs2_quota_data, qd_lru);
sdp = qd->qd_gl->gl_name.ln_sbd; sdp = qd->qd_gl->gl_name.ln_sbd;
list_del(&qd->qd_lru); list_del(&qd->qd_lru);
...@@ -525,11 +525,11 @@ static void qdsb_put(struct gfs2_quota_data *qd) ...@@ -525,11 +525,11 @@ static void qdsb_put(struct gfs2_quota_data *qd)
} }
/** /**
* gfs2_qa_alloc - make sure we have a quota allocations data structure, * gfs2_qa_get - make sure we have a quota allocations data structure,
* if necessary * if necessary
* @ip: the inode for this reservation * @ip: the inode for this reservation
*/ */
int gfs2_qa_alloc(struct gfs2_inode *ip) int gfs2_qa_get(struct gfs2_inode *ip)
{ {
int error = 0; int error = 0;
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
...@@ -540,17 +540,21 @@ int gfs2_qa_alloc(struct gfs2_inode *ip) ...@@ -540,17 +540,21 @@ int gfs2_qa_alloc(struct gfs2_inode *ip)
down_write(&ip->i_rw_mutex); down_write(&ip->i_rw_mutex);
if (ip->i_qadata == NULL) { if (ip->i_qadata == NULL) {
ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS); ip->i_qadata = kmem_cache_zalloc(gfs2_qadata_cachep, GFP_NOFS);
if (!ip->i_qadata) if (!ip->i_qadata) {
error = -ENOMEM; error = -ENOMEM;
goto out;
}
} }
ip->i_qadata->qa_ref++;
out:
up_write(&ip->i_rw_mutex); up_write(&ip->i_rw_mutex);
return error; return error;
} }
void gfs2_qa_delete(struct gfs2_inode *ip, atomic_t *wcount) void gfs2_qa_put(struct gfs2_inode *ip)
{ {
down_write(&ip->i_rw_mutex); down_write(&ip->i_rw_mutex);
if (ip->i_qadata && ((wcount == NULL) || (atomic_read(wcount) <= 1))) { if (ip->i_qadata && --ip->i_qadata->qa_ref == 0) {
kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata); kmem_cache_free(gfs2_qadata_cachep, ip->i_qadata);
ip->i_qadata = NULL; ip->i_qadata = NULL;
} }
...@@ -566,27 +570,27 @@ int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) ...@@ -566,27 +570,27 @@ int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF) if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return 0; return 0;
if (ip->i_qadata == NULL) { error = gfs2_qa_get(ip);
error = gfs2_rsqa_alloc(ip); if (error)
if (error) return error;
return error;
}
qd = ip->i_qadata->qa_qd; qd = ip->i_qadata->qa_qd;
if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) || if (gfs2_assert_warn(sdp, !ip->i_qadata->qa_qd_num) ||
gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags))) {
return -EIO; error = -EIO;
goto out;
}
error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd); error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
if (error) if (error)
goto out; goto out_unhold;
ip->i_qadata->qa_qd_num++; ip->i_qadata->qa_qd_num++;
qd++; qd++;
error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd); error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
if (error) if (error)
goto out; goto out_unhold;
ip->i_qadata->qa_qd_num++; ip->i_qadata->qa_qd_num++;
qd++; qd++;
...@@ -594,7 +598,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) ...@@ -594,7 +598,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
!uid_eq(uid, ip->i_inode.i_uid)) { !uid_eq(uid, ip->i_inode.i_uid)) {
error = qdsb_get(sdp, make_kqid_uid(uid), qd); error = qdsb_get(sdp, make_kqid_uid(uid), qd);
if (error) if (error)
goto out; goto out_unhold;
ip->i_qadata->qa_qd_num++; ip->i_qadata->qa_qd_num++;
qd++; qd++;
} }
...@@ -603,14 +607,15 @@ int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid) ...@@ -603,14 +607,15 @@ int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
!gid_eq(gid, ip->i_inode.i_gid)) { !gid_eq(gid, ip->i_inode.i_gid)) {
error = qdsb_get(sdp, make_kqid_gid(gid), qd); error = qdsb_get(sdp, make_kqid_gid(gid), qd);
if (error) if (error)
goto out; goto out_unhold;
ip->i_qadata->qa_qd_num++; ip->i_qadata->qa_qd_num++;
qd++; qd++;
} }
out: out_unhold:
if (error) if (error)
gfs2_quota_unhold(ip); gfs2_quota_unhold(ip);
out:
return error; return error;
} }
...@@ -621,6 +626,7 @@ void gfs2_quota_unhold(struct gfs2_inode *ip) ...@@ -621,6 +626,7 @@ void gfs2_quota_unhold(struct gfs2_inode *ip)
if (ip->i_qadata == NULL) if (ip->i_qadata == NULL)
return; return;
gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)); gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));
for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
...@@ -628,6 +634,7 @@ void gfs2_quota_unhold(struct gfs2_inode *ip) ...@@ -628,6 +634,7 @@ void gfs2_quota_unhold(struct gfs2_inode *ip)
ip->i_qadata->qa_qd[x] = NULL; ip->i_qadata->qa_qd[x] = NULL;
} }
ip->i_qadata->qa_qd_num = 0; ip->i_qadata->qa_qd_num = 0;
gfs2_qa_put(ip);
} }
static int sort_qd(const void *a, const void *b) static int sort_qd(const void *a, const void *b)
...@@ -876,7 +883,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) ...@@ -876,7 +883,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
unsigned int nalloc = 0, blocks; unsigned int nalloc = 0, blocks;
int error; int error;
error = gfs2_rsqa_alloc(ip); error = gfs2_qa_get(ip);
if (error) if (error)
return error; return error;
...@@ -884,8 +891,10 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) ...@@ -884,8 +891,10 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
&data_blocks, &ind_blocks); &data_blocks, &ind_blocks);
ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS); ghs = kmalloc_array(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
if (!ghs) if (!ghs) {
return -ENOMEM; error = -ENOMEM;
goto out;
}
sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL); sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
inode_lock(&ip->i_inode); inode_lock(&ip->i_inode);
...@@ -893,12 +902,12 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) ...@@ -893,12 +902,12 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE, error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
GL_NOCACHE, &ghs[qx]); GL_NOCACHE, &ghs[qx]);
if (error) if (error)
goto out; goto out_dq;
} }
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh); error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
if (error) if (error)
goto out; goto out_dq;
for (x = 0; x < num_qd; x++) { for (x = 0; x < num_qd; x++) {
offset = qd2offset(qda[x]); offset = qd2offset(qda[x]);
...@@ -950,13 +959,15 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) ...@@ -950,13 +959,15 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
gfs2_inplace_release(ip); gfs2_inplace_release(ip);
out_alloc: out_alloc:
gfs2_glock_dq_uninit(&i_gh); gfs2_glock_dq_uninit(&i_gh);
out: out_dq:
while (qx--) while (qx--)
gfs2_glock_dq_uninit(&ghs[qx]); gfs2_glock_dq_uninit(&ghs[qx]);
inode_unlock(&ip->i_inode); inode_unlock(&ip->i_inode);
kfree(ghs); kfree(ghs);
gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl, gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC); GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
out:
gfs2_qa_put(ip);
return error; return error;
} }
...@@ -1259,6 +1270,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change, ...@@ -1259,6 +1270,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
if (ip->i_diskflags & GFS2_DIF_SYSTEM) if (ip->i_diskflags & GFS2_DIF_SYSTEM)
return; return;
BUG_ON(ip->i_qadata->qa_ref <= 0);
for (x = 0; x < ip->i_qadata->qa_qd_num; x++) { for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
qd = ip->i_qadata->qa_qd[x]; qd = ip->i_qadata->qa_qd[x];
...@@ -1441,7 +1453,7 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp) ...@@ -1441,7 +1453,7 @@ void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
spin_lock(&qd_lock); spin_lock(&qd_lock);
while (!list_empty(head)) { while (!list_empty(head)) {
qd = list_entry(head->prev, struct gfs2_quota_data, qd_list); qd = list_last_entry(head, struct gfs2_quota_data, qd_list);
list_del(&qd->qd_list); list_del(&qd->qd_list);
...@@ -1476,8 +1488,8 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error) ...@@ -1476,8 +1488,8 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
if (error == 0 || error == -EROFS) if (error == 0 || error == -EROFS)
return; return;
if (!gfs2_withdrawn(sdp)) { if (!gfs2_withdrawn(sdp)) {
fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error); if (!cmpxchg(&sdp->sd_log_error, 0, error))
sdp->sd_log_error = error; fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
wake_up(&sdp->sd_logd_waitq); wake_up(&sdp->sd_logd_waitq);
} }
} }
...@@ -1504,7 +1516,7 @@ static void quotad_check_trunc_list(struct gfs2_sbd *sdp) ...@@ -1504,7 +1516,7 @@ static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
ip = NULL; ip = NULL;
spin_lock(&sdp->sd_trunc_lock); spin_lock(&sdp->sd_trunc_lock);
if (!list_empty(&sdp->sd_trunc_list)) { if (!list_empty(&sdp->sd_trunc_list)) {
ip = list_entry(sdp->sd_trunc_list.next, ip = list_first_entry(&sdp->sd_trunc_list,
struct gfs2_inode, i_trunc_list); struct gfs2_inode, i_trunc_list);
list_del_init(&ip->i_trunc_list); list_del_init(&ip->i_trunc_list);
} }
...@@ -1541,6 +1553,8 @@ int gfs2_quotad(void *data) ...@@ -1541,6 +1553,8 @@ int gfs2_quotad(void *data)
while (!kthread_should_stop()) { while (!kthread_should_stop()) {
if (gfs2_withdrawn(sdp))
goto bypass;
/* Update the master statfs file */ /* Update the master statfs file */
if (sdp->sd_statfs_force_sync) { if (sdp->sd_statfs_force_sync) {
int error = gfs2_statfs_sync(sdp->sd_vfs, 0); int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
...@@ -1561,6 +1575,7 @@ int gfs2_quotad(void *data) ...@@ -1561,6 +1575,7 @@ int gfs2_quotad(void *data)
try_to_freeze(); try_to_freeze();
bypass:
t = min(quotad_timeo, statfs_timeo); t = min(quotad_timeo, statfs_timeo);
prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE); prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
...@@ -1674,7 +1689,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid, ...@@ -1674,7 +1689,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
if (error) if (error)
return error; return error;
error = gfs2_rsqa_alloc(ip); error = gfs2_qa_get(ip);
if (error) if (error)
goto out_put; goto out_put;
...@@ -1743,6 +1758,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid, ...@@ -1743,6 +1758,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
out_q: out_q:
gfs2_glock_dq_uninit(&q_gh); gfs2_glock_dq_uninit(&q_gh);
out_unlockput: out_unlockput:
gfs2_qa_put(ip);
inode_unlock(&ip->i_inode); inode_unlock(&ip->i_inode);
out_put: out_put:
qd_put(qd); qd_put(qd);
......
...@@ -15,8 +15,8 @@ struct gfs2_sbd; ...@@ -15,8 +15,8 @@ struct gfs2_sbd;
#define NO_UID_QUOTA_CHANGE INVALID_UID #define NO_UID_QUOTA_CHANGE INVALID_UID
#define NO_GID_QUOTA_CHANGE INVALID_GID #define NO_GID_QUOTA_CHANGE INVALID_GID
extern int gfs2_qa_alloc(struct gfs2_inode *ip); extern int gfs2_qa_get(struct gfs2_inode *ip);
extern void gfs2_qa_delete(struct gfs2_inode *ip, atomic_t *wcount); extern void gfs2_qa_put(struct gfs2_inode *ip);
extern int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid); extern int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
extern void gfs2_quota_unhold(struct gfs2_inode *ip); extern void gfs2_quota_unhold(struct gfs2_inode *ip);
......
...@@ -111,7 +111,7 @@ void gfs2_revoke_clean(struct gfs2_jdesc *jd) ...@@ -111,7 +111,7 @@ void gfs2_revoke_clean(struct gfs2_jdesc *jd)
struct gfs2_revoke_replay *rr; struct gfs2_revoke_replay *rr;
while (!list_empty(head)) { while (!list_empty(head)) {
rr = list_entry(head->next, struct gfs2_revoke_replay, rr_list); rr = list_first_entry(head, struct gfs2_revoke_replay, rr_list);
list_del(&rr->rr_list); list_del(&rr->rr_list);
kfree(rr); kfree(rr);
} }
...@@ -305,6 +305,11 @@ void gfs2_recover_func(struct work_struct *work) ...@@ -305,6 +305,11 @@ void gfs2_recover_func(struct work_struct *work)
int error = 0; int error = 0;
int jlocked = 0; int jlocked = 0;
if (gfs2_withdrawn(sdp)) {
fs_err(sdp, "jid=%u: Recovery not attempted due to withdraw.\n",
jd->jd_jid);
goto fail;
}
t_start = ktime_get(); t_start = ktime_get();
if (sdp->sd_args.ar_spectator) if (sdp->sd_args.ar_spectator)
goto fail; goto fail;
...@@ -393,6 +398,10 @@ void gfs2_recover_func(struct work_struct *work) ...@@ -393,6 +398,10 @@ void gfs2_recover_func(struct work_struct *work)
fs_info(sdp, "jid=%u: Replaying journal...0x%x to 0x%x\n", fs_info(sdp, "jid=%u: Replaying journal...0x%x to 0x%x\n",
jd->jd_jid, head.lh_tail, head.lh_blkno); jd->jd_jid, head.lh_tail, head.lh_blkno);
/* We take the sd_log_flush_lock here primarily to prevent log
* flushes and simultaneous journal replays from stomping on
* each other wrt sd_log_bio. */
down_read(&sdp->sd_log_flush_lock);
for (pass = 0; pass < 2; pass++) { for (pass = 0; pass < 2; pass++) {
lops_before_scan(jd, &head, pass); lops_before_scan(jd, &head, pass);
error = foreach_descriptor(jd, head.lh_tail, error = foreach_descriptor(jd, head.lh_tail,
...@@ -403,6 +412,7 @@ void gfs2_recover_func(struct work_struct *work) ...@@ -403,6 +412,7 @@ void gfs2_recover_func(struct work_struct *work)
} }
clean_journal(jd, &head); clean_journal(jd, &head);
up_read(&sdp->sd_log_flush_lock);
gfs2_glock_dq_uninit(&thaw_gh); gfs2_glock_dq_uninit(&thaw_gh);
t_rep = ktime_get(); t_rep = ktime_get();
......
...@@ -457,24 +457,24 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd) ...@@ -457,24 +457,24 @@ void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
} }
if (count[0] != rgd->rd_free) { if (count[0] != rgd->rd_free) {
if (gfs2_consist_rgrpd(rgd)) gfs2_lm(sdp, "free data mismatch: %u != %u\n",
fs_err(sdp, "free data mismatch: %u != %u\n", count[0], rgd->rd_free);
count[0], rgd->rd_free); gfs2_consist_rgrpd(rgd);
return; return;
} }
tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes; tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
if (count[1] != tmp) { if (count[1] != tmp) {
if (gfs2_consist_rgrpd(rgd)) gfs2_lm(sdp, "used data mismatch: %u != %u\n",
fs_err(sdp, "used data mismatch: %u != %u\n", count[1], tmp);
count[1], tmp); gfs2_consist_rgrpd(rgd);
return; return;
} }
if (count[2] + count[3] != rgd->rd_dinodes) { if (count[2] + count[3] != rgd->rd_dinodes) {
if (gfs2_consist_rgrpd(rgd)) gfs2_lm(sdp, "used metadata mismatch: %u != %u\n",
fs_err(sdp, "used metadata mismatch: %u != %u\n", count[2] + count[3], rgd->rd_dinodes);
count[2] + count[3], rgd->rd_dinodes); gfs2_consist_rgrpd(rgd);
return; return;
} }
} }
...@@ -590,16 +590,6 @@ void gfs2_free_clones(struct gfs2_rgrpd *rgd) ...@@ -590,16 +590,6 @@ void gfs2_free_clones(struct gfs2_rgrpd *rgd)
} }
} }
/**
* gfs2_rsqa_alloc - make sure we have a reservation assigned to the inode
* plus a quota allocations data structure, if necessary
* @ip: the inode for this reservation
*/
int gfs2_rsqa_alloc(struct gfs2_inode *ip)
{
return gfs2_qa_alloc(ip);
}
static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs, static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs,
const char *fs_id_buf) const char *fs_id_buf)
{ {
...@@ -672,18 +662,17 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs) ...@@ -672,18 +662,17 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
} }
/** /**
* gfs2_rsqa_delete - delete a multi-block reservation and quota allocation * gfs2_rs_delete - delete a multi-block reservation
* @ip: The inode for this reservation * @ip: The inode for this reservation
* @wcount: The inode's write count, or NULL * @wcount: The inode's write count, or NULL
* *
*/ */
void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount) void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount)
{ {
down_write(&ip->i_rw_mutex); down_write(&ip->i_rw_mutex);
if ((wcount == NULL) || (atomic_read(wcount) <= 1)) if ((wcount == NULL) || (atomic_read(wcount) <= 1))
gfs2_rs_deltree(&ip->i_res); gfs2_rs_deltree(&ip->i_res);
up_write(&ip->i_rw_mutex); up_write(&ip->i_rw_mutex);
gfs2_qa_delete(ip, wcount);
} }
/** /**
...@@ -720,8 +709,12 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp) ...@@ -720,8 +709,12 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
rb_erase(n, &sdp->sd_rindex_tree); rb_erase(n, &sdp->sd_rindex_tree);
if (gl) { if (gl) {
glock_clear_object(gl, rgd); if (gl->gl_state != LM_ST_UNLOCKED) {
gfs2_glock_cb(gl, LM_ST_UNLOCKED);
flush_delayed_work(&gl->gl_work);
}
gfs2_rgrp_brelse(rgd); gfs2_rgrp_brelse(rgd);
glock_clear_object(gl, rgd);
gfs2_glock_put(gl); gfs2_glock_put(gl);
} }
...@@ -733,17 +726,6 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp) ...@@ -733,17 +726,6 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
} }
} }
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
struct gfs2_sbd *sdp = rgd->rd_sbd;
fs_info(sdp, "ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
fs_info(sdp, "ri_length = %u\n", rgd->rd_length);
fs_info(sdp, "ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
fs_info(sdp, "ri_data = %u\n", rgd->rd_data);
fs_info(sdp, "ri_bitbytes = %u\n", rgd->rd_bitbytes);
}
/** /**
* gfs2_compute_bitstructs - Compute the bitmap sizes * gfs2_compute_bitstructs - Compute the bitmap sizes
* @rgd: The resource group descriptor * @rgd: The resource group descriptor
...@@ -814,11 +796,20 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd) ...@@ -814,11 +796,20 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
} }
bi = rgd->rd_bits + (length - 1); bi = rgd->rd_bits + (length - 1);
if ((bi->bi_start + bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) { if ((bi->bi_start + bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) {
if (gfs2_consist_rgrpd(rgd)) { gfs2_lm(sdp,
gfs2_rindex_print(rgd); "ri_addr = %llu\n"
fs_err(sdp, "start=%u len=%u offset=%u\n", "ri_length = %u\n"
bi->bi_start, bi->bi_bytes, bi->bi_offset); "ri_data0 = %llu\n"
} "ri_data = %u\n"
"ri_bitbytes = %u\n"
"start=%u len=%u offset=%u\n",
(unsigned long long)rgd->rd_addr,
rgd->rd_length,
(unsigned long long)rgd->rd_data0,
rgd->rd_data,
rgd->rd_bitbytes,
bi->bi_start, bi->bi_bytes, bi->bi_offset);
gfs2_consist_rgrpd(rgd);
return -EIO; return -EIO;
} }
...@@ -1286,23 +1277,6 @@ void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd) ...@@ -1286,23 +1277,6 @@ void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
bi->bi_bh = NULL; bi->bi_bh = NULL;
} }
} }
}
/**
* gfs2_rgrp_go_unlock - Unlock a rgrp glock
* @gh: The glock holder for the resource group
*
*/
void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
{
struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
int demote_requested = test_bit(GLF_DEMOTE, &gh->gh_gl->gl_flags) |
test_bit(GLF_PENDING_DEMOTE, &gh->gh_gl->gl_flags);
if (rgd && demote_requested)
gfs2_rgrp_brelse(rgd);
} }
int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
...@@ -1832,10 +1806,8 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip ...@@ -1832,10 +1806,8 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip
struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 }; struct gfs2_rbm rbm = { .rgd = rgd, .bii = 0, .offset = 0 };
while (1) { while (1) {
down_write(&sdp->sd_log_flush_lock);
error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL, error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL,
true); true);
up_write(&sdp->sd_log_flush_lock);
if (error == -ENOSPC) if (error == -ENOSPC)
break; break;
if (WARN_ON_ONCE(error)) if (WARN_ON_ONCE(error))
......
...@@ -33,7 +33,6 @@ extern int gfs2_rindex_update(struct gfs2_sbd *sdp); ...@@ -33,7 +33,6 @@ extern int gfs2_rindex_update(struct gfs2_sbd *sdp);
extern void gfs2_free_clones(struct gfs2_rgrpd *rgd); extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
extern int gfs2_rgrp_go_lock(struct gfs2_holder *gh); extern int gfs2_rgrp_go_lock(struct gfs2_holder *gh);
extern void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd); extern void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);
extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh);
extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip); extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
...@@ -45,9 +44,8 @@ extern void gfs2_inplace_release(struct gfs2_inode *ip); ...@@ -45,9 +44,8 @@ extern void gfs2_inplace_release(struct gfs2_inode *ip);
extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n, extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
bool dinode, u64 *generation); bool dinode, u64 *generation);
extern int gfs2_rsqa_alloc(struct gfs2_inode *ip);
extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs); extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
extern void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount); extern void gfs2_rs_delete(struct gfs2_inode *ip, atomic_t *wcount);
extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd, extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
u64 bstart, u32 blen, int meta); u64 bstart, u32 blen, int meta);
extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd, extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
......
...@@ -61,11 +61,13 @@ void gfs2_jindex_free(struct gfs2_sbd *sdp) ...@@ -61,11 +61,13 @@ void gfs2_jindex_free(struct gfs2_sbd *sdp)
sdp->sd_journals = 0; sdp->sd_journals = 0;
spin_unlock(&sdp->sd_jindex_spin); spin_unlock(&sdp->sd_jindex_spin);
sdp->sd_jdesc = NULL;
while (!list_empty(&list)) { while (!list_empty(&list)) {
jd = list_entry(list.next, struct gfs2_jdesc, jd_list); jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
gfs2_free_journal_extents(jd); gfs2_free_journal_extents(jd);
list_del(&jd->jd_list); list_del(&jd->jd_list);
iput(jd->jd_inode); iput(jd->jd_inode);
jd->jd_inode = NULL;
kfree(jd); kfree(jd);
} }
} }
...@@ -171,9 +173,13 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) ...@@ -171,9 +173,13 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
goto fail_threads; goto fail_threads;
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
if (gfs2_withdrawn(sdp)) {
error = -EIO;
goto fail;
}
error = gfs2_find_jhead(sdp->sd_jdesc, &head, false); error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
if (error) if (error || gfs2_withdrawn(sdp))
goto fail; goto fail;
if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) { if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
...@@ -187,7 +193,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) ...@@ -187,7 +193,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
gfs2_log_pointers_init(sdp, head.lh_blkno); gfs2_log_pointers_init(sdp, head.lh_blkno);
error = gfs2_quota_init(sdp); error = gfs2_quota_init(sdp);
if (error) if (error || gfs2_withdrawn(sdp))
goto fail; goto fail;
set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
...@@ -446,7 +452,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp) ...@@ -446,7 +452,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
out: out:
while (!list_empty(&list)) { while (!list_empty(&list)) {
lfcc = list_entry(list.next, struct lfcc, list); lfcc = list_first_entry(&list, struct lfcc, list);
list_del(&lfcc->list); list_del(&lfcc->list);
gfs2_glock_dq_uninit(&lfcc->gh); gfs2_glock_dq_uninit(&lfcc->gh);
kfree(lfcc); kfree(lfcc);
...@@ -599,34 +605,63 @@ static void gfs2_dirty_inode(struct inode *inode, int flags) ...@@ -599,34 +605,63 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
int gfs2_make_fs_ro(struct gfs2_sbd *sdp) int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{ {
struct gfs2_holder freeze_gh; struct gfs2_holder freeze_gh;
int error; int error = 0;
int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, GL_NOCACHE,
&freeze_gh); gfs2_holder_mark_uninitialized(&freeze_gh);
if (error && !gfs2_withdrawn(sdp)) if (sdp->sd_freeze_gl &&
return error; !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
if (!log_write_allowed) {
error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
LM_ST_SHARED, GL_NOCACHE |
LM_FLAG_TRY, &freeze_gh);
if (error == GLR_TRYFAILED)
error = 0;
} else {
error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
LM_ST_SHARED, GL_NOCACHE,
&freeze_gh);
if (error && !gfs2_withdrawn(sdp))
return error;
}
}
flush_workqueue(gfs2_delete_workqueue); flush_workqueue(gfs2_delete_workqueue);
if (sdp->sd_quotad_process) if (!log_write_allowed && current == sdp->sd_quotad_process)
fs_warn(sdp, "The quotad daemon is withdrawing.\n");
else if (sdp->sd_quotad_process)
kthread_stop(sdp->sd_quotad_process); kthread_stop(sdp->sd_quotad_process);
sdp->sd_quotad_process = NULL; sdp->sd_quotad_process = NULL;
if (sdp->sd_logd_process)
if (!log_write_allowed && current == sdp->sd_logd_process)
fs_warn(sdp, "The logd daemon is withdrawing.\n");
else if (sdp->sd_logd_process)
kthread_stop(sdp->sd_logd_process); kthread_stop(sdp->sd_logd_process);
sdp->sd_logd_process = NULL; sdp->sd_logd_process = NULL;
gfs2_quota_sync(sdp->sd_vfs, 0); if (log_write_allowed) {
gfs2_statfs_sync(sdp->sd_vfs, 0); gfs2_quota_sync(sdp->sd_vfs, 0);
gfs2_statfs_sync(sdp->sd_vfs, 0);
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
GFS2_LFC_MAKE_FS_RO);
wait_event(sdp->sd_reserving_log_wait, atomic_read(&sdp->sd_reserving_log) == 0);
gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
GFS2_LFC_MAKE_FS_RO);
wait_event(sdp->sd_reserving_log_wait,
atomic_read(&sdp->sd_reserving_log) == 0);
gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) ==
sdp->sd_jdesc->jd_blocks);
} else {
wait_event_timeout(sdp->sd_reserving_log_wait,
atomic_read(&sdp->sd_reserving_log) == 0,
HZ * 5);
}
if (gfs2_holder_initialized(&freeze_gh)) if (gfs2_holder_initialized(&freeze_gh))
gfs2_glock_dq_uninit(&freeze_gh); gfs2_glock_dq_uninit(&freeze_gh);
gfs2_quota_cleanup(sdp); gfs2_quota_cleanup(sdp);
if (!log_write_allowed)
sdp->sd_vfs->s_flags |= SB_RDONLY;
return error; return error;
} }
...@@ -677,8 +712,10 @@ static void gfs2_put_super(struct super_block *sb) ...@@ -677,8 +712,10 @@ static void gfs2_put_super(struct super_block *sb)
gfs2_glock_put(sdp->sd_freeze_gl); gfs2_glock_put(sdp->sd_freeze_gl);
if (!sdp->sd_args.ar_spectator) { if (!sdp->sd_args.ar_spectator) {
gfs2_glock_dq_uninit(&sdp->sd_journal_gh); if (gfs2_holder_initialized(&sdp->sd_journal_gh))
gfs2_glock_dq_uninit(&sdp->sd_jinode_gh); gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
gfs2_glock_dq_uninit(&sdp->sd_sc_gh); gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
gfs2_glock_dq_uninit(&sdp->sd_qc_gh); gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
iput(sdp->sd_sc_inode); iput(sdp->sd_sc_inode);
...@@ -1356,14 +1393,6 @@ static void gfs2_evict_inode(struct inode *inode) ...@@ -1356,14 +1393,6 @@ static void gfs2_evict_inode(struct inode *inode)
if (gfs2_rs_active(&ip->i_res)) if (gfs2_rs_active(&ip->i_res))
gfs2_rs_deltree(&ip->i_res); gfs2_rs_deltree(&ip->i_res);
if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq(&ip->i_iopen_gh);
}
gfs2_holder_uninit(&ip->i_iopen_gh);
}
if (gfs2_holder_initialized(&gh)) { if (gfs2_holder_initialized(&gh)) {
glock_clear_object(ip->i_gl, ip); glock_clear_object(ip->i_gl, ip);
gfs2_glock_dq_uninit(&gh); gfs2_glock_dq_uninit(&gh);
...@@ -1372,22 +1401,30 @@ static void gfs2_evict_inode(struct inode *inode) ...@@ -1372,22 +1401,30 @@ static void gfs2_evict_inode(struct inode *inode)
fs_warn(sdp, "gfs2_evict_inode: %d\n", error); fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
out: out:
truncate_inode_pages_final(&inode->i_data); truncate_inode_pages_final(&inode->i_data);
gfs2_rsqa_delete(ip, NULL); if (ip->i_qadata)
gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
gfs2_rs_delete(ip, NULL);
gfs2_qa_put(ip);
gfs2_ordered_del_inode(ip); gfs2_ordered_del_inode(ip);
clear_inode(inode); clear_inode(inode);
gfs2_dir_hash_inval(ip); gfs2_dir_hash_inval(ip);
glock_clear_object(ip->i_gl, ip); if (ip->i_gl) {
wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE); glock_clear_object(ip->i_gl, ip);
gfs2_glock_add_to_lru(ip->i_gl); wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
gfs2_glock_put_eventually(ip->i_gl); gfs2_glock_add_to_lru(ip->i_gl);
ip->i_gl = NULL; gfs2_glock_put_eventually(ip->i_gl);
ip->i_gl = NULL;
}
if (gfs2_holder_initialized(&ip->i_iopen_gh)) { if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl; struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
glock_clear_object(gl, ip); glock_clear_object(gl, ip);
ip->i_iopen_gh.gh_flags |= GL_NOCACHE; if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq(&ip->i_iopen_gh);
}
gfs2_glock_hold(gl); gfs2_glock_hold(gl);
gfs2_glock_dq_uninit(&ip->i_iopen_gh); gfs2_holder_uninit(&ip->i_iopen_gh);
gfs2_glock_put_eventually(gl); gfs2_glock_put_eventually(gl);
} }
} }
...@@ -1401,6 +1438,7 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb) ...@@ -1401,6 +1438,7 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb)
return NULL; return NULL;
ip->i_flags = 0; ip->i_flags = 0;
ip->i_gl = NULL; ip->i_gl = NULL;
gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
memset(&ip->i_res, 0, sizeof(ip->i_res)); memset(&ip->i_res, 0, sizeof(ip->i_res));
RB_CLEAR_NODE(&ip->i_res.rs_node); RB_CLEAR_NODE(&ip->i_res.rs_node);
ip->i_rahead = 0; ip->i_rahead = 0;
......
...@@ -26,7 +26,6 @@ extern void gfs2_jindex_free(struct gfs2_sbd *sdp); ...@@ -26,7 +26,6 @@ extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid); extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
extern int gfs2_jdesc_check(struct gfs2_jdesc *jd); extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename, extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
struct gfs2_inode **ipp); struct gfs2_inode **ipp);
......
...@@ -136,7 +136,8 @@ static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len) ...@@ -136,7 +136,8 @@ static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
if (val != 1) if (val != 1)
return -EINVAL; return -EINVAL;
gfs2_lm_withdraw(sdp, "withdrawing from cluster at user's request\n"); gfs2_lm(sdp, "withdrawing from cluster at user's request\n");
gfs2_withdraw(sdp);
return len; return len;
} }
...@@ -434,6 +435,8 @@ int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid) ...@@ -434,6 +435,8 @@ int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
* never clear the DFL_BLOCK_LOCKS flag, so all our locks would * never clear the DFL_BLOCK_LOCKS flag, so all our locks would
* permanently stop working. * permanently stop working.
*/ */
if (!sdp->sd_jdesc)
goto out;
if (sdp->sd_jdesc->jd_jid == jid && !sdp->sd_args.ar_spectator) if (sdp->sd_jdesc->jd_jid == jid && !sdp->sd_args.ar_spectator)
goto out; goto out;
rv = -ENOENT; rv = -ENOENT;
......
...@@ -228,6 +228,10 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh) ...@@ -228,6 +228,10 @@ void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
fs_info(sdp, "GFS2:adding buf while frozen\n"); fs_info(sdp, "GFS2:adding buf while frozen\n");
gfs2_assert_withdraw(sdp, 0); gfs2_assert_withdraw(sdp, 0);
} }
if (unlikely(gfs2_withdrawn(sdp))) {
fs_info(sdp, "GFS2:adding buf while withdrawn! 0x%llx\n",
(unsigned long long)bd->bd_bh->b_blocknr);
}
gfs2_pin(sdp, bd->bd_bh); gfs2_pin(sdp, bd->bd_bh);
mh->__pad0 = cpu_to_be64(0); mh->__pad0 = cpu_to_be64(0);
mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid); mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
......
This diff is collapsed.
...@@ -36,41 +36,59 @@ do { \ ...@@ -36,41 +36,59 @@ do { \
} while (0) } while (0)
int gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion, void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
const char *function, char *file, unsigned int line); const char *function, char *file, unsigned int line,
bool delayed);
#define gfs2_assert_withdraw(sdp, assertion) \ #define gfs2_assert_withdraw(sdp, assertion) \
((likely(assertion)) ? 0 : gfs2_assert_withdraw_i((sdp), #assertion, \ ({ \
__func__, __FILE__, __LINE__)) bool _bool = (assertion); \
if (unlikely(!_bool)) \
gfs2_assert_withdraw_i((sdp), #assertion, \
int gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion, __func__, __FILE__, __LINE__, false); \
const char *function, char *file, unsigned int line); !_bool; \
})
#define gfs2_assert_withdraw_delayed(sdp, assertion) \
({ \
bool _bool = (assertion); \
if (unlikely(!_bool)) \
gfs2_assert_withdraw_i((sdp), #assertion, \
__func__, __FILE__, __LINE__, true); \
!_bool; \
})
void gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion,
const char *function, char *file, unsigned int line);
#define gfs2_assert_warn(sdp, assertion) \ #define gfs2_assert_warn(sdp, assertion) \
((likely(assertion)) ? 0 : gfs2_assert_warn_i((sdp), #assertion, \ ({ \
__func__, __FILE__, __LINE__)) bool _bool = (assertion); \
if (unlikely(!_bool)) \
gfs2_assert_warn_i((sdp), #assertion, \
__func__, __FILE__, __LINE__); \
!_bool; \
})
int gfs2_consist_i(struct gfs2_sbd *sdp, int cluster_wide, void gfs2_consist_i(struct gfs2_sbd *sdp,
const char *function, char *file, unsigned int line); const char *function, char *file, unsigned int line);
#define gfs2_consist(sdp) \ #define gfs2_consist(sdp) \
gfs2_consist_i((sdp), 0, __func__, __FILE__, __LINE__) gfs2_consist_i((sdp), __func__, __FILE__, __LINE__)
int gfs2_consist_inode_i(struct gfs2_inode *ip, int cluster_wide, void gfs2_consist_inode_i(struct gfs2_inode *ip,
const char *function, char *file, unsigned int line); const char *function, char *file, unsigned int line);
#define gfs2_consist_inode(ip) \ #define gfs2_consist_inode(ip) \
gfs2_consist_inode_i((ip), 0, __func__, __FILE__, __LINE__) gfs2_consist_inode_i((ip), __func__, __FILE__, __LINE__)
int gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd, int cluster_wide, void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd,
const char *function, char *file, unsigned int line); const char *function, char *file, unsigned int line);
#define gfs2_consist_rgrpd(rgd) \ #define gfs2_consist_rgrpd(rgd) \
gfs2_consist_rgrpd_i((rgd), 0, __func__, __FILE__, __LINE__) gfs2_consist_rgrpd_i((rgd), __func__, __FILE__, __LINE__)
int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh, int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
...@@ -129,6 +147,9 @@ static inline void gfs2_metatype_set(struct buffer_head *bh, u16 type, ...@@ -129,6 +147,9 @@ static inline void gfs2_metatype_set(struct buffer_head *bh, u16 type,
int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
char *file, unsigned int line); char *file, unsigned int line);
extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
bool verbose);
#define gfs2_io_error(sdp) \ #define gfs2_io_error(sdp) \
gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__); gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__);
...@@ -164,19 +185,30 @@ static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt, ...@@ -164,19 +185,30 @@ static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
return x; return x;
} }
/**
* gfs2_withdraw_delayed - withdraw as soon as possible without deadlocks
* @sdp: the superblock
*/
static inline void gfs2_withdraw_delayed(struct gfs2_sbd *sdp)
{
set_bit(SDF_WITHDRAWING, &sdp->sd_flags);
}
/** /**
* gfs2_withdrawn - test whether the file system is withdrawing or withdrawn * gfs2_withdrawn - test whether the file system is withdrawing or withdrawn
* @sdp: the superblock * @sdp: the superblock
*/ */
static inline bool gfs2_withdrawn(struct gfs2_sbd *sdp) static inline bool gfs2_withdrawn(struct gfs2_sbd *sdp)
{ {
return test_bit(SDF_WITHDRAWN, &sdp->sd_flags); return test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
test_bit(SDF_WITHDRAWING, &sdp->sd_flags);
} }
#define gfs2_tune_get(sdp, field) \ #define gfs2_tune_get(sdp, field) \
gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field) gfs2_tune_get_i(&(sdp)->sd_tune, &(sdp)->sd_tune.field)
__printf(2, 3) __printf(2, 3)
int gfs2_lm_withdraw(struct gfs2_sbd *sdp, const char *fmt, ...); void gfs2_lm(struct gfs2_sbd *sdp, const char *fmt, ...);
int gfs2_withdraw(struct gfs2_sbd *sdp);
#endif /* __UTIL_DOT_H__ */ #endif /* __UTIL_DOT_H__ */
...@@ -1222,7 +1222,7 @@ static int gfs2_xattr_set(const struct xattr_handler *handler, ...@@ -1222,7 +1222,7 @@ static int gfs2_xattr_set(const struct xattr_handler *handler,
struct gfs2_holder gh; struct gfs2_holder gh;
int ret; int ret;
ret = gfs2_rsqa_alloc(ip); ret = gfs2_qa_get(ip);
if (ret) if (ret)
return ret; return ret;
...@@ -1231,15 +1231,19 @@ static int gfs2_xattr_set(const struct xattr_handler *handler, ...@@ -1231,15 +1231,19 @@ static int gfs2_xattr_set(const struct xattr_handler *handler,
if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (ret) if (ret)
return ret; goto out;
} else { } else {
if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE)) if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE)) {
return -EIO; ret = -EIO;
goto out;
}
gfs2_holder_mark_uninitialized(&gh); gfs2_holder_mark_uninitialized(&gh);
} }
ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags); ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags);
if (gfs2_holder_initialized(&gh)) if (gfs2_holder_initialized(&gh))
gfs2_glock_dq_uninit(&gh); gfs2_glock_dq_uninit(&gh);
out:
gfs2_qa_put(ip);
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment