Commit 9518ae6e authored by Linus Torvalds

Merge tag 'gfs2-for-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

 - Properly fix the glock shrinker this time: it broke in commit "gfs2:
   Make glock lru list scanning safer" and commit "gfs2: fix glock
   shrinker ref issues" wasn't actually enough to fix it

 - On unmount, keep glocks around long enough that no more dlm callbacks
   can occur on them

 - Some more folio conversion patches from Matthew Wilcox

 - Lots of other smaller fixes and cleanups

* tag 'gfs2-for-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (27 commits)
  gfs2: make timeout values more explicit
  gfs2: Convert gfs2_aspace_writepage() to use a folio
  gfs2: Add a migrate_folio operation for journalled files
  gfs2: Simplify gfs2_read_super
  gfs2: Convert gfs2_page_mkwrite() to use a folio
  gfs2: gfs2_freeze_unlock cleanup
  gfs2: Remove and replace gfs2_glock_queue_work
  gfs2: do_xmote fixes
  gfs2: finish_xmote cleanup
  gfs2: Unlock fewer glocks on unmount
  gfs2: Fix potential glock use-after-free on unmount
  gfs2: Remove ill-placed consistency check
  gfs2: Fix lru_count accounting
  gfs2: Fix "Make glock lru list scanning safer"
  Revert "gfs2: fix glock shrinker ref issues"
  gfs2: Fix "ignore unlock failures after withdraw"
  gfs2: Get rid of unnecessary test_and_set_bit
  gfs2: Don't set GLF_LOCK in gfs2_dispose_glock_lru
  gfs2: Replace gfs2_glock_queue_put with gfs2_glock_put_async
  gfs2: Get rid of gfs2_glock_queue_put in signal_our_withdraw
  ...
parents 6fffab66 c1c53c26
@@ -116,8 +116,7 @@ static int gfs2_write_jdata_folio(struct folio *folio,
  * @folio: The folio to write
  * @wbc: The writeback control
  *
- * This is shared between writepage and writepages and implements the
- * core of the writepage operation. If a transaction is required then
+ * Implements the core of write back. If a transaction is required then
  * the checked flag will have been set and the transaction will have
  * already been started before this is called.
  */
@@ -755,6 +754,7 @@ static const struct address_space_operations gfs2_jdata_aops = {
 	.readahead = gfs2_readahead,
 	.dirty_folio = jdata_dirty_folio,
 	.bmap = gfs2_bmap,
+	.migrate_folio = buffer_migrate_folio,
 	.invalidate_folio = gfs2_invalidate_folio,
 	.release_folio = gfs2_release_folio,
 	.is_partially_uptodate = block_is_partially_uptodate,
...
@@ -1827,7 +1827,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length)
 			gfs2_assert_withdraw(sdp, bh);
 			if (gfs2_assert_withdraw(sdp,
 					prev_bnr != bh->b_blocknr)) {
-				fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u,"
+				fs_emerg(sdp, "inode %llu, block:%llu, i_h:%u, "
 					 "s_h:%u, mp_h:%u\n",
 					 (unsigned long long)ip->i_no_addr,
 					 prev_bnr, ip->i_height, strip_h, mp_h);
...
@@ -562,15 +562,18 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
 	int ret = 0;
 
 	ret = gfs2_dirent_offset(GFS2_SB(inode), buf);
-	if (ret < 0)
-		goto consist_inode;
+	if (ret < 0) {
+		gfs2_consist_inode(GFS2_I(inode));
+		return ERR_PTR(-EIO);
+	}
 	offset = ret;
 	prev = NULL;
 	dent = buf + offset;
 	size = be16_to_cpu(dent->de_rec_len);
-	if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1))
-		goto consist_inode;
+	if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size, len, 1)) {
+		gfs2_consist_inode(GFS2_I(inode));
+		return ERR_PTR(-EIO);
+	}
 	do {
 		ret = scan(dent, name, opaque);
 		if (ret)
@@ -582,8 +585,10 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
 		dent = buf + offset;
 		size = be16_to_cpu(dent->de_rec_len);
 		if (gfs2_check_dirent(GFS2_SB(inode), dent, offset, size,
-				      len, 0))
-			goto consist_inode;
+				      len, 0)) {
+			gfs2_consist_inode(GFS2_I(inode));
+			return ERR_PTR(-EIO);
+		}
 	} while(1);
 
 	switch(ret) {
@@ -597,10 +602,6 @@ static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
 		BUG_ON(ret > 0);
 		return ERR_PTR(ret);
 	}
-
-consist_inode:
-	gfs2_consist_inode(GFS2_I(inode));
-	return ERR_PTR(-EIO);
 }
 
 static int dirent_check_reclen(struct gfs2_inode *dip,
@@ -609,14 +610,16 @@ static int dirent_check_reclen(struct gfs2_inode *dip,
 	const void *ptr = d;
 	u16 rec_len = be16_to_cpu(d->de_rec_len);
 
-	if (unlikely(rec_len < sizeof(struct gfs2_dirent)))
-		goto broken;
+	if (unlikely(rec_len < sizeof(struct gfs2_dirent))) {
+		gfs2_consist_inode(dip);
+		return -EIO;
+	}
 	ptr += rec_len;
 	if (ptr < end_p)
 		return rec_len;
 	if (ptr == end_p)
 		return -ENOENT;
-broken:
+
 	gfs2_consist_inode(dip);
 	return -EIO;
 }
...
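The pattern in the two hunks above recurs throughout this series: a shared consistency-error label is replaced by inline handling, so each failure site reports the problem and returns directly. A minimal userspace sketch of the before/after shape (plain C; all names are hypothetical, not gfs2 code):

#include <stdio.h>

static void report_corruption(const char *what)
{
        fprintf(stderr, "consistency error: %s\n", what);
}

/* Before: one goto label serves several checks. */
static int parse_old(int a, int b)
{
        if (a < 0)
                goto corrupt;
        if (b < 0)
                goto corrupt;
        return a + b;
corrupt:
        report_corruption("bad input");
        return -1;
}

/* After: each check handles its own failure and returns. */
static int parse_new(int a, int b)
{
        if (a < 0) {
                report_corruption("a out of range");
                return -1;
        }
        if (b < 0) {
                report_corruption("b out of range");
                return -1;
        }
        return a + b;
}

int main(void)
{
        printf("%d\n", parse_old(1, 2));        /* 3 */
        printf("%d\n", parse_new(1, -2));       /* -1, with a message */
        return 0;
}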
@@ -376,23 +376,23 @@ static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
 }
 
 /**
- * gfs2_allocate_page_backing - Allocate blocks for a write fault
- * @page: The (locked) page to allocate backing for
+ * gfs2_allocate_folio_backing - Allocate blocks for a write fault
+ * @folio: The (locked) folio to allocate backing for
  * @length: Size of the allocation
  *
- * We try to allocate all the blocks required for the page in one go. This
+ * We try to allocate all the blocks required for the folio in one go. This
  * might fail for various reasons, so we keep trying until all the blocks to
- * back this page are allocated. If some of the blocks are already allocated,
+ * back this folio are allocated. If some of the blocks are already allocated,
  * that is ok too.
  */
-static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
+static int gfs2_allocate_folio_backing(struct folio *folio, size_t length)
 {
-	u64 pos = page_offset(page);
+	u64 pos = folio_pos(folio);
 
 	do {
 		struct iomap iomap = { };
 
-		if (gfs2_iomap_alloc(page->mapping->host, pos, length, &iomap))
+		if (gfs2_iomap_alloc(folio->mapping->host, pos, length, &iomap))
 			return -EIO;
 
 		if (length < iomap.length)
@@ -414,16 +414,16 @@ static int gfs2_allocate_page_backing(struct page *page, unsigned int length)
 static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 {
-	struct page *page = vmf->page;
+	struct folio *folio = page_folio(vmf->page);
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct gfs2_alloc_parms ap = {};
-	u64 offset = page_offset(page);
+	u64 pos = folio_pos(folio);
 	unsigned int data_blocks, ind_blocks, rblocks;
 	vm_fault_t ret = VM_FAULT_LOCKED;
 	struct gfs2_holder gh;
-	unsigned int length;
+	size_t length;
 	loff_t size;
 	int err;
@@ -436,23 +436,23 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 		goto out_uninit;
 	}
 
-	/* Check page index against inode size */
+	/* Check folio index against inode size */
 	size = i_size_read(inode);
-	if (offset >= size) {
+	if (pos >= size) {
 		ret = VM_FAULT_SIGBUS;
 		goto out_unlock;
 	}
 
-	/* Update file times before taking page lock */
+	/* Update file times before taking folio lock */
 	file_update_time(vmf->vma->vm_file);
 
-	/* page is wholly or partially inside EOF */
-	if (size - offset < PAGE_SIZE)
-		length = size - offset;
+	/* folio is wholly or partially inside EOF */
+	if (size - pos < folio_size(folio))
+		length = size - pos;
 	else
-		length = PAGE_SIZE;
+		length = folio_size(folio);
 
-	gfs2_size_hint(vmf->vma->vm_file, offset, length);
+	gfs2_size_hint(vmf->vma->vm_file, pos, length);
 
 	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
 	set_bit(GIF_SW_PAGED, &ip->i_flags);
@@ -463,11 +463,12 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 	 */
 	if (!gfs2_is_stuffed(ip) &&
-	    !gfs2_write_alloc_required(ip, offset, length)) {
-		lock_page(page);
-		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+	    !gfs2_write_alloc_required(ip, pos, length)) {
+		folio_lock(folio);
+		if (!folio_test_uptodate(folio) ||
+		    folio->mapping != inode->i_mapping) {
 			ret = VM_FAULT_NOPAGE;
-			unlock_page(page);
+			folio_unlock(folio);
 		}
 		goto out_unlock;
 	}
@@ -504,7 +505,7 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 		goto out_trans_fail;
 	}
 
-	/* Unstuff, if required, and allocate backing blocks for page */
+	/* Unstuff, if required, and allocate backing blocks for folio */
 	if (gfs2_is_stuffed(ip)) {
 		err = gfs2_unstuff_dinode(ip);
 		if (err) {
@@ -513,22 +514,22 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 		}
 	}
 
-	lock_page(page);
+	folio_lock(folio);
 
 	/* If truncated, we must retry the operation, we may have raced
 	 * with the glock demotion code.
 	 */
-	if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+	if (!folio_test_uptodate(folio) || folio->mapping != inode->i_mapping) {
 		ret = VM_FAULT_NOPAGE;
 		goto out_page_locked;
 	}
 
-	err = gfs2_allocate_page_backing(page, length);
+	err = gfs2_allocate_folio_backing(folio, length);
 	if (err)
 		ret = vmf_fs_error(err);
 
 out_page_locked:
 	if (ret != VM_FAULT_LOCKED)
-		unlock_page(page);
+		folio_unlock(folio);
 out_trans_end:
 	gfs2_trans_end(sdp);
 out_trans_fail:
@@ -540,8 +541,8 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
 out_uninit:
 	gfs2_holder_uninit(&gh);
 	if (ret == VM_FAULT_LOCKED) {
-		set_page_dirty(page);
-		wait_for_stable_page(page);
+		folio_mark_dirty(folio);
+		folio_wait_stable(folio);
 	}
 	sb_end_pagefault(inode->i_sb);
 	return ret;
...
@@ -166,19 +166,45 @@ static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
 	return true;
 }
 
-void gfs2_glock_free(struct gfs2_glock *gl)
+static void __gfs2_glock_free(struct gfs2_glock *gl)
 {
-	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-
-	gfs2_glock_assert_withdraw(gl, atomic_read(&gl->gl_revokes) == 0);
 	rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
 	smp_mb();
 	wake_up_glock(gl);
 	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
+}
+
+void gfs2_glock_free(struct gfs2_glock *gl) {
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+	__gfs2_glock_free(gl);
+	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+		wake_up(&sdp->sd_kill_wait);
+}
+
+void gfs2_glock_free_later(struct gfs2_glock *gl) {
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+	spin_lock(&lru_lock);
+	list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
+	spin_unlock(&lru_lock);
 	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
 		wake_up(&sdp->sd_kill_wait);
 }
+
+static void gfs2_free_dead_glocks(struct gfs2_sbd *sdp)
+{
+	struct list_head *list = &sdp->sd_dead_glocks;
+
+	while(!list_empty(list)) {
+		struct gfs2_glock *gl;
+
+		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
+		list_del_init(&gl->gl_lru);
+		__gfs2_glock_free(gl);
+	}
+}
 
 /**
  * gfs2_glock_hold() - increment reference count on glock
  * @gl: The glock to hold
@@ -248,7 +274,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
  * Enqueue the glock on the work queue. Passes one glock reference on to the
  * work queue.
  */
-static void __gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
+static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
 	if (!queue_delayed_work(glock_workqueue, &gl->gl_work, delay)) {
 		/*
 		 * We are holding the lockref spinlock, and the work was still
@@ -261,12 +287,6 @@ static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
 	}
 }
 
-static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay) {
-	spin_lock(&gl->gl_lockref.lock);
-	__gfs2_glock_queue_work(gl, delay);
-	spin_unlock(&gl->gl_lockref.lock);
-}
-
 static void __gfs2_glock_put(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -285,14 +305,6 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
 	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 }
 
-/*
- * Cause the glock to be put in work queue context.
- */
-void gfs2_glock_queue_put(struct gfs2_glock *gl)
-{
-	gfs2_glock_queue_work(gl, 0);
-}
-
 /**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
@@ -307,6 +319,23 @@ void gfs2_glock_put(struct gfs2_glock *gl)
 	__gfs2_glock_put(gl);
 }
 
+/*
+ * gfs2_glock_put_async - Decrement reference count without sleeping
+ * @gl: The glock to put
+ *
+ * Decrement the reference count on glock immediately unless it is the last
+ * reference. Defer putting the last reference to work queue context.
+ */
+void gfs2_glock_put_async(struct gfs2_glock *gl)
+{
+	if (lockref_put_or_lock(&gl->gl_lockref))
+		return;
+
+	GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
+	gfs2_glock_queue_work(gl, 0);
+	spin_unlock(&gl->gl_lockref.lock);
+}
+
 /**
  * may_grant - check if it's ok to grant a new lock
  * @gl: The glock
@@ -591,7 +620,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 	struct gfs2_holder *gh;
 	unsigned state = ret & LM_OUT_ST_MASK;
 
-	spin_lock(&gl->gl_lockref.lock);
 	trace_gfs2_glock_state_change(gl, state);
 	state_change(gl, state);
 	gh = find_first_waiter(gl);
@@ -639,7 +667,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 				gl->gl_target, state);
 			GLOCK_BUG_ON(gl, 1);
 		}
-		spin_unlock(&gl->gl_lockref.lock);
 		return;
 	}
@@ -662,7 +689,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
 	}
 out:
 	clear_bit(GLF_LOCK, &gl->gl_flags);
-	spin_unlock(&gl->gl_lockref.lock);
 }
 
 static bool is_system_glock(struct gfs2_glock *gl)
@@ -690,6 +716,7 @@ __acquires(&gl->gl_lockref.lock)
 {
 	const struct gfs2_glock_operations *glops = gl->gl_ops;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 	unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
 	int ret;
@@ -718,6 +745,9 @@ __acquires(&gl->gl_lockref.lock)
 	    (gl->gl_state == LM_ST_EXCLUSIVE) ||
 	    (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
 		clear_bit(GLF_BLOCKING, &gl->gl_flags);
+	if (!glops->go_inval && !glops->go_sync)
+		goto skip_inval;
+
 	spin_unlock(&gl->gl_lockref.lock);
 	if (glops->go_sync) {
 		ret = glops->go_sync(gl);
@@ -730,6 +760,7 @@ __acquires(&gl->gl_lockref.lock)
 				fs_err(sdp, "Error %d syncing glock \n", ret);
 				gfs2_dump_glock(NULL, gl, true);
 			}
+			spin_lock(&gl->gl_lockref.lock);
 			goto skip_inval;
 		}
 	}
@@ -750,9 +781,10 @@ __acquires(&gl->gl_lockref.lock)
 		glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
 		clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 	}
+	spin_lock(&gl->gl_lockref.lock);
 
 skip_inval:
-	gfs2_glock_hold(gl);
+	gl->gl_lockref.count++;
 	/*
 	 * Check for an error encountered since we called go_sync and go_inval.
 	 * If so, we can't withdraw from the glock code because the withdraw
@@ -795,30 +827,36 @@ __acquires(&gl->gl_lockref.lock)
 			clear_bit(GLF_LOCK, &gl->gl_flags);
 			clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
 			gfs2_glock_queue_work(gl, GL_GLOCK_DFT_HOLD);
-			goto out;
+			return;
 		} else {
 			clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 		}
 	}
 
-	if (sdp->sd_lockstruct.ls_ops->lm_lock) {
-		/* lock_dlm */
-		ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
+	if (ls->ls_ops->lm_lock) {
+		spin_unlock(&gl->gl_lockref.lock);
+		ret = ls->ls_ops->lm_lock(gl, target, lck_flags);
+		spin_lock(&gl->gl_lockref.lock);
+
 		if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED &&
 		    target == LM_ST_UNLOCKED &&
-		    test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) {
-			finish_xmote(gl, target);
-			gfs2_glock_queue_work(gl, 0);
+		    test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+			/*
+			 * The lockspace has been released and the lock has
+			 * been unlocked implicitly.
+			 */
 		} else if (ret) {
 			fs_err(sdp, "lm_lock ret %d\n", ret);
-			GLOCK_BUG_ON(gl, !gfs2_withdrawing_or_withdrawn(sdp));
+			target = gl->gl_state | LM_OUT_ERROR;
+		} else {
+			/* The operation will be completed asynchronously. */
+			return;
 		}
-	} else { /* lock_nolock */
-		finish_xmote(gl, target);
-		gfs2_glock_queue_work(gl, 0);
 	}
-out:
-	spin_lock(&gl->gl_lockref.lock);
+
+	/* Complete the operation now. */
+	finish_xmote(gl, target);
+	gfs2_glock_queue_work(gl, 0);
 }
 
 /**
@@ -834,8 +872,9 @@ __acquires(&gl->gl_lockref.lock)
 {
 	struct gfs2_holder *gh = NULL;
 
-	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
+	if (test_bit(GLF_LOCK, &gl->gl_flags))
 		return;
+	set_bit(GLF_LOCK, &gl->gl_flags);
 
 	GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
@@ -865,7 +904,7 @@ __acquires(&gl->gl_lockref.lock)
 		clear_bit(GLF_LOCK, &gl->gl_flags);
 		smp_mb__after_atomic();
 		gl->gl_lockref.count++;
-		__gfs2_glock_queue_work(gl, 0);
+		gfs2_glock_queue_work(gl, 0);
 		return;
 
 out_unlock:
@@ -1071,11 +1110,12 @@ static void glock_work_func(struct work_struct *work)
 	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
 	unsigned int drop_refs = 1;
 
-	if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+	spin_lock(&gl->gl_lockref.lock);
+	if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
+		clear_bit(GLF_REPLY_PENDING, &gl->gl_flags);
 		finish_xmote(gl, gl->gl_reply);
 		drop_refs++;
 	}
-	spin_lock(&gl->gl_lockref.lock);
 	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
 	    gl->gl_state != LM_ST_UNLOCKED &&
 	    gl->gl_demote_state != LM_ST_EXCLUSIVE) {
@@ -1096,12 +1136,12 @@ static void glock_work_func(struct work_struct *work)
 			drop_refs--;
 		if (gl->gl_name.ln_type != LM_TYPE_INODE)
 			delay = 0;
-		__gfs2_glock_queue_work(gl, delay);
+		gfs2_glock_queue_work(gl, delay);
 	}
 
 	/*
 	 * Drop the remaining glock references manually here. (Mind that
-	 * __gfs2_glock_queue_work depends on the lockref spinlock begin held
+	 * gfs2_glock_queue_work depends on the lockref spinlock begin held
 	 * here as well.)
 	 */
 	gl->gl_lockref.count -= drop_refs;
@@ -1606,7 +1646,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
 		     test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
 		set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
 		gl->gl_lockref.count++;
-		__gfs2_glock_queue_work(gl, 0);
+		gfs2_glock_queue_work(gl, 0);
 	}
 	run_queue(gl, 1);
 	spin_unlock(&gl->gl_lockref.lock);
@@ -1672,7 +1712,7 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
 		    !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
 		    gl->gl_name.ln_type == LM_TYPE_INODE)
 			delay = gl->gl_hold_time;
-		__gfs2_glock_queue_work(gl, delay);
+		gfs2_glock_queue_work(gl, delay);
 	}
 }
@@ -1896,7 +1936,7 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
 			delay = gl->gl_hold_time;
 	}
 	handle_callback(gl, state, delay, true);
-	__gfs2_glock_queue_work(gl, delay);
+	gfs2_glock_queue_work(gl, delay);
 	spin_unlock(&gl->gl_lockref.lock);
 }
@@ -1956,7 +1996,7 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 
 	gl->gl_lockref.count++;
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-	__gfs2_glock_queue_work(gl, 0);
+	gfs2_glock_queue_work(gl, 0);
 	spin_unlock(&gl->gl_lockref.lock);
 }
@@ -1976,6 +2016,14 @@ static int glock_cmp(void *priv, const struct list_head *a,
 	return 0;
 }
 
+static bool can_free_glock(struct gfs2_glock *gl)
+{
+	bool held = gl->gl_state != LM_ST_UNLOCKED;
+
+	return !test_bit(GLF_LOCK, &gl->gl_flags) &&
+	       gl->gl_lockref.count == held;
+}
+
 /**
  * gfs2_dispose_glock_lru - Demote a list of glocks
  * @list: The list to dispose of
@@ -1990,37 +2038,38 @@ static int glock_cmp(void *priv, const struct list_head *a,
  * private)
  */
 
-static void gfs2_dispose_glock_lru(struct list_head *list)
+static unsigned long gfs2_dispose_glock_lru(struct list_head *list)
 __releases(&lru_lock)
 __acquires(&lru_lock)
 {
 	struct gfs2_glock *gl;
+	unsigned long freed = 0;
 
 	list_sort(NULL, list, glock_cmp);
 
 	while(!list_empty(list)) {
 		gl = list_first_entry(list, struct gfs2_glock, gl_lru);
-		list_del_init(&gl->gl_lru);
-		clear_bit(GLF_LRU, &gl->gl_flags);
 		if (!spin_trylock(&gl->gl_lockref.lock)) {
add_back_to_lru:
-			list_add(&gl->gl_lru, &lru_list);
-			set_bit(GLF_LRU, &gl->gl_flags);
-			atomic_inc(&lru_count);
+			list_move(&gl->gl_lru, &lru_list);
 			continue;
 		}
-		if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+		if (!can_free_glock(gl)) {
 			spin_unlock(&gl->gl_lockref.lock);
 			goto add_back_to_lru;
 		}
+		list_del_init(&gl->gl_lru);
+		atomic_dec(&lru_count);
+		clear_bit(GLF_LRU, &gl->gl_flags);
+		freed++;
 		gl->gl_lockref.count++;
 		if (demote_ok(gl))
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
-		WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
-		__gfs2_glock_queue_work(gl, 0);
+		gfs2_glock_queue_work(gl, 0);
 		spin_unlock(&gl->gl_lockref.lock);
 		cond_resched_lock(&lru_lock);
 	}
+	return freed;
 }
 
 /**
@@ -2032,32 +2081,21 @@ __acquires(&lru_lock)
  * gfs2_dispose_glock_lru() above.
  */
 
-static long gfs2_scan_glock_lru(int nr)
+static unsigned long gfs2_scan_glock_lru(unsigned long nr)
 {
 	struct gfs2_glock *gl, *next;
 	LIST_HEAD(dispose);
-	long freed = 0;
+	unsigned long freed = 0;
 
 	spin_lock(&lru_lock);
 	list_for_each_entry_safe(gl, next, &lru_list, gl_lru) {
-		if (nr-- <= 0)
+		if (!nr--)
 			break;
-		/* Test for being demotable */
-		if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
-			if (!spin_trylock(&gl->gl_lockref.lock))
-				continue;
-			if (gl->gl_lockref.count <= 1 &&
-			    (gl->gl_state == LM_ST_UNLOCKED ||
-			     demote_ok(gl))) {
-				list_move(&gl->gl_lru, &dispose);
-				atomic_dec(&lru_count);
-				freed++;
-			}
-			spin_unlock(&gl->gl_lockref.lock);
-		}
+		if (can_free_glock(gl))
+			list_move(&gl->gl_lru, &dispose);
 	}
 	if (!list_empty(&dispose))
-		gfs2_dispose_glock_lru(&dispose);
+		freed = gfs2_dispose_glock_lru(&dispose);
 	spin_unlock(&lru_lock);
 
 	return freed;
@@ -2148,8 +2186,11 @@ static void thaw_glock(struct gfs2_glock *gl)
 		return;
 	if (!lockref_get_not_dead(&gl->gl_lockref))
 		return;
+
+	spin_lock(&gl->gl_lockref.lock);
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
 	gfs2_glock_queue_work(gl, 0);
+	spin_unlock(&gl->gl_lockref.lock);
 }
 
 /**
@@ -2167,7 +2208,7 @@ static void clear_glock(struct gfs2_glock *gl)
 		gl->gl_lockref.count++;
 		if (gl->gl_state != LM_ST_UNLOCKED)
 			handle_callback(gl, LM_ST_UNLOCKED, 0, false);
-		__gfs2_glock_queue_work(gl, 0);
+		gfs2_glock_queue_work(gl, 0);
 	}
 	spin_unlock(&gl->gl_lockref.lock);
 }
@@ -2225,6 +2266,8 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
 	wait_event_timeout(sdp->sd_kill_wait,
 			   atomic_read(&sdp->sd_glock_disposal) == 0,
 			   HZ * 600);
+	gfs2_lm_unmount(sdp);
+	gfs2_free_dead_glocks(sdp);
 	glock_hash_walk(dump_glock_func, sdp);
 }
@@ -2529,8 +2572,7 @@ static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
 	if (gl) {
 		if (n == 0)
 			return;
-		if (!lockref_put_not_zero(&gl->gl_lockref))
-			gfs2_glock_queue_put(gl);
+		gfs2_glock_put_async(gl);
 	}
 	for (;;) {
 		gl = rhashtable_walk_next(&gi->hti);
...
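gfs2_glock_put_async() above leans on lockref_put_or_lock(): the reference count is dropped immediately unless it is the last reference, in which case the caller is left holding the spinlock and hands the final put to the work queue. A minimal userspace model of that shape, with a pthread mutex in place of the lockref spinlock and a print in place of queue_delayed_work() (illustrative only; the real lockref additionally avoids taking the lock on the fast path):

#include <pthread.h>
#include <stdio.h>

struct obj {
        pthread_mutex_t lock;
        int count;
};

/* Like lockref_put_or_lock(): returns 1 if the count was simply
 * decremented; returns 0 with obj->lock held if this is the last ref. */
static int put_or_lock(struct obj *o)
{
        pthread_mutex_lock(&o->lock);
        if (o->count > 1) {
                o->count--;
                pthread_mutex_unlock(&o->lock);
                return 1;
        }
        return 0;       /* count == 1, lock still held */
}

static void queue_async_put(struct obj *o)
{
        /* Stand-in for queueing glock work: a worker would drop the
         * final reference and free the object later. */
        (void)o;
        printf("last reference: deferring free to worker\n");
}

static void put_async(struct obj *o)
{
        if (put_or_lock(o))
                return;
        queue_async_put(o);     /* worker inherits the last reference */
        pthread_mutex_unlock(&o->lock);
}

int main(void)
{
        struct obj o = { PTHREAD_MUTEX_INITIALIZER, 2 };

        put_async(&o);          /* 2 -> 1, nothing deferred */
        put_async(&o);          /* last ref: deferred to worker */
        return 0;
}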
@@ -172,7 +172,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 		   int create, struct gfs2_glock **glp);
 struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
 void gfs2_glock_put(struct gfs2_glock *gl);
-void gfs2_glock_queue_put(struct gfs2_glock *gl);
+void gfs2_glock_put_async(struct gfs2_glock *gl);
 
 void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
 			u16 flags, struct gfs2_holder *gh,
@@ -252,6 +252,7 @@ void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
 void gfs2_glock_thaw(struct gfs2_sbd *sdp);
 void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
 void gfs2_glock_free(struct gfs2_glock *gl);
+void gfs2_glock_free_later(struct gfs2_glock *gl);
 
 int __init gfs2_glock_init(void);
 void gfs2_glock_exit(void);
...
@@ -82,6 +82,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
 	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
 	spin_unlock(&sdp->sd_ail_lock);
 	gfs2_log_unlock(sdp);
+
+	if (gfs2_withdrawing(sdp))
+		gfs2_withdraw(sdp);
 }
@@ -409,10 +412,14 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
 	struct inode *inode = &ip->i_inode;
 	bool is_new = inode->i_state & I_NEW;
 
-	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
-		goto corrupt;
-	if (unlikely(!is_new && inode_wrong_type(inode, mode)))
-		goto corrupt;
+	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) {
+		gfs2_consist_inode(ip);
+		return -EIO;
+	}
+	if (unlikely(!is_new && inode_wrong_type(inode, mode))) {
+		gfs2_consist_inode(ip);
+		return -EIO;
+	}
 	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
 	inode->i_mode = mode;
 	if (is_new) {
@@ -449,26 +456,28 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
 	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
 	gfs2_set_inode_flags(inode);
 	height = be16_to_cpu(str->di_height);
-	if (unlikely(height > sdp->sd_max_height))
-		goto corrupt;
+	if (unlikely(height > sdp->sd_max_height)) {
+		gfs2_consist_inode(ip);
+		return -EIO;
+	}
 	ip->i_height = (u8)height;
 
 	depth = be16_to_cpu(str->di_depth);
-	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
-		goto corrupt;
+	if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) {
+		gfs2_consist_inode(ip);
+		return -EIO;
+	}
 	ip->i_depth = (u8)depth;
 	ip->i_entries = be32_to_cpu(str->di_entries);
 
-	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip))
-		goto corrupt;
+	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip)) {
+		gfs2_consist_inode(ip);
+		return -EIO;
+	}
 	if (S_ISREG(inode->i_mode))
 		gfs2_set_aops(inode);
 
 	return 0;
-corrupt:
-	gfs2_consist_inode(ip);
-	return -EIO;
 }
 
 /**
...
@@ -838,6 +838,7 @@ struct gfs2_sbd {
 	/* For quiescing the filesystem */
 	struct gfs2_holder sd_freeze_gh;
 	struct mutex sd_freeze_mutex;
+	struct list_head sd_dead_glocks;
 
 	char sd_fsname[GFS2_FSNAME_LEN + 3 * sizeof(int) + 2];
 	char sd_table_name[GFS2_FSNAME_LEN];
...
@@ -121,6 +121,11 @@ static void gdlm_ast(void *arg)
 	struct gfs2_glock *gl = arg;
 	unsigned ret = gl->gl_state;
 
+	/* If the glock is dead, we only react to a dlm_unlock() reply. */
+	if (__lockref_is_dead(&gl->gl_lockref) &&
+	    gl->gl_lksb.sb_status != -DLM_EUNLOCK)
+		return;
+
 	gfs2_update_reply_times(gl);
 	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
@@ -171,6 +176,9 @@ static void gdlm_bast(void *arg, int mode)
 {
 	struct gfs2_glock *gl = arg;
 
+	if (__lockref_is_dead(&gl->gl_lockref))
+		return;
+
 	switch (mode) {
 	case DLM_LOCK_EX:
 		gfs2_glock_cb(gl, LM_ST_UNLOCKED);
@@ -291,8 +299,12 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 	int error;
 
-	if (gl->gl_lksb.sb_lkid == 0)
-		goto out_free;
+	BUG_ON(!__lockref_is_dead(&gl->gl_lockref));
+
+	if (gl->gl_lksb.sb_lkid == 0) {
+		gfs2_glock_free(gl);
+		return;
+	}
 
 	clear_bit(GLF_BLOCKING, &gl->gl_flags);
 	gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
@@ -300,13 +312,23 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 	gfs2_update_request_times(gl);
 
 	/* don't want to call dlm if we've unmounted the lock protocol */
-	if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags))
-		goto out_free;
-	/* don't want to skip dlm_unlock writing the lvb when lock has one */
+	if (test_bit(DFL_UNMOUNT, &ls->ls_recover_flags)) {
+		gfs2_glock_free(gl);
+		return;
+	}
+
+	/*
+	 * When the lockspace is released, all remaining glocks will be
+	 * unlocked automatically. This is more efficient than unlocking them
+	 * individually, but when the lock is held in DLM_LOCK_EX or
+	 * DLM_LOCK_PW mode, the lock value block (LVB) will be lost.
+	 */
 
 	if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
-	    !gl->gl_lksb.sb_lvbptr)
-		goto out_free;
+	    (!gl->gl_lksb.sb_lvbptr || gl->gl_state != LM_ST_EXCLUSIVE)) {
+		gfs2_glock_free_later(gl);
+		return;
+	}
 
again:
 	error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
@@ -321,10 +343,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
 			       gl->gl_name.ln_type,
 			       (unsigned long long)gl->gl_name.ln_number, error);
 	}
-	return;
-
-out_free:
-	gfs2_glock_free(gl);
 }
 
 static void gdlm_cancel(struct gfs2_glock *gl)
...
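The rewritten gdlm_put_lock() above picks one of three outcomes: free the glock at once when there is no DLM lock or the lockspace is already gone; defer the free via gfs2_glock_free_later() when the unlock will happen implicitly at lockspace release, so that a late gdlm_ast()/gdlm_bast() callback still finds valid memory; or issue a real dlm_unlock() when skipping it would lose the lock value block. A hedged sketch of that decision tree (names and flags are illustrative, not gfs2 or DLM API):

#include <stdio.h>

enum put_action { FREE_NOW, FREE_LATER, UNLOCK_EXPLICITLY };

static enum put_action put_action(int has_dlm_lock, int unmounting,
                                  int skip_unlock, int lvb_at_risk)
{
        if (!has_dlm_lock)
                return FREE_NOW;                /* no lock to undo */
        if (unmounting)
                return FREE_NOW;                /* lockspace already released */
        if (skip_unlock && !lvb_at_risk)
                return FREE_LATER;              /* lockspace release unlocks it */
        return UNLOCK_EXPLICITLY;               /* must not lose the LVB */
}

int main(void)
{
        static const char *names[] = {
                "FREE_NOW", "FREE_LATER", "UNLOCK_EXPLICITLY"
        };

        printf("%s\n", names[put_action(1, 0, 1, 0)]);  /* FREE_LATER */
        printf("%s\n", names[put_action(1, 0, 1, 1)]);  /* UNLOCK_EXPLICITLY */
        return 0;
}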
@@ -786,7 +786,7 @@ void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
 {
 	if (atomic_dec_return(&gl->gl_revokes) == 0) {
 		clear_bit(GLF_LFLUSH, &gl->gl_flags);
-		gfs2_glock_queue_put(gl);
+		gfs2_glock_put_async(gl);
 	}
 }
@@ -1108,6 +1108,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
 	lops_before_commit(sdp, tr);
 	if (gfs2_withdrawing_or_withdrawn(sdp))
 		goto out_withdraw;
+	if (sdp->sd_jdesc)
 		gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
 	if (gfs2_withdrawing_or_withdrawn(sdp))
 		goto out_withdraw;
...
@@ -32,14 +32,14 @@
 
 static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 {
+	struct folio *folio = page_folio(page);
 	struct buffer_head *bh, *head;
 	int nr_underway = 0;
 	blk_opf_t write_flags = REQ_META | REQ_PRIO | wbc_to_write_flags(wbc);
 
-	BUG_ON(!PageLocked(page));
-	BUG_ON(!page_has_buffers(page));
+	BUG_ON(!folio_test_locked(folio));
 
-	head = page_buffers(page);
+	head = folio_buffers(folio);
 	bh = head;
 	do {
@@ -55,7 +55,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 		if (wbc->sync_mode != WB_SYNC_NONE) {
 			lock_buffer(bh);
 		} else if (!trylock_buffer(bh)) {
-			redirty_page_for_writepage(wbc, page);
+			folio_redirty_for_writepage(wbc, folio);
 			continue;
 		}
 		if (test_clear_buffer_dirty(bh)) {
@@ -69,8 +69,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 	 * The page and its buffers are protected by PageWriteback(), so we can
 	 * drop the bh refcounts early.
 	 */
-	BUG_ON(PageWriteback(page));
-	set_page_writeback(page);
+	BUG_ON(folio_test_writeback(folio));
+	folio_start_writeback(folio);
 
 	do {
 		struct buffer_head *next = bh->b_this_page;
@@ -80,10 +80,10 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 		}
 		bh = next;
 	} while (bh != head);
-	unlock_page(page);
+	folio_unlock(folio);
 
 	if (nr_underway == 0)
-		end_page_writeback(page);
+		folio_end_writeback(folio);
 
 	return 0;
 }
...
@@ -136,6 +136,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	atomic_set(&sdp->sd_log_in_flight, 0);
 	init_waitqueue_head(&sdp->sd_log_flush_wait);
 	mutex_init(&sdp->sd_freeze_mutex);
+	INIT_LIST_HEAD(&sdp->sd_dead_glocks);
 
 	return sdp;
@@ -184,22 +185,10 @@ static int gfs2_check_sb(struct gfs2_sbd *sdp, int silent)
 	return 0;
 }
 
-static void end_bio_io_page(struct bio *bio)
-{
-	struct page *page = bio->bi_private;
-
-	if (!bio->bi_status)
-		SetPageUptodate(page);
-	else
-		pr_warn("error %d reading superblock\n", bio->bi_status);
-	unlock_page(page);
-}
-
-static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
+static void gfs2_sb_in(struct gfs2_sbd *sdp, const struct gfs2_sb *str)
 {
 	struct gfs2_sb_host *sb = &sdp->sd_sb;
 	struct super_block *s = sdp->sd_vfs;
-	const struct gfs2_sb *str = buf;
 
 	sb->sb_magic = be32_to_cpu(str->sb_header.mh_magic);
 	sb->sb_type = be32_to_cpu(str->sb_header.mh_type);
@@ -239,34 +228,26 @@ static void gfs2_sb_in(struct gfs2_sbd *sdp, const void *buf)
 static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 {
 	struct super_block *sb = sdp->sd_vfs;
-	struct gfs2_sb *p;
 	struct page *page;
-	struct bio *bio;
+	struct bio_vec bvec;
+	struct bio bio;
+	int err;
 
-	page = alloc_page(GFP_NOFS);
+	page = alloc_page(GFP_KERNEL);
 	if (unlikely(!page))
 		return -ENOMEM;
 
-	ClearPageUptodate(page);
-	ClearPageDirty(page);
-	lock_page(page);
-
-	bio = bio_alloc(sb->s_bdev, 1, REQ_OP_READ | REQ_META, GFP_NOFS);
-	bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
-	__bio_add_page(bio, page, PAGE_SIZE, 0);
+	bio_init(&bio, sb->s_bdev, &bvec, 1, REQ_OP_READ | REQ_META);
+	bio.bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
+	__bio_add_page(&bio, page, PAGE_SIZE, 0);
 
-	bio->bi_end_io = end_bio_io_page;
-	bio->bi_private = page;
-	submit_bio(bio);
-	wait_on_page_locked(page);
-	bio_put(bio);
-	if (!PageUptodate(page)) {
+	err = submit_bio_wait(&bio);
+	if (err) {
+		pr_warn("error %d reading superblock\n", err);
 		__free_page(page);
-		return -EIO;
+		return err;
 	}
-	p = kmap(page);
-	gfs2_sb_in(sdp, p);
-	kunmap(page);
+	gfs2_sb_in(sdp, page_address(page));
 	__free_page(page);
 	return gfs2_check_sb(sdp, silent);
 }
@@ -1288,7 +1269,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 	error = gfs2_make_fs_rw(sdp);
 	if (error) {
-		gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+		gfs2_freeze_unlock(sdp);
 		gfs2_destroy_threads(sdp);
 		fs_err(sdp, "can't make FS RW: %d\n", error);
 		goto fail_per_node;
...
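gfs2_read_super() above swaps an asynchronous bio with an end-io callback for an on-stack bio driven by submit_bio_wait(), which removes the end-io handler and the page-lock handshake entirely. A rough userspace analogue of the same simplification, replacing callback-driven I/O with a plain synchronous read (illustration only; the sector-times-512 math mirrors the bi_sector computation, and the path and offset are hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

#define SB_SIZE 4096

int main(int argc, char **argv)
{
        unsigned char buf[SB_SIZE];
        off_t sector = 128;     /* hypothetical superblock location */
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <device-or-file>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* Like submit_bio_wait(): block until the read completes,
         * then check the result inline instead of in a callback. */
        if (pread(fd, buf, sizeof(buf), sector * 512) != (ssize_t)sizeof(buf)) {
                perror("pread superblock");
                close(fd);
                return 1;
        }
        printf("first bytes: %02x %02x %02x %02x\n",
               buf[0], buf[1], buf[2], buf[3]);
        close(fd);
        return 0;
}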
@@ -814,11 +814,11 @@ static int compute_bitstructs(struct gfs2_rgrpd *rgd)
 	bi = rgd->rd_bits + (length - 1);
 	if ((bi->bi_start + bi->bi_bytes) * GFS2_NBBY != rgd->rd_data) {
 		gfs2_lm(sdp,
-			"ri_addr = %llu\n"
-			"ri_length = %u\n"
-			"ri_data0 = %llu\n"
-			"ri_data = %u\n"
-			"ri_bitbytes = %u\n"
+			"ri_addr=%llu "
+			"ri_length=%u "
+			"ri_data0=%llu "
+			"ri_data=%u "
+			"ri_bitbytes=%u "
 			"start=%u len=%u offset=%u\n",
 			(unsigned long long)rgd->rd_addr,
 			rgd->rd_length,
...
@@ -67,9 +67,13 @@ void gfs2_jindex_free(struct gfs2_sbd *sdp)
 	sdp->sd_journals = 0;
 	spin_unlock(&sdp->sd_jindex_spin);
 
+	down_write(&sdp->sd_log_flush_lock);
 	sdp->sd_jdesc = NULL;
+	up_write(&sdp->sd_log_flush_lock);
+
 	while (!list_empty(&list)) {
 		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
+		BUG_ON(jd->jd_log_bio);
 		gfs2_free_journal_extents(jd);
 		list_del(&jd->jd_list);
 		iput(jd->jd_inode);
@@ -354,7 +358,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
 		list_add(&lfcc->list, &list);
 	}
 
-	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+	gfs2_freeze_unlock(sdp);
 
 	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
 				   LM_FLAG_NOEXP | GL_NOPID,
@@ -378,7 +382,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
 	if (!error)
 		goto out; /* success */
 
-	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+	gfs2_freeze_unlock(sdp);
 
relock_shared:
 	error2 = gfs2_freeze_lock_shared(sdp);
@@ -617,7 +621,7 @@ static void gfs2_put_super(struct super_block *sb)
 
 	/* Release stuff */
 
-	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+	gfs2_freeze_unlock(sdp);
 
 	iput(sdp->sd_jindex);
 	iput(sdp->sd_statfs_inode);
@@ -646,10 +650,7 @@ static void gfs2_put_super(struct super_block *sb)
 	gfs2_gl_hash_clear(sdp);
 	truncate_inode_pages_final(&sdp->sd_aspace);
 	gfs2_delete_debugfs_file(sdp);
-	/* Unmount the locking protocol */
-	gfs2_lm_unmount(sdp);
 
-	/* At this point, we're through participating in the lockspace */
 	gfs2_sys_fs_del(sdp);
 	free_sbd(sdp);
 }
@@ -706,7 +707,7 @@ void gfs2_freeze_func(struct work_struct *work)
 		if (error)
 			goto freeze_failed;
 
-		gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+		gfs2_freeze_unlock(sdp);
 		set_bit(SDF_FROZEN, &sdp->sd_flags);
 
 		error = gfs2_do_thaw(sdp);
@@ -811,7 +812,7 @@ static int gfs2_thaw_super(struct super_block *sb, enum freeze_holder who)
 	}
 
 	atomic_inc(&sb->s_active);
-	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+	gfs2_freeze_unlock(sdp);
 
 	error = gfs2_do_thaw(sdp);
@@ -832,7 +833,7 @@ void gfs2_thaw_freeze_initiator(struct super_block *sb)
 	if (!test_bit(SDF_FREEZE_INITIATOR, &sdp->sd_flags))
 		goto out;
 
-	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+	gfs2_freeze_unlock(sdp);
 
out:
 	mutex_unlock(&sdp->sd_freeze_mutex);
@@ -1045,7 +1046,7 @@ static int gfs2_drop_inode(struct inode *inode)
 
 		gfs2_glock_hold(gl);
 		if (!gfs2_queue_try_to_evict(gl))
-			gfs2_glock_queue_put(gl);
+			gfs2_glock_put_async(gl);
 		return 0;
 	}
@@ -1251,7 +1252,7 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
 static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
 {
 	if (current->flags & PF_MEMALLOC)
-		gfs2_glock_queue_put(gl);
+		gfs2_glock_put_async(gl);
 	else
 		gfs2_glock_put(gl);
 }
@@ -1261,7 +1262,6 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode)
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct gfs2_holder *gh = &ip->i_iopen_gh;
-	long timeout = 5 * HZ;
 	int error;
 
 	gh->gh_flags |= GL_NOCACHE;
@@ -1292,10 +1292,10 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode)
 	if (error)
 		return false;
 
-	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
+	wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
 		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
 		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
-		timeout);
+		5 * HZ);
 	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
 		gfs2_glock_dq(gh);
 		return false;
...
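The gfs2_jindex_free() hunk above clears sd_jdesc under the write side of sd_log_flush_lock, which is what lets gfs2_log_flush() simply test sd_jdesc before submitting the log bio: a flusher holding the lock either sees a valid journal pointer or NULL, never a half-torn-down journal. A minimal pthreads model of the idea, with a plain mutex standing in for the rwsem and illustrative names that are not gfs2 code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t log_flush_lock = PTHREAD_MUTEX_INITIALIZER;
static struct journal { const char *name; } journal = { "j0" };
static struct journal *jdesc = &journal;

static void log_flush(void)
{
        pthread_mutex_lock(&log_flush_lock);
        if (jdesc)      /* like the new "if (sdp->sd_jdesc)" check */
                printf("submitting log bio for %s\n", jdesc->name);
        else
                printf("journal already gone, skipping\n");
        pthread_mutex_unlock(&log_flush_lock);
}

static void jindex_free(void)
{
        pthread_mutex_lock(&log_flush_lock);
        jdesc = NULL;   /* no flusher can observe this mid-flush */
        pthread_mutex_unlock(&log_flush_lock);
}

int main(void)
{
        log_flush();    /* submits */
        jindex_free();
        log_flush();    /* skips */
        return 0;
}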
@@ -88,7 +88,7 @@ static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
 		     "Withdraw In Prog: %d\n"
 		     "Remote Withdraw: %d\n"
 		     "Withdraw Recovery: %d\n"
-		     "Deactivating: %d\n"
+		     "Killing: %d\n"
 		     "sd_log_error: %d\n"
 		     "sd_log_flush_lock: %d\n"
 		     "sd_log_num_revoke: %u\n"
@@ -336,7 +336,7 @@ static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
 		return -EINVAL;
 	if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
 		fs_info(sdp, "demote interface used\n");
-	rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
+	rv = gfs2_glock_get(sdp, glnum, glops, NO_CREATE, &gl);
 	if (rv)
 		return rv;
 	gfs2_glock_cb(gl, glmode);
...
...@@ -109,10 +109,10 @@ int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp) ...@@ -109,10 +109,10 @@ int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp)
return error; return error;
} }
void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh) void gfs2_freeze_unlock(struct gfs2_sbd *sdp)
{ {
if (gfs2_holder_initialized(freeze_gh)) if (gfs2_holder_initialized(&sdp->sd_freeze_gh))
gfs2_glock_dq_uninit(freeze_gh); gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
} }
static void signal_our_withdraw(struct gfs2_sbd *sdp) static void signal_our_withdraw(struct gfs2_sbd *sdp)
...@@ -255,7 +255,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp) ...@@ -255,7 +255,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
gfs2_glock_nq(&sdp->sd_live_gh); gfs2_glock_nq(&sdp->sd_live_gh);
} }
gfs2_glock_queue_put(live_gl); /* drop extra reference we acquired */ gfs2_glock_put(live_gl); /* drop extra reference we acquired */
clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags); clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
/* /*
...@@ -350,7 +350,6 @@ int gfs2_withdraw(struct gfs2_sbd *sdp) ...@@ -350,7 +350,6 @@ int gfs2_withdraw(struct gfs2_sbd *sdp)
fs_err(sdp, "telling LM to unmount\n"); fs_err(sdp, "telling LM to unmount\n");
lm->lm_unmount(sdp); lm->lm_unmount(sdp);
} }
set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags);
fs_err(sdp, "File system withdrawn\n"); fs_err(sdp, "File system withdrawn\n");
dump_stack(); dump_stack();
clear_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags); clear_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
...@@ -376,8 +375,8 @@ void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion, ...@@ -376,8 +375,8 @@ void gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
return; return;
fs_err(sdp, fs_err(sdp,
"fatal: assertion \"%s\" failed\n" "fatal: assertion \"%s\" failed - "
" function = %s, file = %s, line = %u\n", "function = %s, file = %s, line = %u\n",
assertion, function, file, line); assertion, function, file, line);
/* /*
...@@ -407,7 +406,8 @@ void gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion, ...@@ -407,7 +406,8 @@ void gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion,
return; return;
if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW)
fs_warn(sdp, "warning: assertion \"%s\" failed at function = %s, file = %s, line = %u\n", fs_warn(sdp, "warning: assertion \"%s\" failed - "
"function = %s, file = %s, line = %u\n",
assertion, function, file, line); assertion, function, file, line);
if (sdp->sd_args.ar_debug) if (sdp->sd_args.ar_debug)
@@ -416,10 +416,10 @@ void gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion,
 		dump_stack();
 
 	if (sdp->sd_args.ar_errors == GFS2_ERRORS_PANIC)
-		panic("GFS2: fsid=%s: warning: assertion \"%s\" failed\n"
-		      "GFS2: fsid=%s: function = %s, file = %s, line = %u\n",
+		panic("GFS2: fsid=%s: warning: assertion \"%s\" failed - "
+		      "function = %s, file = %s, line = %u\n",
 		      sdp->sd_fsname, assertion,
-		      sdp->sd_fsname, function, file, line);
+		      function, file, line);
 
 	sdp->sd_last_warning = jiffies;
 }
@@ -432,7 +432,8 @@ void gfs2_consist_i(struct gfs2_sbd *sdp, const char *function,
 		    char *file, unsigned int line)
 {
 	gfs2_lm(sdp,
-		"fatal: filesystem consistency error - function = %s, file = %s, line = %u\n",
+		"fatal: filesystem consistency error - "
+		"function = %s, file = %s, line = %u\n",
 		function, file, line);
 	gfs2_withdraw(sdp);
 }
@@ -447,9 +448,9 @@ void gfs2_consist_inode_i(struct gfs2_inode *ip,
 	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 
 	gfs2_lm(sdp,
-		"fatal: filesystem consistency error\n"
-		"  inode = %llu %llu\n"
-		"  function = %s, file = %s, line = %u\n",
+		"fatal: filesystem consistency error - "
+		"inode = %llu %llu, "
+		"function = %s, file = %s, line = %u\n",
 		(unsigned long long)ip->i_no_formal_ino,
 		(unsigned long long)ip->i_no_addr,
 		function, file, line);
@@ -470,9 +471,9 @@ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd,
 	sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname);
 	gfs2_rgrp_dump(NULL, rgd, fs_id_buf);
 	gfs2_lm(sdp,
-		"fatal: filesystem consistency error\n"
-		"  RG = %llu\n"
-		"  function = %s, file = %s, line = %u\n",
+		"fatal: filesystem consistency error - "
+		"RG = %llu, "
+		"function = %s, file = %s, line = %u\n",
 		(unsigned long long)rgd->rd_addr,
 		function, file, line);
 	gfs2_dump_glock(NULL, rgd->rd_gl, 1);
@@ -486,16 +487,16 @@ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd,
  */
 
 int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
-		       const char *type, const char *function, char *file,
+		       const char *function, char *file,
 		       unsigned int line)
 {
 	int me;
 
 	gfs2_lm(sdp,
-		"fatal: invalid metadata block\n"
-		"  bh = %llu (%s)\n"
-		"  function = %s, file = %s, line = %u\n",
-		(unsigned long long)bh->b_blocknr, type,
+		"fatal: invalid metadata block - "
+		"bh = %llu (bad magic number), "
+		"function = %s, file = %s, line = %u\n",
+		(unsigned long long)bh->b_blocknr,
 		function, file, line);
 	me = gfs2_withdraw(sdp);
 	return (me) ? -1 : -2;
@@ -514,9 +515,9 @@ int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
 	int me;
 
 	gfs2_lm(sdp,
-		"fatal: invalid metadata block\n"
-		"  bh = %llu (type: exp=%u, found=%u)\n"
-		"  function = %s, file = %s, line = %u\n",
+		"fatal: invalid metadata block - "
+		"bh = %llu (type: exp=%u, found=%u), "
+		"function = %s, file = %s, line = %u\n",
 		(unsigned long long)bh->b_blocknr, type, t,
 		function, file, line);
 	me = gfs2_withdraw(sdp);
@@ -533,8 +534,8 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, char *file,
 		    unsigned int line)
 {
 	gfs2_lm(sdp,
-		"fatal: I/O error\n"
-		"  function = %s, file = %s, line = %u\n",
+		"fatal: I/O error - "
+		"function = %s, file = %s, line = %u\n",
 		function, file, line);
 	return gfs2_withdraw(sdp);
 }
@@ -551,9 +552,9 @@ void gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
 	if (gfs2_withdrawing_or_withdrawn(sdp))
 		return;
 
-	fs_err(sdp, "fatal: I/O error\n"
-	       "  block = %llu\n"
-	       "  function = %s, file = %s, line = %u\n",
+	fs_err(sdp, "fatal: I/O error - "
+	       "block = %llu, "
+	       "function = %s, file = %s, line = %u\n",
 	       (unsigned long long)bh->b_blocknr, function, file, line);
 	if (withdraw)
 		gfs2_withdraw(sdp);
...
@@ -92,7 +92,7 @@ gfs2_consist_rgrpd_i((rgd), __func__, __FILE__, __LINE__)
 
 int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
-		       const char *type, const char *function,
+		       const char *function,
 		       char *file, unsigned int line);
 
 static inline int gfs2_meta_check(struct gfs2_sbd *sdp,
@@ -123,7 +123,7 @@ static inline int gfs2_metatype_check_i(struct gfs2_sbd *sdp,
 	u32 magic = be32_to_cpu(mh->mh_magic);
 	u16 t = be32_to_cpu(mh->mh_type);
 
 	if (unlikely(magic != GFS2_MAGIC))
-		return gfs2_meta_check_ii(sdp, bh, "magic number", function,
+		return gfs2_meta_check_ii(sdp, bh, function,
 					  file, line);
 	if (unlikely(t != type))
 		return gfs2_metatype_check_ii(sdp, bh, type, t, function,
@@ -150,7 +150,7 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
 int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
 			bool verbose);
 int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp);
-void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
+void gfs2_freeze_unlock(struct gfs2_sbd *sdp);
 
 #define gfs2_io_error(sdp) \
 gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__)
...
@@ -96,30 +96,34 @@ static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
 		return -EIO;
 
 	for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
-		if (!GFS2_EA_REC_LEN(ea))
-			goto fail;
+		if (!GFS2_EA_REC_LEN(ea)) {
+			gfs2_consist_inode(ip);
+			return -EIO;
+		}
 		if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
-						  bh->b_data + bh->b_size))
-			goto fail;
-		if (!gfs2_eatype_valid(sdp, ea->ea_type))
-			goto fail;
+						  bh->b_data + bh->b_size)) {
+			gfs2_consist_inode(ip);
+			return -EIO;
+		}
+		if (!gfs2_eatype_valid(sdp, ea->ea_type)) {
+			gfs2_consist_inode(ip);
+			return -EIO;
+		}
 		error = ea_call(ip, bh, ea, prev, data);
 		if (error)
 			return error;
 		if (GFS2_EA_IS_LAST(ea)) {
 			if ((char *)GFS2_EA2NEXT(ea) !=
-			    bh->b_data + bh->b_size)
-				goto fail;
+			    bh->b_data + bh->b_size) {
+				gfs2_consist_inode(ip);
+				return -EIO;
+			}
 			break;
 		}
 	}
 
 	return error;
-
-fail:
-	gfs2_consist_inode(ip);
-	return -EIO;
 }
 
 static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
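The restructuring trades one shared error label for inline handling at each check site; a self-contained sketch of the pattern (illustrative only, not gfs2 code — report_error() is a hypothetical stand-in for gfs2_consist_inode()):

	#include <errno.h>

	static void report_error(void) { /* hypothetical stand-in */ }

	/* Before: one label serves every failure site. */
	static int parse_before(const char *p)
	{
		if (!p)
			goto fail;
		if (*p == '\0')
			goto fail;
		return 0;
	fail:
		report_error();
		return -EIO;
	}

	/* After: each site reports and returns on its own, so a later
	 * patch can handle any single site differently without
	 * re-plumbing the label. */
	static int parse_after(const char *p)
	{
		if (!p) {
			report_error();
			return -EIO;
		}
		if (*p == '\0') {
			report_error();
			return -EIO;
		}
		return 0;
	}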
...