Commit e719340f authored by Linus Torvalds

Merge tag 'gfs2-v5.7-rc1.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 fixes from Andreas Gruenbacher:
 "Various gfs2 fixes.

  Fixes for bugs prior to v5.7:
   - Fix random block reads when reading fragmented journals (v5.2)
   - Fix a possible random memory access in gfs2_walk_metadata (v5.3)

  Fixes for v5.7:
   - Fix several overlooked gfs2_qa_get / gfs2_qa_put imbalances
   - Fix several bugs in the new filesystem withdraw logic"

* tag 'gfs2-v5.7-rc1.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  Revert "gfs2: Don't demote a glock until its revokes are written"
  gfs2: If go_sync returns error, withdraw but skip invalidate
  gfs2: Grab glock reference sooner in gfs2_add_revoke
  gfs2: don't call quota_unhold if quotas are not locked
  gfs2: move privileged user check to gfs2_quota_lock_check
  gfs2: remove check for quotas on in gfs2_quota_check
  gfs2: Change BUG_ON to an assert_withdraw in gfs2_quota_change
  gfs2: Fix problems regarding gfs2_qa_get and _put
  gfs2: More gfs2_find_jhead fixes
  gfs2: Another gfs2_walk_metadata fix
  gfs2: Fix use-after-free in gfs2_logd after withdraw
  gfs2: Fix BUG during unmount after file system withdraw
  gfs2: Fix error exit in do_xmote
  gfs2: fix withdraw sequence deadlock
parents 152036d1 b14c9490
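Several hunks below (gfs2_create_inode, gfs2_link, gfs2_evict_inode, gfs2_quota_change) deal with the gfs2_qa_get / gfs2_qa_put imbalances called out in the message above: every successful get of the per-inode quota data has to be matched by exactly one put on every exit path, and never by an extra one. A minimal user-space sketch of that pairing discipline; struct qadata, qa_get and qa_put are stand-ins for illustration, not the kernel code:

#include <assert.h>

/* Stand-in for the reference-counted per-inode quota data. */
struct qadata {
        int qa_ref;
};

static int qa_get(struct qadata *qa)
{
        qa->qa_ref++;           /* the real gfs2_qa_get may also allocate */
        return 0;
}

static void qa_put(struct qadata *qa)
{
        assert(qa->qa_ref > 0); /* an unbalanced put trips this check */
        qa->qa_ref--;
}

/* The discipline the fixes restore: whoever took the reference drops it
 * exactly once, on the success path and on every error path. */
static int create_inode(struct qadata *qa, int fail)
{
        int error = qa_get(qa);

        if (error)
                return error;   /* nothing to undo yet */
        if (fail) {
                error = -1;
                goto out;       /* error path still reaches the put */
        }
        /* ... create and link the new inode here ... */
out:
        qa_put(qa);             /* exactly one put per successful get */
        return error;
}

int main(void)
{
        struct qadata qa = { .qa_ref = 0 };

        create_inode(&qa, 0);
        create_inode(&qa, 1);
        assert(qa.qa_ref == 0); /* balanced: nothing leaked, no double put */
        return 0;
}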
@@ -528,10 +528,12 @@ static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
/* Advance in metadata tree. */
(mp->mp_list[hgt])++;
if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
if (!hgt)
break;
if (hgt) {
if (mp->mp_list[hgt] >= sdp->sd_inptrs)
goto lower_metapath;
} else {
if (mp->mp_list[hgt] >= sdp->sd_diptrs)
break;
}
fill_up_metapath:
@@ -876,10 +878,9 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
ret = -ENOENT;
goto unlock;
} else {
/* report a hole */
iomap->offset = pos;
iomap->length = length;
goto do_alloc;
goto hole_found;
}
}
iomap->length = size;
@@ -933,8 +934,6 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
return ret;
do_alloc:
iomap->addr = IOMAP_NULL_ADDR;
iomap->type = IOMAP_HOLE;
if (flags & IOMAP_REPORT) {
if (pos >= size)
ret = -ENOENT;
@@ -956,6 +955,9 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
if (pos < size && height == ip->i_height)
ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
}
hole_found:
iomap->addr = IOMAP_NULL_ADDR;
iomap->type = IOMAP_HOLE;
goto out;
}
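Taken together, the three gfs2_iomap_get hunks above appear to consolidate hole reporting: rather than each path filling in the hole mapping itself, they all jump to a single hole_found label that sets IOMAP_NULL_ADDR and IOMAP_HOLE in one place. A small stand-alone sketch of that consolidation pattern; the struct, constants and cutoffs below are illustrative, not the iomap API:

#include <stdio.h>

#define NULL_ADDR ((unsigned long long)-1)      /* stand-in for IOMAP_NULL_ADDR */
enum map_type { MAP_HOLE, MAP_MAPPED };         /* stand-in for IOMAP_HOLE etc. */

struct mapping {
        unsigned long long addr;
        enum map_type type;
};

static int lookup_extent(struct mapping *map, unsigned long long pos,
                         unsigned long long isize)
{
        if (pos >= isize)
                goto hole_found;        /* beyond EOF: report a hole */
        if (pos < 4096)
                goto hole_found;        /* pretend the first block is unmapped */

        map->addr = pos;                /* a real, mapped extent */
        map->type = MAP_MAPPED;
        return 0;

hole_found:
        /* Every hole path ends up here, so the fields cannot be forgotten. */
        map->addr = NULL_ADDR;
        map->type = MAP_HOLE;
        return 0;
}

int main(void)
{
        struct mapping map;

        lookup_extent(&map, 8192, 4096);
        printf("%s\n", map.type == MAP_HOLE ? "hole" : "mapped");
        return 0;
}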
@@ -613,7 +613,7 @@ __acquires(&gl->gl_lockref.lock)
fs_err(sdp, "Error %d syncing glock \n", ret);
gfs2_dump_glock(NULL, gl, true);
}
return;
goto skip_inval;
}
}
if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) {
@@ -633,6 +633,7 @@ __acquires(&gl->gl_lockref.lock)
clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
}
skip_inval:
gfs2_glock_hold(gl);
/*
* Check for an error encountered since we called go_sync and go_inval.
@@ -722,9 +723,6 @@ __acquires(&gl->gl_lockref.lock)
goto out_unlock;
if (nonblock)
goto out_sched;
smp_mb();
if (atomic_read(&gl->gl_revokes) != 0)
goto out_sched;
set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
gl->gl_target = gl->gl_demote_state;
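Two of the glock hunks above rework the sync-failure path in do_xmote: instead of returning as soon as go_sync fails, the code now jumps to skip_inval, so only the invalidation of cached data is skipped while the common tail (taking the extra glock reference, the post-sync error checks) still runs; the third hunk is the revert of the extra gl_revokes test. A stand-alone sketch of that goto-past-one-step shape, with made-up step names:

#include <stdio.h>

static int sync_out(int fail)           /* stand-in for the ->go_sync step */
{
        return fail ? -5 : 0;
}

static int demote(int fail_sync)
{
        int ret = sync_out(fail_sync);

        if (ret) {
                fprintf(stderr, "sync failed (%d), withdrawing\n", ret);
                goto skip_inval;        /* the old code returned here and skipped the tail */
        }

        printf("invalidating cached data\n");   /* only safe after a clean sync */

skip_inval:
        printf("taking reference, recording errors, finishing the demote\n");
        return ret;
}

int main(void)
{
        demote(0);
        demote(1);
        return 0;
}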
@@ -622,7 +622,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = finish_no_open(file, NULL);
}
gfs2_glock_dq_uninit(ghs);
return error;
goto fail;
} else if (error != -ENOENT) {
goto fail_gunlock;
}
@@ -764,9 +764,11 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
error = finish_open(file, dentry, gfs2_open_common);
}
gfs2_glock_dq_uninit(ghs);
gfs2_qa_put(ip);
gfs2_glock_dq_uninit(ghs + 1);
clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
gfs2_glock_put(io_gl);
gfs2_qa_put(dip);
return error;
fail_gunlock3:
@@ -776,7 +778,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
gfs2_glock_put(io_gl);
fail_free_inode:
gfs2_qa_put(ip);
if (ip->i_gl) {
glock_clear_object(ip->i_gl, ip);
gfs2_glock_put(ip->i_gl);
@@ -1005,7 +1006,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir,
out_child:
gfs2_glock_dq(ghs);
out_parent:
gfs2_qa_put(ip);
gfs2_qa_put(dip);
gfs2_holder_uninit(ghs);
gfs2_holder_uninit(ghs + 1);
return error;
@@ -669,13 +669,13 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
struct buffer_head *bh = bd->bd_bh;
struct gfs2_glock *gl = bd->bd_gl;
sdp->sd_log_num_revoke++;
if (atomic_inc_return(&gl->gl_revokes) == 1)
gfs2_glock_hold(gl);
bh->b_private = NULL;
bd->bd_blkno = bh->b_blocknr;
gfs2_remove_from_ail(bd); /* drops ref on bh */
bd->bd_bh = NULL;
sdp->sd_log_num_revoke++;
if (atomic_inc_return(&gl->gl_revokes) == 1)
gfs2_glock_hold(gl);
set_bit(GLF_LFLUSH, &gl->gl_flags);
list_add(&bd->bd_list, &sdp->sd_log_revokes);
}
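The gfs2_add_revoke hunk above appears to move the gfs2_glock_hold() in front of gfs2_remove_from_ail(), which drops a reference on the buffer head. The underlying rule is the usual refcounting one: take the reference you are going to rely on before letting go of the reference that currently pins the object. A small stand-alone sketch of that ordering, with toy names:

#include <assert.h>
#include <stdio.h>

struct object {
        int refs;
        int freed;
};

static void obj_get(struct object *o)
{
        assert(!o->freed);      /* taking a reference after the free would be too late */
        o->refs++;
}

static void obj_put(struct object *o)
{
        assert(o->refs > 0);
        if (--o->refs == 0) {
                o->freed = 1;   /* last reference gone, object is freed */
                printf("object freed\n");
        }
}

int main(void)
{
        struct object o = { .refs = 1, .freed = 0 };    /* pinned by one existing holder */

        /* Safe order: grab our own reference first, then drop the old one. */
        obj_get(&o);
        obj_put(&o);            /* the reference the previous holder had */

        assert(!o.freed);       /* still alive because we took ours first */
        obj_put(&o);
        return 0;
}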
@@ -1131,6 +1131,10 @@ int gfs2_logd(void *data)
while (!kthread_should_stop()) {
if (gfs2_withdrawn(sdp)) {
msleep_interruptible(HZ);
continue;
}
/* Check for errors writing to the journal */
if (sdp->sd_log_error) {
gfs2_lm(sdp,
@@ -1139,6 +1143,7 @@ int gfs2_logd(void *data)
"prevent further damage.\n",
sdp->sd_fsname, sdp->sd_log_error);
gfs2_withdraw(sdp);
continue;
}
did_flush = false;
@@ -263,7 +263,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
struct super_block *sb = sdp->sd_vfs;
struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
bio->bi_iter.bi_sector = blkno << (sb->s_blocksize_bits - 9);
bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
bio_set_dev(bio, sb->s_bdev);
bio->bi_end_io = end_io;
bio->bi_private = sdp;
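The gfs2_log_alloc_bio hunk above swaps the open-coded shift sb->s_blocksize_bits - 9 for the precomputed sd_fsb2bb_shift; both are meant to convert a filesystem block number into a 512-byte sector number (9 because 2^9 = 512). A quick stand-alone check of that arithmetic, assuming 4 KiB filesystem blocks (an assumption, not something the diff states):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int blocksize_bits = 12;               /* 4096-byte blocks */
        unsigned int fsb2bb_shift = blocksize_bits - 9; /* fs blocks -> 512-byte sectors */
        unsigned long long blkno = 100;

        unsigned long long sector = blkno << fsb2bb_shift;

        assert(fsb2bb_shift == 3);
        assert(sector == 800);          /* 100 * 4096 / 512 */
        printf("block %llu -> sector %llu\n", blkno, sector);
        return 0;
}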
@@ -509,7 +509,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
unsigned int bsize = sdp->sd_sb.sb_bsize, off;
unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
unsigned int shift = PAGE_SHIFT - bsize_shift;
unsigned int readahead_blocks = BIO_MAX_PAGES << shift;
unsigned int max_bio_size = 2 * 1024 * 1024;
struct gfs2_journal_extent *je;
int sz, ret = 0;
struct bio *bio = NULL;
@@ -537,12 +537,17 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
off = 0;
}
if (!bio || (bio_chained && !off)) {
if (!bio || (bio_chained && !off) ||
bio->bi_iter.bi_size >= max_bio_size) {
/* start new bio */
} else {
sector_t sector = dblock << sdp->sd_fsb2bb_shift;
if (bio_end_sector(bio) == sector) {
sz = bio_add_page(bio, page, bsize, off);
if (sz == bsize)
goto block_added;
}
if (off) {
unsigned int blocks =
(PAGE_SIZE - off) >> bsize_shift;
@@ -568,7 +573,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
off += bsize;
if (off == PAGE_SIZE)
page = NULL;
if (blocks_submitted < blocks_read + readahead_blocks) {
if (blocks_submitted < 2 * max_bio_size >> bsize_shift) {
/* Keep at least one bio in flight */
continue;
}
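The gfs2_find_jhead hunks above replace the page-based readahead limit (BIO_MAX_PAGES worth of pages, converted to blocks) with a byte-based one, max_bio_size = 2 MiB, keeping up to twice that amount of journal data in flight, and they also let a block be appended to the current bio when it lands exactly at its end sector. A quick stand-alone check of what the two limits work out to, assuming 4 KiB pages, 4 KiB filesystem blocks and BIO_MAX_PAGES == 256 (all three are assumptions, not something the diff states):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        unsigned int bsize_shift = 12;                  /* 4 KiB filesystem blocks */
        unsigned int page_shift = 12;                   /* 4 KiB pages */
        unsigned int bio_max_pages = 256;               /* assumed BIO_MAX_PAGES */
        unsigned int max_bio_size = 2 * 1024 * 1024;    /* from the new code */

        /* Old limit: a fixed number of pages, expressed in blocks. */
        unsigned int old_limit = bio_max_pages << (page_shift - bsize_shift);

        /* New limit: twice max_bio_size worth of blocks in flight. */
        unsigned int new_limit = 2 * max_bio_size >> bsize_shift;

        assert(old_limit == 256);       /* 256 blocks  = 1 MiB of readahead */
        assert(new_limit == 1024);      /* 1024 blocks = 4 MiB in flight */
        printf("old limit %u blocks, new limit %u blocks\n", old_limit, new_limit);
        return 0;
}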
@@ -252,7 +252,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
int num = 0;
if (unlikely(gfs2_withdrawn(sdp)) &&
(!sdp->sd_jdesc || (blkno != sdp->sd_jdesc->jd_no_addr))) {
(!sdp->sd_jdesc || gl != sdp->sd_jinode_gl)) {
*bhp = NULL;
return -EIO;
}
@@ -1051,8 +1051,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
u32 x;
int error = 0;
if (capable(CAP_SYS_RESOURCE) ||
sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
return 0;
error = gfs2_quota_hold(ip, uid, gid);
@@ -1125,7 +1124,7 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
int found;
if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
goto out;
return;
for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
struct gfs2_quota_data *qd;
@@ -1162,7 +1161,6 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
qd_unlock(qda[x]);
}
out:
gfs2_quota_unhold(ip);
}
@@ -1210,9 +1208,6 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
return 0;
if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
return 0;
for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
qd = ip->i_qadata->qa_qd[x];
@@ -1270,7 +1265,9 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
if (ip->i_diskflags & GFS2_DIF_SYSTEM)
return;
BUG_ON(ip->i_qadata->qa_ref <= 0);
if (gfs2_assert_withdraw(sdp, ip->i_qadata &&
ip->i_qadata->qa_ref > 0))
return;
for (x = 0; x < ip->i_qadata->qa_qd_num; x++) {
qd = ip->i_qadata->qa_qd[x];
@@ -44,7 +44,8 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
int ret;
ap->allowed = UINT_MAX; /* Assume we are permitted a whole lot */
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
if (capable(CAP_SYS_RESOURCE) ||
sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return 0;
ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
if (ret)
@@ -1404,7 +1404,6 @@ static void gfs2_evict_inode(struct inode *inode)
if (ip->i_qadata)
gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
gfs2_rs_delete(ip, NULL);
gfs2_qa_put(ip);
gfs2_ordered_del_inode(ip);
clear_inode(inode);
gfs2_dir_hash_inval(ip);
@@ -119,6 +119,12 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
if (!sb_rdonly(sdp->sd_vfs))
ret = gfs2_make_fs_ro(sdp);
if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
if (!ret)
ret = -EIO;
clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
goto skip_recovery;
}
/*
* Drop the glock for our journal so another node can recover it.
*/
@@ -159,10 +165,6 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
wait_on_bit(&gl->gl_flags, GLF_FREEING, TASK_UNINTERRUPTIBLE);
}
if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
goto skip_recovery;
}
/*
* Dequeue the "live" glock, but keep a reference so it's never freed.
*/