Commit 5fdf4939 authored by Linus Torvalds

Merge tag 'gfs2-4.8.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Bob Peterson:
 "We've only got six GFS2 patches for this merge window.  In patch
  order:

   - Fabian Frederick submitted a nice cleanup that uses the BIT macro
     rather than bit shifting.

   - Andreas Gruenbacher contributed a patch that fixes a long-standing
     annoyance whereby GFS2 warned about dirty pages.

   - Andreas also fixed a problem with the recent extended attribute
     readahead feature.

   - Chao Yu contributed a patch that checks the return code from
     function register_shrinker and reacts accordingly. Previously, it
     was not checked.

   - Andreas Gruenbacher also fixed a problem whereby incore file
     timestamps were forgotten if the file was invalidated. This merely
     moves the assignment inside the inode glock where it belongs.

   - Andreas also fixed a problem where incore timestamps were not
     initialized"

* tag 'gfs2-4.8.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  gfs2: Initialize atime of I_NEW inodes
  gfs2: Update file times after grabbing glock
  gfs2: fix to detect failure of register_shrinker
  gfs2: Fix extended attribute readahead optimization
  gfs2: Remove dirty buffer warning from gfs2_releasepage
  GFS2: use BIT() macro
parents c35bcfd8 332f51d7
...@@ -187,7 +187,7 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w ...@@ -187,7 +187,7 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
ClearPageChecked(page); ClearPageChecked(page);
if (!page_has_buffers(page)) { if (!page_has_buffers(page)) {
create_empty_buffers(page, inode->i_sb->s_blocksize, create_empty_buffers(page, inode->i_sb->s_blocksize,
(1 << BH_Dirty)|(1 << BH_Uptodate)); BIT(BH_Dirty)|BIT(BH_Uptodate));
} }
gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1); gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
} }
...@@ -1147,6 +1147,16 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask) ...@@ -1147,6 +1147,16 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
if (!page_has_buffers(page)) if (!page_has_buffers(page))
return 0; return 0;
/*
* From xfs_vm_releasepage: mm accommodates an old ext3 case where
* clean pages might not have had the dirty bit cleared. Thus, it can
* send actual dirty pages to ->releasepage() via shrink_active_list().
*
* As a workaround, we skip pages that contain dirty buffers below.
* Once ->releasepage isn't called on dirty pages anymore, we can warn
* on dirty buffers like we used to here again.
*/
gfs2_log_lock(sdp); gfs2_log_lock(sdp);
spin_lock(&sdp->sd_ail_lock); spin_lock(&sdp->sd_ail_lock);
head = bh = page_buffers(page); head = bh = page_buffers(page);
...@@ -1156,8 +1166,8 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask) ...@@ -1156,8 +1166,8 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
bd = bh->b_private; bd = bh->b_private;
if (bd && bd->bd_tr) if (bd && bd->bd_tr)
goto cannot_release; goto cannot_release;
if (buffer_pinned(bh) || buffer_dirty(bh)) if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
goto not_possible; goto cannot_release;
bh = bh->b_this_page; bh = bh->b_this_page;
} while(bh != head); } while(bh != head);
spin_unlock(&sdp->sd_ail_lock); spin_unlock(&sdp->sd_ail_lock);
...@@ -1180,9 +1190,6 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask) ...@@ -1180,9 +1190,6 @@ int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
return try_to_free_buffers(page); return try_to_free_buffers(page);
not_possible: /* Should never happen */
WARN_ON(buffer_dirty(bh));
WARN_ON(buffer_pinned(bh));
cannot_release: cannot_release:
spin_unlock(&sdp->sd_ail_lock); spin_unlock(&sdp->sd_ail_lock);
gfs2_log_unlock(sdp); gfs2_log_unlock(sdp);
......
...@@ -82,8 +82,8 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh, ...@@ -82,8 +82,8 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh,
} }
if (!page_has_buffers(page)) if (!page_has_buffers(page))
create_empty_buffers(page, 1 << inode->i_blkbits, create_empty_buffers(page, BIT(inode->i_blkbits),
(1 << BH_Uptodate)); BIT(BH_Uptodate));
bh = page_buffers(page); bh = page_buffers(page);
...@@ -690,7 +690,7 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi ...@@ -690,7 +690,7 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
BUG_ON(!dblock); BUG_ON(!dblock);
BUG_ON(!new); BUG_ON(!new);
bh.b_size = 1 << (inode->i_blkbits + (create ? 0 : 5)); bh.b_size = BIT(inode->i_blkbits + (create ? 0 : 5));
ret = gfs2_block_map(inode, lblock, &bh, create); ret = gfs2_block_map(inode, lblock, &bh, create);
*extlen = bh.b_size >> inode->i_blkbits; *extlen = bh.b_size >> inode->i_blkbits;
*dblock = bh.b_blocknr; *dblock = bh.b_blocknr;
......
...@@ -351,7 +351,7 @@ static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip) ...@@ -351,7 +351,7 @@ static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
if (hc) if (hc)
return hc; return hc;
hsize = 1 << ip->i_depth; hsize = BIT(ip->i_depth);
hsize *= sizeof(__be64); hsize *= sizeof(__be64);
if (hsize != i_size_read(&ip->i_inode)) { if (hsize != i_size_read(&ip->i_inode)) {
gfs2_consist_inode(ip); gfs2_consist_inode(ip);
...@@ -819,8 +819,8 @@ static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode, ...@@ -819,8 +819,8 @@ static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
if (ip->i_diskflags & GFS2_DIF_EXHASH) { if (ip->i_diskflags & GFS2_DIF_EXHASH) {
struct gfs2_leaf *leaf; struct gfs2_leaf *leaf;
unsigned hsize = 1 << ip->i_depth; unsigned int hsize = BIT(ip->i_depth);
unsigned index; unsigned int index;
u64 ln; u64 ln;
if (hsize * sizeof(u64) != i_size_read(inode)) { if (hsize * sizeof(u64) != i_size_read(inode)) {
gfs2_consist_inode(ip); gfs2_consist_inode(ip);
...@@ -932,7 +932,7 @@ static int dir_make_exhash(struct inode *inode) ...@@ -932,7 +932,7 @@ static int dir_make_exhash(struct inode *inode)
return -ENOSPC; return -ENOSPC;
bn = bh->b_blocknr; bn = bh->b_blocknr;
gfs2_assert(sdp, dip->i_entries < (1 << 16)); gfs2_assert(sdp, dip->i_entries < BIT(16));
leaf->lf_entries = cpu_to_be16(dip->i_entries); leaf->lf_entries = cpu_to_be16(dip->i_entries);
/* Copy dirents */ /* Copy dirents */
...@@ -1041,7 +1041,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name) ...@@ -1041,7 +1041,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
bn = nbh->b_blocknr; bn = nbh->b_blocknr;
/* Compute the start and len of leaf pointers in the hash table. */ /* Compute the start and len of leaf pointers in the hash table. */
len = 1 << (dip->i_depth - be16_to_cpu(oleaf->lf_depth)); len = BIT(dip->i_depth - be16_to_cpu(oleaf->lf_depth));
half_len = len >> 1; half_len = len >> 1;
if (!half_len) { if (!half_len) {
pr_warn("i_depth %u lf_depth %u index %u\n", pr_warn("i_depth %u lf_depth %u index %u\n",
...@@ -1163,7 +1163,7 @@ static int dir_double_exhash(struct gfs2_inode *dip) ...@@ -1163,7 +1163,7 @@ static int dir_double_exhash(struct gfs2_inode *dip)
int x; int x;
int error = 0; int error = 0;
hsize = 1 << dip->i_depth; hsize = BIT(dip->i_depth);
hsize_bytes = hsize * sizeof(__be64); hsize_bytes = hsize * sizeof(__be64);
hc = gfs2_dir_get_hash_table(dip); hc = gfs2_dir_get_hash_table(dip);
...@@ -1539,7 +1539,7 @@ static int dir_e_read(struct inode *inode, struct dir_context *ctx, ...@@ -1539,7 +1539,7 @@ static int dir_e_read(struct inode *inode, struct dir_context *ctx,
int error = 0; int error = 0;
unsigned depth = 0; unsigned depth = 0;
hsize = 1 << dip->i_depth; hsize = BIT(dip->i_depth);
hash = gfs2_dir_offset2hash(ctx->pos); hash = gfs2_dir_offset2hash(ctx->pos);
index = hash >> (32 - dip->i_depth); index = hash >> (32 - dip->i_depth);
...@@ -1558,7 +1558,7 @@ static int dir_e_read(struct inode *inode, struct dir_context *ctx, ...@@ -1558,7 +1558,7 @@ static int dir_e_read(struct inode *inode, struct dir_context *ctx,
if (error) if (error)
break; break;
len = 1 << (dip->i_depth - depth); len = BIT(dip->i_depth - depth);
index = (index & ~(len - 1)) + len; index = (index & ~(len - 1)) + len;
} }
...@@ -2113,7 +2113,7 @@ int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip) ...@@ -2113,7 +2113,7 @@ int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
u64 leaf_no; u64 leaf_no;
int error = 0, last; int error = 0, last;
hsize = 1 << dip->i_depth; hsize = BIT(dip->i_depth);
lp = gfs2_dir_get_hash_table(dip); lp = gfs2_dir_get_hash_table(dip);
if (IS_ERR(lp)) if (IS_ERR(lp))
...@@ -2126,7 +2126,7 @@ int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip) ...@@ -2126,7 +2126,7 @@ int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
if (error) if (error)
goto out; goto out;
leaf = (struct gfs2_leaf *)bh->b_data; leaf = (struct gfs2_leaf *)bh->b_data;
len = 1 << (dip->i_depth - be16_to_cpu(leaf->lf_depth)); len = BIT(dip->i_depth - be16_to_cpu(leaf->lf_depth));
next_index = (index & ~(len - 1)) + len; next_index = (index & ~(len - 1)) + len;
last = ((next_index >= hsize) ? 1 : 0); last = ((next_index >= hsize) ? 1 : 0);
......
...@@ -395,9 +395,6 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -395,9 +395,6 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
sb_start_pagefault(inode->i_sb); sb_start_pagefault(inode->i_sb);
/* Update file times before taking page lock */
file_update_time(vma->vm_file);
ret = gfs2_rsqa_alloc(ip); ret = gfs2_rsqa_alloc(ip);
if (ret) if (ret)
goto out; goto out;
...@@ -409,6 +406,9 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -409,6 +406,9 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret) if (ret)
goto out_uninit; goto out_uninit;
/* Update file times before taking page lock */
file_update_time(vma->vm_file);
set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
set_bit(GIF_SW_PAGED, &ip->i_flags); set_bit(GIF_SW_PAGED, &ip->i_flags);
......
...@@ -69,7 +69,7 @@ static atomic_t lru_count = ATOMIC_INIT(0); ...@@ -69,7 +69,7 @@ static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock); static DEFINE_SPINLOCK(lru_lock);
#define GFS2_GL_HASH_SHIFT 15 #define GFS2_GL_HASH_SHIFT 15
#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) #define GFS2_GL_HASH_SIZE BIT(GFS2_GL_HASH_SHIFT)
static struct rhashtable_params ht_parms = { static struct rhashtable_params ht_parms = {
.nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4, .nelem_hint = GFS2_GL_HASH_SIZE * 3 / 4,
...@@ -1781,7 +1781,13 @@ int __init gfs2_glock_init(void) ...@@ -1781,7 +1781,13 @@ int __init gfs2_glock_init(void)
return -ENOMEM; return -ENOMEM;
} }
register_shrinker(&glock_shrinker); ret = register_shrinker(&glock_shrinker);
if (ret) {
destroy_workqueue(gfs2_delete_workqueue);
destroy_workqueue(glock_workqueue);
rhashtable_destroy(&gl_hash_table);
return ret;
}
return 0; return 0;
} }
......
...@@ -187,6 +187,10 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, ...@@ -187,6 +187,10 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
} }
gfs2_set_iop(inode); gfs2_set_iop(inode);
inode->i_atime.tv_sec = 0;
inode->i_atime.tv_nsec = 0;
unlock_new_inode(inode); unlock_new_inode(inode);
} }
......
...@@ -85,7 +85,7 @@ static inline int gfs2_check_internal_file_size(struct inode *inode, ...@@ -85,7 +85,7 @@ static inline int gfs2_check_internal_file_size(struct inode *inode,
u64 size = i_size_read(inode); u64 size = i_size_read(inode);
if (size < minsize || size > maxsize) if (size < minsize || size > maxsize)
goto err; goto err;
if (size & ((1 << inode->i_blkbits) - 1)) if (size & (BIT(inode->i_blkbits) - 1))
goto err; goto err;
return 0; return 0;
err: err:
......
...@@ -145,7 +145,9 @@ static int __init init_gfs2_fs(void) ...@@ -145,7 +145,9 @@ static int __init init_gfs2_fs(void)
if (!gfs2_qadata_cachep) if (!gfs2_qadata_cachep)
goto fail; goto fail;
register_shrinker(&gfs2_qd_shrinker); error = register_shrinker(&gfs2_qd_shrinker);
if (error)
goto fail;
error = register_filesystem(&gfs2_fs_type); error = register_filesystem(&gfs2_fs_type);
if (error) if (error)
......
...@@ -216,23 +216,26 @@ static void gfs2_meta_read_endio(struct bio *bio) ...@@ -216,23 +216,26 @@ static void gfs2_meta_read_endio(struct bio *bio)
static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[], static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
int num) int num)
{ {
struct buffer_head *bh = bhs[0]; while (num > 0) {
struct bio *bio; struct buffer_head *bh = *bhs;
int i; struct bio *bio;
if (!num) bio = bio_alloc(GFP_NOIO, num);
return; bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev;
bio = bio_alloc(GFP_NOIO, num); while (num > 0) {
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bh = *bhs;
bio->bi_bdev = bh->b_bdev; if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {
for (i = 0; i < num; i++) { BUG_ON(bio->bi_iter.bi_size == 0);
bh = bhs[i]; break;
bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); }
bhs++;
num--;
}
bio->bi_end_io = gfs2_meta_read_endio;
bio_set_op_attrs(bio, op, op_flags);
submit_bio(bio);
} }
bio->bi_end_io = gfs2_meta_read_endio;
bio_set_op_attrs(bio, op, op_flags);
submit_bio(bio);
} }
/** /**
......
...@@ -58,7 +58,7 @@ static void gfs2_tune_init(struct gfs2_tune *gt) ...@@ -58,7 +58,7 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
gt->gt_quota_scale_num = 1; gt->gt_quota_scale_num = 1;
gt->gt_quota_scale_den = 1; gt->gt_quota_scale_den = 1;
gt->gt_new_files_jdata = 0; gt->gt_new_files_jdata = 0;
gt->gt_max_readahead = 1 << 18; gt->gt_max_readahead = BIT(18);
gt->gt_complain_secs = 10; gt->gt_complain_secs = 10;
} }
...@@ -284,7 +284,7 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent) ...@@ -284,7 +284,7 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
GFS2_BASIC_BLOCK_SHIFT; GFS2_BASIC_BLOCK_SHIFT;
sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
sdp->sd_diptrs = (sdp->sd_sb.sb_bsize - sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
sizeof(struct gfs2_dinode)) / sizeof(u64); sizeof(struct gfs2_dinode)) / sizeof(u64);
sdp->sd_inptrs = (sdp->sd_sb.sb_bsize - sdp->sd_inptrs = (sdp->sd_sb.sb_bsize -
...@@ -302,7 +302,7 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent) ...@@ -302,7 +302,7 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
/* Compute maximum reservation required to add a entry to a directory */ /* Compute maximum reservation required to add a entry to a directory */
hash_blocks = DIV_ROUND_UP(sizeof(u64) * (1 << GFS2_DIR_MAX_DEPTH), hash_blocks = DIV_ROUND_UP(sizeof(u64) * BIT(GFS2_DIR_MAX_DEPTH),
sdp->sd_jbsize); sdp->sd_jbsize);
ind_blocks = 0; ind_blocks = 0;
...@@ -1089,7 +1089,7 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent ...@@ -1089,7 +1089,7 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits; sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
GFS2_BASIC_BLOCK_SHIFT; GFS2_BASIC_BLOCK_SHIFT;
sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift; sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit; sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum; sdp->sd_tune.gt_quota_quantum = sdp->sd_args.ar_quota_quantum;
......
...@@ -75,7 +75,7 @@ ...@@ -75,7 +75,7 @@
#include "util.h" #include "util.h"
#define GFS2_QD_HASH_SHIFT 12 #define GFS2_QD_HASH_SHIFT 12
#define GFS2_QD_HASH_SIZE (1 << GFS2_QD_HASH_SHIFT) #define GFS2_QD_HASH_SIZE BIT(GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1) #define GFS2_QD_HASH_MASK (GFS2_QD_HASH_SIZE - 1)
/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */ /* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
...@@ -384,7 +384,7 @@ static int bh_get(struct gfs2_quota_data *qd) ...@@ -384,7 +384,7 @@ static int bh_get(struct gfs2_quota_data *qd)
block = qd->qd_slot / sdp->sd_qc_per_block; block = qd->qd_slot / sdp->sd_qc_per_block;
offset = qd->qd_slot % sdp->sd_qc_per_block; offset = qd->qd_slot % sdp->sd_qc_per_block;
bh_map.b_size = 1 << ip->i_inode.i_blkbits; bh_map.b_size = BIT(ip->i_inode.i_blkbits);
error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0); error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
if (error) if (error)
goto fail; goto fail;
......
...@@ -359,7 +359,7 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd) ...@@ -359,7 +359,7 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd)
struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
u64 size = i_size_read(jd->jd_inode); u64 size = i_size_read(jd->jd_inode);
if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, 1 << 30)) if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
return -EIO; return -EIO;
jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift; jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment