Commit e589db7a authored by Linus Torvalds

Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 fixes from Ted Ts'o:
 "Various bug fixes for ext4.  The most serious of them fixes a security
  bug (CVE-2012-4508) which leads to stale data exposure when we have
  fallocate racing against writes to files undergoing delayed
  allocation.  We also have two fixes for the metadata checksum feature,
   the most serious of which can cause the superblock to have an invalid
  checksum after a power failure."

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: Avoid underflow in ext4_trim_fs()
  ext4: Checksum the block bitmap properly with bigalloc enabled
  ext4: fix undefined bit shift result in ext4_fill_flex_info
  ext4: fix metadata checksum calculation for the superblock
  ext4: race-condition protection for ext4_convert_unwritten_extents_endio
  ext4: serialize fallocate with ext4_convert_unwritten_extents
parents 344ba37b 5de35e8d
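
Most of the hunks below drop the explicit length argument from ext4_block_bitmap_csum_set() and ext4_block_bitmap_csum_verify(): callers used to pass EXT4_BLOCKS_PER_GROUP(sb) / 8, and the helpers now compute EXT4_CLUSTERS_PER_GROUP(sb) / 8 themselves. A minimal userspace sketch (not kernel code; the group geometry below is made up for illustration) of why the old length over-counts once bigalloc makes one cluster span several blocks:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical geometry: 4K blocks, 16 blocks per bigalloc cluster. */
		unsigned int blocks_per_group   = 32768;
		unsigned int log_cluster_ratio  = 4;
		unsigned int clusters_per_group = blocks_per_group >> log_cluster_ratio;

		/* The block bitmap carries one bit per *cluster*, so only the first
		 * clusters_per_group / 8 bytes of the buffer are meaningful. */
		printf("old checksum length: %u bytes\n", blocks_per_group / 8);
		printf("new checksum length: %u bytes\n", clusters_per_group / 8);
		return 0;
	}

With bigalloc disabled the two values coincide, which is why the longer length only misbehaved on bigalloc file systems.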
@@ -174,8 +174,7 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
 		ext4_free_inodes_set(sb, gdp, 0);
 		ext4_itable_unused_set(sb, gdp, 0);
 		memset(bh->b_data, 0xff, sb->s_blocksize);
-		ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
-					   EXT4_BLOCKS_PER_GROUP(sb) / 8);
+		ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
 		return;
 	}
 	memset(bh->b_data, 0, sb->s_blocksize);
@@ -212,8 +211,7 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
 	 */
 	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
-	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
-				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
+	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
 	ext4_group_desc_csum_set(sb, block_group, gdp);
 }
@@ -350,7 +348,7 @@ void ext4_validate_block_bitmap(struct super_block *sb,
 		return;
 	}
 	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
-			desc, bh, EXT4_BLOCKS_PER_GROUP(sb) / 8))) {
+			desc, bh))) {
 		ext4_unlock_group(sb, block_group);
 		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
 		return;
......
@@ -58,11 +58,12 @@ void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
 int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
 				  struct ext4_group_desc *gdp,
-				  struct buffer_head *bh, int sz)
+				  struct buffer_head *bh)
 {
 	__u32 hi;
 	__u32 provided, calculated;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;

 	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
 			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
@@ -84,8 +85,9 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
 void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
 				struct ext4_group_desc *gdp,
-				struct buffer_head *bh, int sz)
+				struct buffer_head *bh)
 {
+	int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
 	__u32 csum;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
......
@@ -1882,10 +1882,10 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
 				  struct buffer_head *bh, int sz);
 void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
 				struct ext4_group_desc *gdp,
-				struct buffer_head *bh, int sz);
+				struct buffer_head *bh);
 int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
 				  struct ext4_group_desc *gdp,
-				  struct buffer_head *bh, int sz);
+				  struct buffer_head *bh);

 /* balloc.c */
 extern void ext4_validate_block_bitmap(struct super_block *sb,
@@ -2063,8 +2063,7 @@ extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
 extern int ext4_calculate_overhead(struct super_block *sb);
 extern int ext4_superblock_csum_verify(struct super_block *sb,
 				       struct ext4_super_block *es);
-extern void ext4_superblock_csum_set(struct super_block *sb,
-				     struct ext4_super_block *es);
+extern void ext4_superblock_csum_set(struct super_block *sb);
 extern void *ext4_kvmalloc(size_t size, gfp_t flags);
 extern void *ext4_kvzalloc(size_t size, gfp_t flags);
 extern void ext4_kvfree(void *ptr);
......
@@ -143,17 +143,13 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
 	struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
 	int err = 0;

+	ext4_superblock_csum_set(sb);
 	if (ext4_handle_valid(handle)) {
-		ext4_superblock_csum_set(sb,
-				(struct ext4_super_block *)bh->b_data);
 		err = jbd2_journal_dirty_metadata(handle, bh);
 		if (err)
 			ext4_journal_abort_handle(where, line, __func__,
 						  bh, handle, err);
-	} else {
-		ext4_superblock_csum_set(sb,
-				(struct ext4_super_block *)bh->b_data);
+	} else
 		mark_buffer_dirty(bh);
-	}
 	return err;
 }
@@ -52,6 +52,9 @@
 #define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
 #define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */
+#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
+#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

 static __le32 ext4_extent_block_csum(struct inode *inode,
 				     struct ext4_extent_header *eh)
 {
@@ -2914,6 +2917,9 @@ static int ext4_split_extent_at(handle_t *handle,
 	unsigned int ee_len, depth;
 	int err = 0;

+	BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
+	       (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
+
 	ext_debug("ext4_split_extents_at: inode %lu, logical"
 		"block %llu\n", inode->i_ino, (unsigned long long)split);
@@ -2972,7 +2978,14 @@ static int ext4_split_extent_at(handle_t *handle,
 	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
 	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
-		err = ext4_ext_zeroout(inode, &orig_ex);
+		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
+			if (split_flag & EXT4_EXT_DATA_VALID1)
+				err = ext4_ext_zeroout(inode, ex2);
+			else
+				err = ext4_ext_zeroout(inode, ex);
+		} else
+			err = ext4_ext_zeroout(inode, &orig_ex);
+
 		if (err)
 			goto fix_extent_len;
 		/* update the extent length and mark as initialized */
@@ -3025,12 +3038,13 @@ static int ext4_split_extent(handle_t *handle,
 	uninitialized = ext4_ext_is_uninitialized(ex);

 	if (map->m_lblk + map->m_len < ee_block + ee_len) {
-		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
-			      EXT4_EXT_MAY_ZEROOUT : 0;
+		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
 		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
 		if (uninitialized)
 			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
 				       EXT4_EXT_MARK_UNINIT2;
+		if (split_flag & EXT4_EXT_DATA_VALID2)
+			split_flag1 |= EXT4_EXT_DATA_VALID1;
 		err = ext4_split_extent_at(handle, inode, path,
 				map->m_lblk + map->m_len, split_flag1, flags1);
 		if (err)
@@ -3043,8 +3057,8 @@ static int ext4_split_extent(handle_t *handle,
 		return PTR_ERR(path);

 	if (map->m_lblk >= ee_block) {
-		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
-			      EXT4_EXT_MAY_ZEROOUT : 0;
+		split_flag1 = split_flag & (EXT4_EXT_MAY_ZEROOUT |
+					    EXT4_EXT_DATA_VALID2);
 		if (uninitialized)
 			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
 		if (split_flag & EXT4_EXT_MARK_UNINIT2)
@@ -3323,26 +3337,47 @@ static int ext4_split_unwritten_extents(handle_t *handle,
 	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
 	split_flag |= EXT4_EXT_MARK_UNINIT2;
+	if (flags & EXT4_GET_BLOCKS_CONVERT)
+		split_flag |= EXT4_EXT_DATA_VALID2;
 	flags |= EXT4_GET_BLOCKS_PRE_IO;
 	return ext4_split_extent(handle, inode, path, map, split_flag, flags);
 }

 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
 						struct inode *inode,
+						struct ext4_map_blocks *map,
 						struct ext4_ext_path *path)
 {
 	struct ext4_extent *ex;
+	ext4_lblk_t ee_block;
+	unsigned int ee_len;
 	int depth;
 	int err = 0;

 	depth = ext_depth(inode);
 	ex = path[depth].p_ext;
+	ee_block = le32_to_cpu(ex->ee_block);
+	ee_len = ext4_ext_get_actual_len(ex);

 	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
 		"block %llu, max_blocks %u\n", inode->i_ino,
-		(unsigned long long)le32_to_cpu(ex->ee_block),
-		ext4_ext_get_actual_len(ex));
+		(unsigned long long)ee_block, ee_len);
+
+	/* If extent is larger than requested then split is required */
+	if (ee_block != map->m_lblk || ee_len > map->m_len) {
+		err = ext4_split_unwritten_extents(handle, inode, map, path,
+						   EXT4_GET_BLOCKS_CONVERT);
+		if (err < 0)
+			goto out;
+		ext4_ext_drop_refs(path);
+		path = ext4_ext_find_extent(inode, map->m_lblk, path);
+		if (IS_ERR(path)) {
+			err = PTR_ERR(path);
+			goto out;
+		}
+		depth = ext_depth(inode);
+		ex = path[depth].p_ext;
+	}

 	err = ext4_ext_get_access(handle, inode, path + depth);
 	if (err)
@@ -3652,7 +3687,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 	}
 	/* IO end_io complete, convert the filled extent to written */
 	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
-		ret = ext4_convert_unwritten_extents_endio(handle, inode,
+		ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
 							path);
 		if (ret >= 0) {
 			ext4_update_inode_fsync_trans(handle, inode, 1);
@@ -4428,6 +4463,9 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 	 */
 	if (len <= EXT_UNINIT_MAX_LEN << blkbits)
 		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
+
+	/* Prevent race condition between unwritten */
+	ext4_flush_unwritten_io(inode);
 retry:
 	while (ret >= 0 && ret < max_blocks) {
 		map.m_lblk = map.m_lblk + ret;
......
@@ -762,9 +762,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, umode_t mode,
 			ext4_free_group_clusters_set(sb, gdp,
 				ext4_free_clusters_after_init(sb, group, gdp));
 			ext4_block_bitmap_csum_set(sb, group, gdp,
-						   block_bitmap_bh,
-						   EXT4_BLOCKS_PER_GROUP(sb) /
-						   8);
+						   block_bitmap_bh);
 			ext4_group_desc_csum_set(sb, group, gdp);
 		}
 		ext4_unlock_group(sb, group);
......
@@ -2805,8 +2805,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 	}
 	len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
 	ext4_free_group_clusters_set(sb, gdp, len);
-	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh,
-				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
+	ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh);
 	ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
@@ -4666,8 +4665,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 	ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
 	ext4_free_group_clusters_set(sb, gdp, ret);
-	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
-				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
+	ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
 	ext4_group_desc_csum_set(sb, block_group, gdp);
 	ext4_unlock_group(sb, block_group);
 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
@@ -4811,8 +4809,7 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
 	mb_free_blocks(NULL, &e4b, bit, count);
 	blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
 	ext4_free_group_clusters_set(sb, desc, blk_free_count);
-	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh,
-				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
+	ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh);
 	ext4_group_desc_csum_set(sb, block_group, desc);
 	ext4_unlock_group(sb, block_group);
 	percpu_counter_add(&sbi->s_freeclusters_counter,
@@ -4993,8 +4990,9 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 	minlen = EXT4_NUM_B2C(EXT4_SB(sb),
 			      range->minlen >> sb->s_blocksize_bits);
-	if (unlikely(minlen > EXT4_CLUSTERS_PER_GROUP(sb)) ||
-	    unlikely(start >= max_blks))
+	if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) ||
+	    start >= max_blks ||
+	    range->len < sb->s_blocksize)
 		return -EINVAL;
 	if (end >= max_blks)
 		end = max_blks - 1;
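
The extra range->len < sb->s_blocksize test rejects trim requests shorter than one block up front. A rough userspace sketch of the unsigned wrap-around this guards against, assuming the trimmed range's last block is derived as start + (len >> blocksize_bits) - 1 as it is earlier in this function (the variable names here are illustrative, not the kernel's):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long start = 0;      /* fstrim_range.start, in blocks */
		unsigned long long len   = 512;    /* fstrim_range.len smaller than one 4K block */
		unsigned int blocksize_bits = 12;

		/* len >> blocksize_bits truncates to 0, so the subtraction wraps. */
		unsigned long long end = start + (len >> blocksize_bits) - 1;

		printf("end = %llu\n", end);       /* enormous value instead of an error */
		return 0;
	}

Returning -EINVAL for such a range keeps the end block well defined.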
......
@@ -1212,8 +1212,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb,
 	bh = ext4_get_bitmap(sb, group_data->block_bitmap);
 	if (!bh)
 		return -EIO;
-	ext4_block_bitmap_csum_set(sb, group, gdp, bh,
-				   EXT4_BLOCKS_PER_GROUP(sb) / 8);
+	ext4_block_bitmap_csum_set(sb, group, gdp, bh);
 	brelse(bh);

 	return 0;
......
@@ -143,9 +143,10 @@ int ext4_superblock_csum_verify(struct super_block *sb,
 	return es->s_checksum == ext4_superblock_csum(sb, es);
 }

-void ext4_superblock_csum_set(struct super_block *sb,
-			      struct ext4_super_block *es)
+void ext4_superblock_csum_set(struct super_block *sb)
 {
+	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
 	if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
 		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
 		return;
@@ -1963,7 +1964,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
 		sbi->s_log_groups_per_flex = 0;
 		return 1;
 	}
-	groups_per_flex = 1 << sbi->s_log_groups_per_flex;
+	groups_per_flex = 1U << sbi->s_log_groups_per_flex;

 	err = ext4_alloc_flex_bg_array(sb, sbi->s_groups_count);
 	if (err)
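
Writing the constant as 1U matters because once s_log_groups_per_flex reaches 31, `1 << 31` shifts a set bit into the sign bit of a signed int, which is undefined behaviour in C; with an unsigned constant the shift is well defined. A small standalone illustration (the value 31 is just an example):

	#include <stdio.h>

	int main(void)
	{
		unsigned int log_groups_per_flex = 31;   /* example value at the top of the shiftable range */

		/* 1 << 31 would be undefined (overflow into the sign bit of int);
		 * 1U << 31 is well defined and yields 2147483648. */
		unsigned int groups_per_flex = 1U << log_groups_per_flex;

		printf("groups_per_flex = %u\n", groups_per_flex);
		return 0;
	}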
@@ -4381,7 +4382,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 				cpu_to_le32(percpu_counter_sum_positive(
 					&EXT4_SB(sb)->s_freeinodes_counter));
 	BUFFER_TRACE(sbh, "marking dirty");
-	ext4_superblock_csum_set(sb, es);
+	ext4_superblock_csum_set(sb);
 	mark_buffer_dirty(sbh);
 	if (sync) {
 		error = sync_dirty_buffer(sbh);
......