Commit a3163ca0 authored by Linus Torvalds

Merge tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 fixes from Ted Ts'o:
 "More miscellaneous ext4 bug fixes (all stable fodder)"

* tag 'ext4_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: fix mount failure with quota configured as module
  jbd2: fix ocfs2 corrupt when clearing block group bits
  ext4: fix race between writepages and enabling EXT4_EXTENTS_FL
  ext4: rename s_journal_flag_rwsem to s_writepages_rwsem
  ext4: fix potential race between s_flex_groups online resizing and access
  ext4: fix potential race between s_group_info online resizing and access
  ext4: fix potential race between online resizing and write operations
  ext4: add cond_resched() to __ext4_find_entry()
  ext4: fix a data race in EXT4_I(inode)->i_disksize
parents c6188dff 9db176bc
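
The thread running through the resizing fixes is that the per-superblock arrays (s_group_desc, s_group_info, s_flex_groups) become RCU-protected pointers: readers take a snapshot under rcu_read_lock(), and online resize publishes a larger copy with rcu_assign_pointer() and frees the old array only after a grace period. A minimal self-contained sketch of that pattern, with illustrative names that are not from this merge:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	struct item { int v; };

	static struct item * __rcu *slots;	/* published pointer array */

	/* Reader: snapshot one element; safe even during a resize, because
	 * a resize replaces the array, never the published elements. */
	static struct item *get_slot(int i)
	{
		struct item *p;

		rcu_read_lock();
		p = rcu_dereference(slots)[i];
		rcu_read_unlock();
		return p;
	}

	/* Resizer (callers serialize it): copy forward, publish, then wait
	 * out every reader of the old array before freeing it. */
	static int grow_slots(int old_n, int new_n)
	{
		struct item **old, **new;

		new = kcalloc(new_n, sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		old = rcu_dereference_protected(slots, 1);
		if (old)
			memcpy(new, old, old_n * sizeof(*new));
		rcu_assign_pointer(slots, new);
		synchronize_rcu();
		kfree(old);
		return 0;
	}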
@@ -270,6 +270,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
 	ext4_group_t ngroups = ext4_get_groups_count(sb);
 	struct ext4_group_desc *desc;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct buffer_head *bh_p;
 
 	if (block_group >= ngroups) {
 		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
@@ -280,7 +281,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
 
 	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
 	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
-	if (!sbi->s_group_desc[group_desc]) {
+	bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
+	/*
+	 * sbi_array_rcu_deref returns with rcu unlocked, this is ok since
+	 * the pointer being dereferenced won't be dereferenced again. By
+	 * looking at the usage in add_new_gdb() the value isn't modified,
+	 * just the pointer, and so it remains valid.
+	 */
+	if (!bh_p) {
 		ext4_error(sb, "Group descriptor not loaded - "
 			   "block_group = %u, group_desc = %u, desc = %u",
 			   block_group, group_desc, offset);
@@ -288,10 +296,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
 	}
 
 	desc = (struct ext4_group_desc *)(
-		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
+		(__u8 *)bh_p->b_data +
 		offset * EXT4_DESC_SIZE(sb));
 	if (bh)
-		*bh = sbi->s_group_desc[group_desc];
+		*bh = bh_p;
 	return desc;
 }
...
@@ -1400,7 +1400,7 @@ struct ext4_sb_info {
 	loff_t s_bitmap_maxbytes;	/* max bytes for bitmap files */
 	struct buffer_head * s_sbh;	/* Buffer containing the super block */
 	struct ext4_super_block *s_es;	/* Pointer to the super block in the buffer */
-	struct buffer_head **s_group_desc;
+	struct buffer_head * __rcu *s_group_desc;
 	unsigned int s_mount_opt;
 	unsigned int s_mount_opt2;
 	unsigned int s_mount_flags;
@@ -1462,7 +1462,7 @@ struct ext4_sb_info {
 #endif
 
 	/* for buddy allocator */
-	struct ext4_group_info ***s_group_info;
+	struct ext4_group_info ** __rcu *s_group_info;
 	struct inode *s_buddy_cache;
 	spinlock_t s_md_lock;
 	unsigned short *s_mb_offsets;
@@ -1512,7 +1512,7 @@ struct ext4_sb_info {
 	unsigned int s_extent_max_zeroout_kb;
 
 	unsigned int s_log_groups_per_flex;
-	struct flex_groups *s_flex_groups;
+	struct flex_groups * __rcu *s_flex_groups;
 	ext4_group_t s_flex_groups_allocated;
 
 	/* workqueue for reserved extent conversions (buffered io) */
@@ -1552,8 +1552,11 @@ struct ext4_sb_info {
 	struct ratelimit_state s_warning_ratelimit_state;
 	struct ratelimit_state s_msg_ratelimit_state;
 
-	/* Barrier between changing inodes' journal flags and writepages ops. */
-	struct percpu_rw_semaphore s_journal_flag_rwsem;
+	/*
+	 * Barrier between writepages ops and changing any inode's JOURNAL_DATA
+	 * or EXTENTS flag.
+	 */
+	struct percpu_rw_semaphore s_writepages_rwsem;
 	struct dax_device *s_daxdev;
 #ifdef CONFIG_EXT4_DEBUG
 	unsigned long s_simulate_fail;
@@ -1576,6 +1579,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
 		 ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
 }
 
+/*
+ * Returns: sbi->field[index]
+ * Used to access an array element from the following sbi fields which require
+ * rcu protection to avoid dereferencing an invalid pointer due to reassignment
+ * - s_group_desc
+ * - s_group_info
+ * - s_flex_group
+ */
+#define sbi_array_rcu_deref(sbi, field, index)				\
+({									\
+	typeof(*((sbi)->field)) _v;					\
+	rcu_read_lock();						\
+	_v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index];	\
+	rcu_read_unlock();						\
+	_v;								\
+})
+
 /*
  * Simulate_fail codes
  */
@@ -2730,6 +2750,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
 extern bool ext4_empty_dir(struct inode *inode);
 
 /* resize.c */
+extern void ext4_kvfree_array_rcu(void *to_free);
 extern int ext4_group_add(struct super_block *sb,
 				struct ext4_new_group_data *input);
 extern int ext4_group_extend(struct super_block *sb,
@@ -2976,13 +2997,13 @@ static inline
 struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
 					    ext4_group_t group)
 {
-	struct ext4_group_info ***grp_info;
+	struct ext4_group_info **grp_info;
 	long indexv, indexh;
 	BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
-	grp_info = EXT4_SB(sb)->s_group_info;
 	indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
 	indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
-	return grp_info[indexv][indexh];
+	grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
+	return grp_info[indexh];
 }
 
 /*
@@ -3032,7 +3053,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
 		     !inode_is_locked(inode));
 	down_write(&EXT4_I(inode)->i_data_sem);
 	if (newsize > EXT4_I(inode)->i_disksize)
-		EXT4_I(inode)->i_disksize = newsize;
+		WRITE_ONCE(EXT4_I(inode)->i_disksize, newsize);
 	up_write(&EXT4_I(inode)->i_data_sem);
 }
...
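
For reference, a caller reads one element through the new accessor like this (a minimal sketch; sbi is assumed to be a valid struct ext4_sb_info * and gdb_num an in-range index). The macro returns with RCU already unlocked; that is safe for these arrays because a resize only ever swaps the array itself, it does not free the published elements:

	struct buffer_head *gdb_bh;

	/* snapshot of one group-descriptor buffer_head pointer */
	gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);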
@@ -328,11 +328,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 
 	percpu_counter_inc(&sbi->s_freeinodes_counter);
 	if (sbi->s_log_groups_per_flex) {
-		ext4_group_t f = ext4_flex_group(sbi, block_group);
+		struct flex_groups *fg;
 
-		atomic_inc(&sbi->s_flex_groups[f].free_inodes);
+		fg = sbi_array_rcu_deref(sbi, s_flex_groups,
+					 ext4_flex_group(sbi, block_group));
+		atomic_inc(&fg->free_inodes);
 		if (is_directory)
-			atomic_dec(&sbi->s_flex_groups[f].used_dirs);
+			atomic_dec(&fg->used_dirs);
 	}
 	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
 	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
@@ -368,12 +370,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
 			    int flex_size, struct orlov_stats *stats)
 {
 	struct ext4_group_desc *desc;
-	struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
 
 	if (flex_size > 1) {
-		stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
-		stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
-		stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
+		struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
+							     s_flex_groups, g);
+		stats->free_inodes = atomic_read(&fg->free_inodes);
+		stats->free_clusters = atomic64_read(&fg->free_clusters);
+		stats->used_dirs = atomic_read(&fg->used_dirs);
 		return;
 	}
 
@@ -1054,7 +1057,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 		if (sbi->s_log_groups_per_flex) {
 			ext4_group_t f = ext4_flex_group(sbi, group);
 
-			atomic_inc(&sbi->s_flex_groups[f].used_dirs);
+			atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
+							f)->used_dirs);
 		}
 	}
 	if (ext4_has_group_desc_csum(sb)) {
@@ -1077,7 +1081,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
 
 	if (sbi->s_log_groups_per_flex) {
 		flex_group = ext4_flex_group(sbi, group);
-		atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
+		atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
+						flex_group)->free_inodes);
 	}
 
 	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
...
@@ -2465,7 +2465,7 @@ static int mpage_map_and_submit_extent(handle_t *handle,
 	 * truncate are avoided by checking i_size under i_data_sem.
 	 */
 	disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
-	if (disksize > EXT4_I(inode)->i_disksize) {
+	if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
 		int err2;
 		loff_t i_size;
 
@@ -2628,7 +2628,7 @@ static int ext4_writepages(struct address_space *mapping,
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
 		return -EIO;
 
-	percpu_down_read(&sbi->s_journal_flag_rwsem);
+	percpu_down_read(&sbi->s_writepages_rwsem);
 	trace_ext4_writepages(inode, wbc);
 
 	/*
@@ -2849,7 +2849,7 @@ static int ext4_writepages(struct address_space *mapping,
 out_writepages:
 	trace_ext4_writepages_result(inode, wbc, ret,
 				     nr_to_write - wbc->nr_to_write);
-	percpu_up_read(&sbi->s_journal_flag_rwsem);
+	percpu_up_read(&sbi->s_writepages_rwsem);
 	return ret;
 }
 
@@ -2864,13 +2864,13 @@ static int ext4_dax_writepages(struct address_space *mapping,
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
 		return -EIO;
 
-	percpu_down_read(&sbi->s_journal_flag_rwsem);
+	percpu_down_read(&sbi->s_writepages_rwsem);
 	trace_ext4_writepages(inode, wbc);
 
 	ret = dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
 	trace_ext4_writepages_result(inode, wbc, ret,
 				     nr_to_write - wbc->nr_to_write);
-	percpu_up_read(&sbi->s_journal_flag_rwsem);
+	percpu_up_read(&sbi->s_writepages_rwsem);
 	return ret;
 }
 
@@ -5861,7 +5861,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
 		}
 	}
 
-	percpu_down_write(&sbi->s_journal_flag_rwsem);
+	percpu_down_write(&sbi->s_writepages_rwsem);
 	jbd2_journal_lock_updates(journal);
 
 	/*
@@ -5878,7 +5878,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
 		err = jbd2_journal_flush(journal);
 		if (err < 0) {
 			jbd2_journal_unlock_updates(journal);
-			percpu_up_write(&sbi->s_journal_flag_rwsem);
+			percpu_up_write(&sbi->s_writepages_rwsem);
 			return err;
 		}
 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
@@ -5886,7 +5886,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
 	ext4_set_aops(inode);
 
 	jbd2_journal_unlock_updates(journal);
-	percpu_up_write(&sbi->s_journal_flag_rwsem);
+	percpu_up_write(&sbi->s_writepages_rwsem);
 
 	if (val)
 		up_write(&EXT4_I(inode)->i_mmap_sem);
...
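
The i_disksize change pairs a WRITE_ONCE() performed under i_data_sem with a READ_ONCE() in the lockless writeback check, so the unlocked reader can neither see a torn value nor have the load re-fetched by the compiler. The idiom in isolation, as a sketch (ei and the surrounding locking are assumed from context):

	/* writer, i_data_sem held: the check-then-update stays atomic */
	down_write(&ei->i_data_sem);
	if (newsize > ei->i_disksize)
		WRITE_ONCE(ei->i_disksize, newsize);
	up_write(&ei->i_data_sem);

	/* lockless reader: one untorn load */
	if (disksize > READ_ONCE(ei->i_disksize)) {
		/* take i_data_sem and re-check before updating */
	}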
@@ -2356,7 +2356,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	unsigned size;
-	struct ext4_group_info ***new_groupinfo;
+	struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
 
 	size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
 		EXT4_DESC_PER_BLOCK_BITS(sb);
@@ -2369,13 +2369,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
 		ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
 		return -ENOMEM;
 	}
-	if (sbi->s_group_info) {
-		memcpy(new_groupinfo, sbi->s_group_info,
+	rcu_read_lock();
+	old_groupinfo = rcu_dereference(sbi->s_group_info);
+	if (old_groupinfo)
+		memcpy(new_groupinfo, old_groupinfo,
 		       sbi->s_group_info_size * sizeof(*sbi->s_group_info));
-		kvfree(sbi->s_group_info);
-	}
-	sbi->s_group_info = new_groupinfo;
+	rcu_read_unlock();
+	rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
 	sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
+	if (old_groupinfo)
+		ext4_kvfree_array_rcu(old_groupinfo);
 	ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
 		   sbi->s_group_info_size);
 	return 0;
@@ -2387,6 +2390,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 {
 	int i;
 	int metalen = 0;
+	int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_group_info **meta_group_info;
 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
@@ -2405,12 +2409,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 				 "for a buddy group");
 			goto exit_meta_group_info;
 		}
-		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
-			meta_group_info;
+		rcu_read_lock();
+		rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
+		rcu_read_unlock();
 	}
 
-	meta_group_info =
-		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
+	meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
 	i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
 
 	meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
@@ -2458,8 +2462,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
 exit_group_info:
 	/* If a meta_group_info table has been allocated, release it now */
 	if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
-		kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
-		sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
+		struct ext4_group_info ***group_info;
+
+		rcu_read_lock();
+		group_info = rcu_dereference(sbi->s_group_info);
+		kfree(group_info[idx]);
+		group_info[idx] = NULL;
+		rcu_read_unlock();
 	}
 exit_meta_group_info:
 	return -ENOMEM;
@@ -2472,6 +2481,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	int err;
 	struct ext4_group_desc *desc;
+	struct ext4_group_info ***group_info;
 	struct kmem_cache *cachep;
 
 	err = ext4_mb_alloc_groupinfo(sb, ngroups);
@@ -2507,11 +2517,16 @@ static int ext4_mb_init_backend(struct super_block *sb)
 	while (i-- > 0)
 		kmem_cache_free(cachep, ext4_get_group_info(sb, i));
 	i = sbi->s_group_info_size;
+	rcu_read_lock();
+	group_info = rcu_dereference(sbi->s_group_info);
 	while (i-- > 0)
-		kfree(sbi->s_group_info[i]);
+		kfree(group_info[i]);
+	rcu_read_unlock();
 	iput(sbi->s_buddy_cache);
 err_freesgi:
-	kvfree(sbi->s_group_info);
+	rcu_read_lock();
+	kvfree(rcu_dereference(sbi->s_group_info));
+	rcu_read_unlock();
 	return -ENOMEM;
 }
 
@@ -2700,7 +2715,7 @@ int ext4_mb_release(struct super_block *sb)
 	ext4_group_t ngroups = ext4_get_groups_count(sb);
 	ext4_group_t i;
 	int num_meta_group_infos;
-	struct ext4_group_info *grinfo;
+	struct ext4_group_info *grinfo, ***group_info;
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
 
@@ -2719,9 +2734,12 @@ int ext4_mb_release(struct super_block *sb)
 		num_meta_group_infos = (ngroups +
 				EXT4_DESC_PER_BLOCK(sb) - 1) >>
 			EXT4_DESC_PER_BLOCK_BITS(sb);
+		rcu_read_lock();
+		group_info = rcu_dereference(sbi->s_group_info);
 		for (i = 0; i < num_meta_group_infos; i++)
-			kfree(sbi->s_group_info[i]);
-		kvfree(sbi->s_group_info);
+			kfree(group_info[i]);
+		kvfree(group_info);
+		rcu_read_unlock();
 	}
 	kfree(sbi->s_mb_offsets);
 	kfree(sbi->s_mb_maxs);
@@ -3020,7 +3038,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
 		ext4_group_t flex_group = ext4_flex_group(sbi,
 							  ac->ac_b_ex.fe_group);
 		atomic64_sub(ac->ac_b_ex.fe_len,
-			     &sbi->s_flex_groups[flex_group].free_clusters);
+			     &sbi_array_rcu_deref(sbi, s_flex_groups,
+						  flex_group)->free_clusters);
 	}
 
 	err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
@@ -4918,7 +4937,8 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 	if (sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
 		atomic64_add(count_clusters,
-			     &sbi->s_flex_groups[flex_group].free_clusters);
+			     &sbi_array_rcu_deref(sbi, s_flex_groups,
						  flex_group)->free_clusters);
 	}
 
 	/*
@@ -5075,7 +5095,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
 	if (sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
 		atomic64_add(clusters_freed,
-			     &sbi->s_flex_groups[flex_group].free_clusters);
+			     &sbi_array_rcu_deref(sbi, s_flex_groups,
						  flex_group)->free_clusters);
 	}
 
 	ext4_mb_unload_buddy(&e4b);
...
@@ -407,6 +407,7 @@ static int free_ext_block(handle_t *handle, struct inode *inode)
 
 int ext4_ext_migrate(struct inode *inode)
 {
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	handle_t *handle;
 	int retval = 0, i;
 	__le32 *i_data;
@@ -431,6 +432,8 @@ int ext4_ext_migrate(struct inode *inode)
 		 */
 		return retval;
 
+	percpu_down_write(&sbi->s_writepages_rwsem);
+
 	/*
 	 * Worst case we can touch the allocation bitmaps, a bgd
 	 * block, and a block to link in the orphan list.  We do need
@@ -441,7 +444,7 @@ int ext4_ext_migrate(struct inode *inode)
 
 	if (IS_ERR(handle)) {
 		retval = PTR_ERR(handle);
-		return retval;
+		goto out_unlock;
 	}
 	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
 		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
@@ -452,7 +455,7 @@ int ext4_ext_migrate(struct inode *inode)
 	if (IS_ERR(tmp_inode)) {
 		retval = PTR_ERR(tmp_inode);
 		ext4_journal_stop(handle);
-		return retval;
+		goto out_unlock;
 	}
 	i_size_write(tmp_inode, i_size_read(inode));
 	/*
@@ -494,7 +497,7 @@ int ext4_ext_migrate(struct inode *inode)
 		 */
 		ext4_orphan_del(NULL, tmp_inode);
 		retval = PTR_ERR(handle);
-		goto out;
+		goto out_tmp_inode;
 	}
 
 	ei = EXT4_I(inode);
@@ -576,10 +579,11 @@ int ext4_ext_migrate(struct inode *inode)
 	ext4_ext_tree_init(handle, tmp_inode);
 out_stop:
 	ext4_journal_stop(handle);
-out:
+out_tmp_inode:
 	unlock_new_inode(tmp_inode);
 	iput(tmp_inode);
+out_unlock:
+	percpu_up_write(&sbi->s_writepages_rwsem);
 	return retval;
 }
 
@@ -589,7 +593,8 @@ int ext4_ext_migrate(struct inode *inode)
 int ext4_ind_migrate(struct inode *inode)
 {
 	struct ext4_extent_header *eh;
-	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	struct ext4_super_block *es = sbi->s_es;
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct ext4_extent *ex;
 	unsigned int i, len;
@@ -613,9 +618,13 @@ int ext4_ind_migrate(struct inode *inode)
 	if (test_opt(inode->i_sb, DELALLOC))
 		ext4_alloc_da_blocks(inode);
 
+	percpu_down_write(&sbi->s_writepages_rwsem);
+
 	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
+	if (IS_ERR(handle)) {
+		ret = PTR_ERR(handle);
+		goto out_unlock;
+	}
 
 	down_write(&EXT4_I(inode)->i_data_sem);
 	ret = ext4_ext_check_inode(inode);
@@ -650,5 +659,7 @@ int ext4_ind_migrate(struct inode *inode)
 errout:
 	ext4_journal_stop(handle);
 	up_write(&EXT4_I(inode)->i_data_sem);
+out_unlock:
+	percpu_up_write(&sbi->s_writepages_rwsem);
 	return ret;
 }
@@ -1511,6 +1511,7 @@ static struct buffer_head *__ext4_find_entry(struct inode *dir,
 		/*
 		 * We deal with the read-ahead logic here.
 		 */
+		cond_resched();
 		if (ra_ptr >= ra_max) {
 			/* Refill the readahead buffer */
 			ra_ptr = 0;
...
@@ -17,6 +17,33 @@
 
 #include "ext4_jbd2.h"
 
+struct ext4_rcu_ptr {
+	struct rcu_head rcu;
+	void *ptr;
+};
+
+static void ext4_rcu_ptr_callback(struct rcu_head *head)
+{
+	struct ext4_rcu_ptr *ptr;
+
+	ptr = container_of(head, struct ext4_rcu_ptr, rcu);
+	kvfree(ptr->ptr);
+	kfree(ptr);
+}
+
+void ext4_kvfree_array_rcu(void *to_free)
+{
+	struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+
+	if (ptr) {
+		ptr->ptr = to_free;
+		call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
+		return;
+	}
+
+	synchronize_rcu();
+	kvfree(to_free);
+}
+
 int ext4_resize_begin(struct super_block *sb)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -542,8 +569,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
 				brelse(gdb);
 				goto out;
 			}
-			memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
-			       gdb->b_size);
+			memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
+				s_group_desc, j)->b_data, gdb->b_size);
 			set_buffer_uptodate(gdb);
 
 			err = ext4_handle_dirty_metadata(handle, NULL, gdb);
@@ -860,13 +887,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	}
 	brelse(dind);
 
-	o_group_desc = EXT4_SB(sb)->s_group_desc;
+	rcu_read_lock();
+	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
 	memcpy(n_group_desc, o_group_desc,
 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
+	rcu_read_unlock();
 	n_group_desc[gdb_num] = gdb_bh;
-	EXT4_SB(sb)->s_group_desc = n_group_desc;
+	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
 	EXT4_SB(sb)->s_gdb_count++;
-	kvfree(o_group_desc);
+	ext4_kvfree_array_rcu(o_group_desc);
 
 	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
 	err = ext4_handle_dirty_super(handle, sb);
@@ -909,9 +938,11 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
 		return err;
 	}
 
-	o_group_desc = EXT4_SB(sb)->s_group_desc;
+	rcu_read_lock();
+	o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
 	memcpy(n_group_desc, o_group_desc,
 	       EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
+	rcu_read_unlock();
 	n_group_desc[gdb_num] = gdb_bh;
 
 	BUFFER_TRACE(gdb_bh, "get_write_access");
@@ -922,9 +953,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
 		return err;
 	}
 
-	EXT4_SB(sb)->s_group_desc = n_group_desc;
+	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
 	EXT4_SB(sb)->s_gdb_count++;
-	kvfree(o_group_desc);
+	ext4_kvfree_array_rcu(o_group_desc);
 	return err;
 }
 
@@ -1188,7 +1219,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
 		 * use non-sparse filesystems anymore. This is already checked above.
 		 */
 		if (gdb_off) {
-			gdb_bh = sbi->s_group_desc[gdb_num];
+			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
+						     gdb_num);
 			BUFFER_TRACE(gdb_bh, "get_write_access");
 			err = ext4_journal_get_write_access(handle, gdb_bh);
 
@@ -1270,7 +1302,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
 		/*
 		 * get_write_access() has been called on gdb_bh by ext4_add_new_desc().
 		 */
-		gdb_bh = sbi->s_group_desc[gdb_num];
+		gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
 		/* Update group descriptor block for new group */
 		gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
 						 gdb_off * EXT4_DESC_SIZE(sb));
@@ -1398,11 +1430,14 @@ static void ext4_update_super(struct super_block *sb,
 			   percpu_counter_read(&sbi->s_freeclusters_counter));
 	if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group;
+		struct flex_groups *fg;
+
 		flex_group = ext4_flex_group(sbi, group_data[0].group);
+		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
 		atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
-			     &sbi->s_flex_groups[flex_group].free_clusters);
+			     &fg->free_clusters);
 		atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
-			   &sbi->s_flex_groups[flex_group].free_inodes);
+			   &fg->free_inodes);
 	}
 
 	/*
@@ -1497,7 +1532,8 @@ static int ext4_flex_group_add(struct super_block *sb,
 		for (; gdb_num <= gdb_num_end; gdb_num++) {
 			struct buffer_head *gdb_bh;
 
-			gdb_bh = sbi->s_group_desc[gdb_num];
+			gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
+						     gdb_num);
 			if (old_gdb == gdb_bh->b_blocknr)
 				continue;
 			update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
...
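
ext4_kvfree_array_rcu(), added above, defers the kvfree() of a replaced array until after an RCU grace period, so a reader that fetched the old pointer with rcu_dereference() just before the swap can still finish with it; if the small bookkeeping allocation fails, it degrades to blocking in synchronize_rcu() and freeing inline. Every caller keeps the same publish-then-free order, condensed here from add_new_gdb() above:

	rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
	EXT4_SB(sb)->s_gdb_count++;
	ext4_kvfree_array_rcu(o_group_desc);	/* freed after readers drain */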
@@ -1014,6 +1014,8 @@ static void ext4_put_super(struct super_block *sb)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_super_block *es = sbi->s_es;
+	struct buffer_head **group_desc;
+	struct flex_groups **flex_groups;
 	int aborted = 0;
 	int i, err;
 
@@ -1046,15 +1048,23 @@ static void ext4_put_super(struct super_block *sb)
 	if (!sb_rdonly(sb))
 		ext4_commit_super(sb, 1);
 
+	rcu_read_lock();
+	group_desc = rcu_dereference(sbi->s_group_desc);
 	for (i = 0; i < sbi->s_gdb_count; i++)
-		brelse(sbi->s_group_desc[i]);
-	kvfree(sbi->s_group_desc);
-	kvfree(sbi->s_flex_groups);
+		brelse(group_desc[i]);
+	kvfree(group_desc);
+	flex_groups = rcu_dereference(sbi->s_flex_groups);
+	if (flex_groups) {
+		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
+			kvfree(flex_groups[i]);
+		kvfree(flex_groups);
+	}
+	rcu_read_unlock();
 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
-	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
+	percpu_free_rwsem(&sbi->s_writepages_rwsem);
 #ifdef CONFIG_QUOTA
 	for (i = 0; i < EXT4_MAXQUOTAS; i++)
 		kfree(get_qf_name(sb, sbi, i));
@@ -2380,8 +2390,8 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
 int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	struct flex_groups *new_groups;
-	int size;
+	struct flex_groups **old_groups, **new_groups;
+	int size, i;
 
 	if (!sbi->s_log_groups_per_flex)
 		return 0;
@@ -2390,22 +2400,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
 	if (size <= sbi->s_flex_groups_allocated)
 		return 0;
 
-	size = roundup_pow_of_two(size * sizeof(struct flex_groups));
-	new_groups = kvzalloc(size, GFP_KERNEL);
+	new_groups = kvzalloc(roundup_pow_of_two(size *
+			      sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
 	if (!new_groups) {
-		ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
-			 size / (int) sizeof(struct flex_groups));
+		ext4_msg(sb, KERN_ERR,
+			 "not enough memory for %d flex group pointers", size);
 		return -ENOMEM;
 	}
-
-	if (sbi->s_flex_groups) {
-		memcpy(new_groups, sbi->s_flex_groups,
-		       (sbi->s_flex_groups_allocated *
-			sizeof(struct flex_groups)));
-		kvfree(sbi->s_flex_groups);
+	for (i = sbi->s_flex_groups_allocated; i < size; i++) {
+		new_groups[i] = kvzalloc(roundup_pow_of_two(
+					 sizeof(struct flex_groups)),
+					 GFP_KERNEL);
+		if (!new_groups[i]) {
+			for (i--; i >= sbi->s_flex_groups_allocated; i--)
+				kvfree(new_groups[i]);
+			kvfree(new_groups);
+			ext4_msg(sb, KERN_ERR,
+				 "not enough memory for %d flex groups", size);
+			return -ENOMEM;
+		}
 	}
-	sbi->s_flex_groups = new_groups;
-	sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
+	rcu_read_lock();
+	old_groups = rcu_dereference(sbi->s_flex_groups);
+	if (old_groups)
+		memcpy(new_groups, old_groups,
+		       (sbi->s_flex_groups_allocated *
+			sizeof(struct flex_groups *)));
+	rcu_read_unlock();
+	rcu_assign_pointer(sbi->s_flex_groups, new_groups);
+	sbi->s_flex_groups_allocated = size;
+	if (old_groups)
+		ext4_kvfree_array_rcu(old_groups);
 	return 0;
 }
 
@@ -2413,6 +2438,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	struct ext4_group_desc *gdp = NULL;
+	struct flex_groups *fg;
 	ext4_group_t flex_group;
 	int i, err;
 
@@ -2430,12 +2456,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
 		gdp = ext4_get_group_desc(sb, i, NULL);
 
 		flex_group = ext4_flex_group(sbi, i);
-		atomic_add(ext4_free_inodes_count(sb, gdp),
-			   &sbi->s_flex_groups[flex_group].free_inodes);
+		fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
+		atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
 		atomic64_add(ext4_free_group_clusters(sb, gdp),
-			     &sbi->s_flex_groups[flex_group].free_clusters);
-		atomic_add(ext4_used_dirs_count(sb, gdp),
-			   &sbi->s_flex_groups[flex_group].used_dirs);
+			     &fg->free_clusters);
+		atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
 	}
 
 	return 1;
@@ -3009,7 +3034,7 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
 		return 0;
 	}
 
-#if !defined(CONFIG_QUOTA) || !defined(CONFIG_QFMT_V2)
+#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
 	if (!readonly && (ext4_has_feature_quota(sb) ||
 			  ext4_has_feature_project(sb))) {
 		ext4_msg(sb, KERN_ERR,
@@ -3634,9 +3659,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
 	char *orig_data = kstrdup(data, GFP_KERNEL);
-	struct buffer_head *bh;
+	struct buffer_head *bh, **group_desc;
 	struct ext4_super_block *es = NULL;
 	struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+	struct flex_groups **flex_groups;
 	ext4_fsblk_t block;
 	ext4_fsblk_t sb_block = get_sb_block(&data);
 	ext4_fsblk_t logical_sb_block;
@@ -4290,9 +4316,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			goto failed_mount;
 		}
 	}
-	sbi->s_group_desc = kvmalloc_array(db_count,
-					   sizeof(struct buffer_head *),
-					   GFP_KERNEL);
+	rcu_assign_pointer(sbi->s_group_desc,
+			   kvmalloc_array(db_count,
+					  sizeof(struct buffer_head *),
+					  GFP_KERNEL));
 	if (sbi->s_group_desc == NULL) {
 		ext4_msg(sb, KERN_ERR, "not enough memory");
 		ret = -ENOMEM;
@@ -4308,14 +4335,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	}
 
 	for (i = 0; i < db_count; i++) {
+		struct buffer_head *bh;
+
 		block = descriptor_loc(sb, logical_sb_block, i);
-		sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
-		if (!sbi->s_group_desc[i]) {
+		bh = sb_bread_unmovable(sb, block);
+		if (!bh) {
 			ext4_msg(sb, KERN_ERR,
 			       "can't read group descriptor %d", i);
 			db_count = i;
 			goto failed_mount2;
 		}
+		rcu_read_lock();
+		rcu_dereference(sbi->s_group_desc)[i] = bh;
+		rcu_read_unlock();
 	}
 	sbi->s_gdb_count = db_count;
 	if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
@@ -4594,7 +4626,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
 					  GFP_KERNEL);
 	if (!err)
-		err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
+		err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
 
 	if (err) {
 		ext4_msg(sb, KERN_ERR, "insufficient memory");
@@ -4682,13 +4714,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	ext4_unregister_li_request(sb);
 failed_mount6:
 	ext4_mb_release(sb);
-	if (sbi->s_flex_groups)
-		kvfree(sbi->s_flex_groups);
+	rcu_read_lock();
+	flex_groups = rcu_dereference(sbi->s_flex_groups);
+	if (flex_groups) {
+		for (i = 0; i < sbi->s_flex_groups_allocated; i++)
+			kvfree(flex_groups[i]);
+		kvfree(flex_groups);
+	}
+	rcu_read_unlock();
 	percpu_counter_destroy(&sbi->s_freeclusters_counter);
 	percpu_counter_destroy(&sbi->s_freeinodes_counter);
 	percpu_counter_destroy(&sbi->s_dirs_counter);
 	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
-	percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
+	percpu_free_rwsem(&sbi->s_writepages_rwsem);
failed_mount5:
 	ext4_ext_release(sb);
 	ext4_release_system_zone(sb);
@@ -4717,9 +4755,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	if (sbi->s_mmp_tsk)
 		kthread_stop(sbi->s_mmp_tsk);
 failed_mount2:
+	rcu_read_lock();
+	group_desc = rcu_dereference(sbi->s_group_desc);
 	for (i = 0; i < db_count; i++)
-		brelse(sbi->s_group_desc[i]);
-	kvfree(sbi->s_group_desc);
+		brelse(group_desc[i]);
+	kvfree(group_desc);
+	rcu_read_unlock();
 failed_mount:
 	if (sbi->s_chksum_driver)
 		crypto_free_shash(sbi->s_chksum_driver);
...
@@ -936,8 +936,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 	char *frozen_buffer = NULL;
 	unsigned long start_lock, time_lock;
 
-	if (is_handle_aborted(handle))
-		return -EROFS;
 	journal = transaction->t_journal;
 
 	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
@@ -1189,6 +1187,9 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
 	struct journal_head *jh;
 	int rc;
 
+	if (is_handle_aborted(handle))
+		return -EROFS;
+
 	if (jbd2_write_access_granted(handle, bh, false))
 		return 0;
 
@@ -1326,6 +1327,9 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 	struct journal_head *jh;
 	char *committed_data = NULL;
 
+	if (is_handle_aborted(handle))
+		return -EROFS;
+
 	if (jbd2_write_access_granted(handle, bh, true))
 		return 0;
...
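
The jbd2 hunks above hoist the is_handle_aborted() check out of do_get_write_access() and into jbd2_journal_get_write_access() and jbd2_journal_get_undo_access(), ahead of the jbd2_write_access_granted() fast path. Previously an aborted handle could still be granted write access through that fast path without ever reaching the check, which is the route by which the ocfs2 block-group corruption fixed in this series could occur.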