Commit d17413c0 authored by Dmitry Monakhov, committed by Theodore Ts'o

ext4: clean up inode bitmaps manipulation in ext4_free_inode

- Reorganize the locking scheme to batch two atomic operations into one.
  This also allows us to state that a healthy group must obey the
  following rule (see the sketch below the list):
  ext4_free_inodes_count(sb, gdp) == ext4_count_free(inode_bitmap, NUM);
- Fix a possible undefined pointer dereference.
- Even if the group descriptor stats aren't accessible, we still have to
  update the inode bitmap.
- Move updates of non-group members out of the group lock.
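The rule above can be read as a consistency check between a group descriptor and its inode bitmap. Below is a minimal sketch of such a check; the helper ext4_check_group_inode_counts() is hypothetical and not part of this commit, and the ext4_count_free() call signature (bitmap buffer plus a byte count) is an assumption based on the form used in the commit message.

/*
 * Hypothetical helper (not part of this commit): verify that a group's
 * descriptor and its on-disk inode bitmap agree, i.e. the invariant
 * stated in the commit message.  Taking ext4_lock_group() mirrors the
 * batched locking this patch introduces in ext4_free_inode().
 */
static void ext4_check_group_inode_counts(struct super_block *sb,
                                          ext4_group_t group,
                                          struct ext4_group_desc *gdp,
                                          struct buffer_head *bitmap_bh)
{
        unsigned int in_desc, in_bitmap;

        ext4_lock_group(sb, group);
        in_desc = ext4_free_inodes_count(sb, gdp);
        /* assumed signature: bitmap buffer + number of bytes covering
         * one group's worth of inodes */
        in_bitmap = ext4_count_free(bitmap_bh,
                                    EXT4_INODES_PER_GROUP(sb) / 8);
        ext4_unlock_group(sb, group);

        if (in_desc != in_bitmap)
                ext4_error(sb, "group %u: descriptor says %u free inodes, "
                           "bitmap says %u", group, in_desc, in_bitmap);
}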
Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
parent 21ca087a
@@ -240,56 +240,49 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
         if (fatal)
                 goto error_return;
 
-        /* Ok, now we can actually update the inode bitmaps.. */
-        cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
-                                        bit, bitmap_bh->b_data);
-        if (!cleared)
-                ext4_error(sb, "bit already cleared for inode %lu", ino);
-        else {
-                gdp = ext4_get_group_desc(sb, block_group, &bh2);
-
-                BUFFER_TRACE(bh2, "get_write_access");
-                fatal = ext4_journal_get_write_access(handle, bh2);
-                if (fatal) goto error_return;
-
-                if (gdp) {
-                        ext4_lock_group(sb, block_group);
-                        count = ext4_free_inodes_count(sb, gdp) + 1;
-                        ext4_free_inodes_set(sb, gdp, count);
-                        if (is_directory) {
-                                count = ext4_used_dirs_count(sb, gdp) - 1;
-                                ext4_used_dirs_set(sb, gdp, count);
-                                if (sbi->s_log_groups_per_flex) {
-                                        ext4_group_t f;
-
-                                        f = ext4_flex_group(sbi, block_group);
-                                        atomic_dec(&sbi->s_flex_groups[f].used_dirs);
-                                }
-                        }
-                        gdp->bg_checksum = ext4_group_desc_csum(sbi,
-                                                        block_group, gdp);
-                        ext4_unlock_group(sb, block_group);
-                        percpu_counter_inc(&sbi->s_freeinodes_counter);
-                        if (is_directory)
-                                percpu_counter_dec(&sbi->s_dirs_counter);
-
-                        if (sbi->s_log_groups_per_flex) {
-                                ext4_group_t f;
-
-                                f = ext4_flex_group(sbi, block_group);
-                                atomic_inc(&sbi->s_flex_groups[f].free_inodes);
-                        }
-                }
-                BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
-                err = ext4_handle_dirty_metadata(handle, NULL, bh2);
-                if (!fatal) fatal = err;
-        }
-        BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
-        err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
-        if (!fatal)
-                fatal = err;
-        sb->s_dirt = 1;
+        fatal = -ESRCH;
+        gdp = ext4_get_group_desc(sb, block_group, &bh2);
+        if (gdp) {
+                BUFFER_TRACE(bh2, "get_write_access");
+                fatal = ext4_journal_get_write_access(handle, bh2);
+        }
+        ext4_lock_group(sb, block_group);
+        cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
+        if (fatal || !cleared) {
+                ext4_unlock_group(sb, block_group);
+                goto out;
+        }
+
+        count = ext4_free_inodes_count(sb, gdp) + 1;
+        ext4_free_inodes_set(sb, gdp, count);
+        if (is_directory) {
+                count = ext4_used_dirs_count(sb, gdp) - 1;
+                ext4_used_dirs_set(sb, gdp, count);
+                percpu_counter_dec(&sbi->s_dirs_counter);
+        }
+        gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+        ext4_unlock_group(sb, block_group);
+
+        percpu_counter_inc(&sbi->s_freeinodes_counter);
+        if (sbi->s_log_groups_per_flex) {
+                ext4_group_t f = ext4_flex_group(sbi, block_group);
+
+                atomic_inc(&sbi->s_flex_groups[f].free_inodes);
+                if (is_directory)
+                        atomic_dec(&sbi->s_flex_groups[f].used_dirs);
+        }
+        BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
+        fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
+out:
+        if (cleared) {
+                BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
+                err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+                if (!fatal)
+                        fatal = err;
+                sb->s_dirt = 1;
+        } else
+                ext4_error(sb, "bit already cleared for inode %lu", ino);
+
 error_return:
         brelse(bitmap_bh);
         ext4_std_error(sb, fatal);