Commit cdb7ee4c authored by Tahsin Erdogan, committed by Theodore Ts'o

ext4: add nombcache mount option

The main purpose of mb cache is to achieve deduplication of extended
attributes. In use cases where the opportunity for deduplication is
unlikely, it only adds overhead.

Add a mount option to explicitly turn off mb cache.
Suggested-by: Andreas Dilger <adilger@dilger.ca>
Signed-off-by: Tahsin Erdogan <tahsin@google.com>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent b9fc761e
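
Usage sketch (editorial, not part of the commit): with a kernel carrying this
patch, "nombcache" is passed like any other ext4 mount option. The device and
mount-point paths below are hypothetical placeholders.

/* Hypothetical sketch: mount an ext4 filesystem with mbcache disabled.
 * /dev/sdb1 and /mnt/data are placeholders, not taken from the commit. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* The final argument is the filesystem-specific option string;
	 * "nombcache" is parsed by ext4 as Opt_nombcache and sets
	 * EXT4_MOUNT_NO_MBCACHE, so the xattr caches are never created. */
	if (mount("/dev/sdb1", "/mnt/data", "ext4", 0, "nombcache") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}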
@@ -1114,6 +1114,7 @@ struct ext4_inode_info {
 /*
  * Mount flags set via mount options or defaults
  */
+#define EXT4_MOUNT_NO_MBCACHE		0x00001 /* Do not use mbcache */
 #define EXT4_MOUNT_GRPID		0x00004	/* Create files with directory's group */
 #define EXT4_MOUNT_DEBUG		0x00008	/* Some debugging messages */
 #define EXT4_MOUNT_ERRORS_CONT		0x00010	/* Continue on errors */
@@ -1336,7 +1336,7 @@ enum {
 	Opt_inode_readahead_blks, Opt_journal_ioprio,
 	Opt_dioread_nolock, Opt_dioread_lock,
 	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
-	Opt_max_dir_size_kb, Opt_nojournal_checksum,
+	Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache,
 };

 static const match_table_t tokens = {
@@ -1419,6 +1419,8 @@ static const match_table_t tokens = {
 	{Opt_noinit_itable, "noinit_itable"},
 	{Opt_max_dir_size_kb, "max_dir_size_kb=%u"},
 	{Opt_test_dummy_encryption, "test_dummy_encryption"},
+	{Opt_nombcache, "nombcache"},
+	{Opt_nombcache, "no_mbcache"},	/* for backward compatibility */
 	{Opt_removed, "check=none"},	/* mount option from ext2/3 */
 	{Opt_removed, "nocheck"},	/* mount option from ext2/3 */
 	{Opt_removed, "reservation"},	/* mount option from ext2/3 */
@@ -1626,6 +1628,7 @@ static const struct mount_opts {
 	{Opt_jqfmt_vfsv1, QFMT_VFS_V1, MOPT_QFMT},
 	{Opt_max_dir_size_kb, 0, MOPT_GTE0},
 	{Opt_test_dummy_encryption, 0, MOPT_GTE0},
+	{Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET},
 	{Opt_err, 0, 0}
 };
@@ -4080,19 +4083,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;

no_journal:
-	sbi->s_ea_block_cache = ext4_xattr_create_cache();
-	if (!sbi->s_ea_block_cache) {
-		ext4_msg(sb, KERN_ERR, "Failed to create ea_block_cache");
-		goto failed_mount_wq;
-	}
-
-	if (ext4_has_feature_ea_inode(sb)) {
-		sbi->s_ea_inode_cache = ext4_xattr_create_cache();
-		if (!sbi->s_ea_inode_cache) {
-			ext4_msg(sb, KERN_ERR,
-				 "Failed to create ea_inode_cache");
-			goto failed_mount_wq;
+	if (!test_opt(sb, NO_MBCACHE)) {
+		sbi->s_ea_block_cache = ext4_xattr_create_cache();
+		if (!sbi->s_ea_block_cache) {
+			ext4_msg(sb, KERN_ERR,
+				 "Failed to create ea_block_cache");
+			goto failed_mount_wq;
+		}
+
+		if (ext4_has_feature_ea_inode(sb)) {
+			sbi->s_ea_inode_cache = ext4_xattr_create_cache();
+			if (!sbi->s_ea_inode_cache) {
+				ext4_msg(sb, KERN_ERR,
+					 "Failed to create ea_inode_cache");
+				goto failed_mount_wq;
+			}
 		}
 	}

 	if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) &&
@@ -4989,6 +4995,12 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 		}
 	}

+	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_NO_MBCACHE) {
+		ext4_msg(sb, KERN_ERR, "can't enable nombcache during remount");
+		err = -EINVAL;
+		goto restore_opts;
+	}
+
 	if ((sbi->s_mount_opt ^ old_opts.s_mount_opt) & EXT4_MOUNT_DAX) {
 		ext4_msg(sb, KERN_WARNING, "warning: refusing change of "
 			"dax flag with busy inodes while remounting");
@@ -991,10 +991,13 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
 			set_nlink(ea_inode, 1);
 			ext4_orphan_del(handle, ea_inode);

-			hash = ext4_xattr_inode_get_hash(ea_inode);
-			mb_cache_entry_create(ea_inode_cache, GFP_NOFS, hash,
-					      ea_inode->i_ino,
-					      true /* reusable */);
+			if (ea_inode_cache) {
+				hash = ext4_xattr_inode_get_hash(ea_inode);
+				mb_cache_entry_create(ea_inode_cache,
+						      GFP_NOFS, hash,
+						      ea_inode->i_ino,
+						      true /* reusable */);
+			}
 		}
 	} else {
 		WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
@@ -1008,9 +1011,11 @@ static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
 			clear_nlink(ea_inode);
 			ext4_orphan_add(handle, ea_inode);

-			hash = ext4_xattr_inode_get_hash(ea_inode);
-			mb_cache_entry_delete(ea_inode_cache, hash,
-					      ea_inode->i_ino);
+			if (ea_inode_cache) {
+				hash = ext4_xattr_inode_get_hash(ea_inode);
+				mb_cache_entry_delete(ea_inode_cache, hash,
+						      ea_inode->i_ino);
+			}
 		}
 	}
@@ -1194,7 +1199,9 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 		 * This must happen under buffer lock for
 		 * ext4_xattr_block_set() to reliably detect freed block
 		 */
-		mb_cache_entry_delete(ea_block_cache, hash, bh->b_blocknr);
+		if (ea_block_cache)
+			mb_cache_entry_delete(ea_block_cache, hash,
+					      bh->b_blocknr);
 		get_bh(bh);
 		unlock_buffer(bh);
@@ -1214,11 +1221,13 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 		if (ref == EXT4_XATTR_REFCOUNT_MAX - 1) {
 			struct mb_cache_entry *ce;

-			ce = mb_cache_entry_get(ea_block_cache, hash,
-						bh->b_blocknr);
-			if (ce) {
-				ce->e_reusable = 1;
-				mb_cache_entry_put(ea_block_cache, ce);
+			if (ea_block_cache) {
+				ce = mb_cache_entry_get(ea_block_cache, hash,
+							bh->b_blocknr);
+				if (ce) {
+					ce->e_reusable = 1;
+					mb_cache_entry_put(ea_block_cache, ce);
+				}
 			}
 		}
@@ -1395,6 +1404,9 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
 	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
 	void *ea_data;

+	if (!ea_inode_cache)
+		return NULL;
+
 	ce = mb_cache_entry_find_first(ea_inode_cache, hash);
 	if (!ce)
 		return NULL;
@@ -1465,8 +1477,9 @@ static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
 		return err;
 	}

-	mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash,
-			      ea_inode->i_ino, true /* reusable */);
+	if (EA_INODE_CACHE(inode))
+		mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash,
+				      ea_inode->i_ino, true /* reusable */);

 	*ret_inode = ea_inode;
 	return 0;
@@ -1793,8 +1806,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			 * ext4_xattr_block_set() to reliably detect modified
 			 * block
 			 */
-			mb_cache_entry_delete(ea_block_cache, hash,
-					      bs->bh->b_blocknr);
+			if (ea_block_cache)
+				mb_cache_entry_delete(ea_block_cache, hash,
+						      bs->bh->b_blocknr);
 			ea_bdebug(bs->bh, "modifying in-place");
 			error = ext4_xattr_set_entry(i, s, handle, inode,
 						     true /* is_block */);
@@ -2883,6 +2897,8 @@ ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache,
 				      EXT4_XATTR_REFCOUNT_MAX;
 	int error;

+	if (!ea_block_cache)
+		return;
+
 	error = mb_cache_entry_create(ea_block_cache, GFP_NOFS, hash,
 				      bh->b_blocknr, reusable);
 	if (error) {
@@ -2949,6 +2965,8 @@ ext4_xattr_block_cache_find(struct inode *inode,
 	struct mb_cache_entry *ce;
 	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

+	if (!ea_block_cache)
+		return NULL;
+
 	if (!header->h_hash)
 		return NULL;	/* never share */
 	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);