Commit e2844cce authored by Filipe Manana, committed by David Sterba

btrfs: remove inode_lock from struct btrfs_root and use xarray locks

Currently we use the spinlock inode_lock from struct btrfs_root to
serialize access to two different data structures:

1) The delayed inodes xarray (struct btrfs_root::delayed_nodes);
2) The inodes xarray (struct btrfs_root::inodes).

Instead of using our own lock, we can use the spinlock that is part of the
xarray implementation, through the xa_lock() and xa_unlock() APIs together
with the double-underscore-prefixed xarray APIs, which skip the internal
locking and assume the caller already holds the xarray's lock (see the
sketch below).

So remove the spinlock inode_lock from struct btrfs_root and use the
corresponding xarray locks. This brings 2 benefits:

1) We reduce the size of struct btrfs_root, from 1336 bytes down to
   1328 bytes on a 64-bit release kernel config;

2) We reduce lock contention by no longer using the same lock to
   serialize changes to two different and unrelated xarrays.
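
For illustration, here is a minimal sketch of the pattern (not part of this
patch; the struct and function names are hypothetical, only the xarray API
calls are real). The double-underscore variants skip the internal locking
and must be called with xa_lock() held, and since xa_lock() is a spinlock,
any allocation done inside the locked section must use GFP_ATOMIC:

  #include <linux/gfp.h>
  #include <linux/xarray.h>

  /* Hypothetical structure, standing in for struct btrfs_root. */
  struct demo_root {
  	struct xarray items;	/* protected by the xarray's internal lock */
  };

  /* Insert @item at @index using the xarray's internal lock. */
  static int demo_insert(struct demo_root *root, unsigned long index,
  			 void *item)
  {
  	void *old;

  	xa_lock(&root->items);
  	/*
  	 * __xa_store() assumes the caller holds xa_lock(); because we are
  	 * inside a spinlock, the allocation mode must be GFP_ATOMIC.
  	 */
  	old = __xa_store(&root->items, index, item, GFP_ATOMIC);
  	xa_unlock(&root->items);

  	/* xa_err() returns 0 on success or the negative errno in @old. */
  	return xa_err(old);
  }

Note that the plain xa_store() and xa_erase() calls take the xarray's
internal lock themselves, which is why the xa_store() call in
btrfs_add_inode_to_root() in the diff below is left with no explicit
locking around it.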
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Filipe Manana <fdmanana@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent d25f4ec1
@@ -221,7 +221,6 @@ struct btrfs_root {
 	struct list_head root_list;
-	spinlock_t inode_lock;
 	/*
 	 * Xarray that keeps track of in-memory inodes, protected by the lock
 	 * @inode_lock.
......
@@ -77,14 +77,14 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
 		return node;
 	}
-	spin_lock(&root->inode_lock);
+	xa_lock(&root->delayed_nodes);
 	node = xa_load(&root->delayed_nodes, ino);
 	if (node) {
 		if (btrfs_inode->delayed_node) {
 			refcount_inc(&node->refs);	/* can be accessed */
 			BUG_ON(btrfs_inode->delayed_node != node);
-			spin_unlock(&root->inode_lock);
+			xa_unlock(&root->delayed_nodes);
 			return node;
 		}
@@ -111,10 +111,10 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(
 			node = NULL;
 		}
-		spin_unlock(&root->inode_lock);
+		xa_unlock(&root->delayed_nodes);
 		return node;
 	}
-	spin_unlock(&root->inode_lock);
+	xa_unlock(&root->delayed_nodes);
 	return NULL;
 }
@@ -148,21 +148,21 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 		kmem_cache_free(delayed_node_cache, node);
 		return ERR_PTR(-ENOMEM);
 	}
-	spin_lock(&root->inode_lock);
+	xa_lock(&root->delayed_nodes);
 	ptr = xa_load(&root->delayed_nodes, ino);
 	if (ptr) {
 		/* Somebody inserted it, go back and read it. */
-		spin_unlock(&root->inode_lock);
+		xa_unlock(&root->delayed_nodes);
 		kmem_cache_free(delayed_node_cache, node);
 		node = NULL;
 		goto again;
 	}
-	ptr = xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
+	ptr = __xa_store(&root->delayed_nodes, ino, node, GFP_ATOMIC);
 	ASSERT(xa_err(ptr) != -EINVAL);
 	ASSERT(xa_err(ptr) != -ENOMEM);
 	ASSERT(ptr == NULL);
 	btrfs_inode->delayed_node = node;
-	spin_unlock(&root->inode_lock);
+	xa_unlock(&root->delayed_nodes);
 	return node;
 }
@@ -275,14 +275,12 @@ static void __btrfs_release_delayed_node(
 	if (refcount_dec_and_test(&delayed_node->refs)) {
 		struct btrfs_root *root = delayed_node->root;
-		spin_lock(&root->inode_lock);
+		xa_erase(&root->delayed_nodes, delayed_node->inode_id);
 		/*
 		 * Once our refcount goes to zero, nobody is allowed to bump it
 		 * back up. We can delete it now.
 		 */
 		ASSERT(refcount_read(&delayed_node->refs) == 0);
-		xa_erase(&root->delayed_nodes, delayed_node->inode_id);
-		spin_unlock(&root->inode_lock);
 		kmem_cache_free(delayed_node_cache, delayed_node);
 	}
 }
@@ -2057,9 +2055,9 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
 		struct btrfs_delayed_node *node;
 		int count;
-		spin_lock(&root->inode_lock);
+		xa_lock(&root->delayed_nodes);
 		if (xa_empty(&root->delayed_nodes)) {
-			spin_unlock(&root->inode_lock);
+			xa_unlock(&root->delayed_nodes);
 			return;
 		}
@@ -2076,7 +2074,7 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
 			if (count >= ARRAY_SIZE(delayed_nodes))
 				break;
 		}
-		spin_unlock(&root->inode_lock);
+		xa_unlock(&root->delayed_nodes);
 		index++;
 		for (int i = 0; i < count; i++) {
......
@@ -674,7 +674,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 	INIT_LIST_HEAD(&root->ordered_extents);
 	INIT_LIST_HEAD(&root->ordered_root);
 	INIT_LIST_HEAD(&root->reloc_dirty_list);
-	spin_lock_init(&root->inode_lock);
 	spin_lock_init(&root->delalloc_lock);
 	spin_lock_init(&root->ordered_extent_lock);
 	spin_lock_init(&root->accounting_lock);
......
@@ -5509,9 +5509,7 @@ static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
 			return ret;
 	}
-	spin_lock(&root->inode_lock);
 	existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);
-	spin_unlock(&root->inode_lock);
 	if (xa_is_err(existing)) {
 		ret = xa_err(existing);
@@ -5531,16 +5529,16 @@ static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
 	struct btrfs_inode *entry;
 	bool empty = false;
-	spin_lock(&root->inode_lock);
-	entry = xa_erase(&root->inodes, btrfs_ino(inode));
+	xa_lock(&root->inodes);
+	entry = __xa_erase(&root->inodes, btrfs_ino(inode));
 	if (entry == inode)
 		empty = xa_empty(&root->inodes);
-	spin_unlock(&root->inode_lock);
+	xa_unlock(&root->inodes);
 	if (empty && btrfs_root_refs(&root->root_item) == 0) {
-		spin_lock(&root->inode_lock);
+		xa_lock(&root->inodes);
 		empty = xa_empty(&root->inodes);
-		spin_unlock(&root->inode_lock);
+		xa_unlock(&root->inodes);
 		if (empty)
 			btrfs_add_dead_root(root);
 	}
@@ -10874,7 +10872,7 @@ struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
 	struct btrfs_inode *inode;
 	unsigned long from = min_ino;
-	spin_lock(&root->inode_lock);
+	xa_lock(&root->inodes);
 	while (true) {
 		inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
 		if (!inode)
@@ -10883,9 +10881,9 @@ struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
 			break;
 		from = btrfs_ino(inode) + 1;
-		cond_resched_lock(&root->inode_lock);
+		cond_resched_lock(&root->inodes.xa_lock);
 	}
-	spin_unlock(&root->inode_lock);
+	xa_unlock(&root->inodes);
 	return inode;
 }
......