Commit 945d8962 authored by Chris Mason

Merge branch 'cleanups' of git://repo.or.cz/linux-2.6/btrfs-unstable into inode_numbers

Conflicts:
	fs/btrfs/extent-tree.c
	fs/btrfs/free-space-cache.c
	fs/btrfs/inode.c
	fs/btrfs/tree-log.c
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parents 0d0ca30f 4ea02885
......@@ -288,7 +288,7 @@ int btrfs_acl_chmod(struct inode *inode)
return 0;
acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl) || !acl)
if (IS_ERR_OR_NULL(acl))
return PTR_ERR(acl);
clone = posix_acl_clone(acl, GFP_KERNEL);
......
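The acl.c hunk above folds the two-step check into a single helper. As a minimal sketch of its semantics (modeled on include/linux/err.h of this era, so the exact definition is an assumption), IS_ERR_OR_NULL() is true for NULL as well as for error-encoded pointers; and since PTR_ERR(NULL) evaluates to 0, the NULL case still returns success from btrfs_acl_chmod():

	#define MAX_ERRNO	4095
	/* error pointers live in the last page of the address space */
	#define IS_ERR_VALUE(x)	((x) >= (unsigned long)-MAX_ERRNO)

	static inline long IS_ERR_OR_NULL(const void *ptr)
	{
		return !ptr || IS_ERR_VALUE((unsigned long)ptr);
	}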
......@@ -333,7 +333,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
struct compressed_bio *cb;
unsigned long bytes_left;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
int page_index = 0;
int pg_index = 0;
struct page *page;
u64 first_byte = disk_start;
struct block_device *bdev;
......@@ -367,8 +367,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
/* create and submit bios for the compressed pages */
bytes_left = compressed_len;
for (page_index = 0; page_index < cb->nr_pages; page_index++) {
page = compressed_pages[page_index];
for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
page = compressed_pages[pg_index];
page->mapping = inode->i_mapping;
if (bio->bi_size)
ret = io_tree->ops->merge_bio_hook(page, 0,
......@@ -433,7 +433,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
struct compressed_bio *cb)
{
unsigned long end_index;
unsigned long page_index;
unsigned long pg_index;
u64 last_offset;
u64 isize = i_size_read(inode);
int ret;
......@@ -457,13 +457,13 @@ static noinline int add_ra_bio_pages(struct inode *inode,
end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
while (last_offset < compressed_end) {
page_index = last_offset >> PAGE_CACHE_SHIFT;
pg_index = last_offset >> PAGE_CACHE_SHIFT;
if (page_index > end_index)
if (pg_index > end_index)
break;
rcu_read_lock();
page = radix_tree_lookup(&mapping->page_tree, page_index);
page = radix_tree_lookup(&mapping->page_tree, pg_index);
rcu_read_unlock();
if (page) {
misses++;
......@@ -477,7 +477,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
if (!page)
break;
if (add_to_page_cache_lru(page, mapping, page_index,
if (add_to_page_cache_lru(page, mapping, pg_index,
GFP_NOFS)) {
page_cache_release(page);
goto next;
......@@ -561,7 +561,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
unsigned long compressed_len;
unsigned long nr_pages;
unsigned long page_index;
unsigned long pg_index;
struct page *page;
struct block_device *bdev;
struct bio *comp_bio;
......@@ -614,10 +614,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
for (page_index = 0; page_index < nr_pages; page_index++) {
cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
for (pg_index = 0; pg_index < nr_pages; pg_index++) {
cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
__GFP_HIGHMEM);
if (!cb->compressed_pages[page_index])
if (!cb->compressed_pages[pg_index])
goto fail2;
}
cb->nr_pages = nr_pages;
......@@ -635,8 +635,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
comp_bio->bi_end_io = end_compressed_bio_read;
atomic_inc(&cb->pending_bios);
for (page_index = 0; page_index < nr_pages; page_index++) {
page = cb->compressed_pages[page_index];
for (pg_index = 0; pg_index < nr_pages; pg_index++) {
page = cb->compressed_pages[pg_index];
page->mapping = inode->i_mapping;
page->index = em_start >> PAGE_CACHE_SHIFT;
......@@ -703,8 +703,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
return 0;
fail2:
for (page_index = 0; page_index < nr_pages; page_index++)
free_page((unsigned long)cb->compressed_pages[page_index]);
for (pg_index = 0; pg_index < nr_pages; pg_index++)
free_page((unsigned long)cb->compressed_pages[pg_index]);
kfree(cb->compressed_pages);
fail1:
......@@ -946,7 +946,7 @@ void btrfs_exit_compress(void)
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long total_out, u64 disk_start,
struct bio_vec *bvec, int vcnt,
unsigned long *page_index,
unsigned long *pg_index,
unsigned long *pg_offset)
{
unsigned long buf_offset;
......@@ -955,7 +955,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long working_bytes = total_out - buf_start;
unsigned long bytes;
char *kaddr;
struct page *page_out = bvec[*page_index].bv_page;
struct page *page_out = bvec[*pg_index].bv_page;
/*
* start byte is the first byte of the page we're currently
......@@ -996,11 +996,11 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
/* check if we need to pick another page */
if (*pg_offset == PAGE_CACHE_SIZE) {
(*page_index)++;
if (*page_index >= vcnt)
(*pg_index)++;
if (*pg_index >= vcnt)
return 0;
page_out = bvec[*page_index].bv_page;
page_out = bvec[*pg_index].bv_page;
*pg_offset = 0;
start_byte = page_offset(page_out) - disk_start;
......
......@@ -37,7 +37,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long total_out, u64 disk_start,
struct bio_vec *bvec, int vcnt,
unsigned long *page_index,
unsigned long *pg_index,
unsigned long *pg_offset);
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
......
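The page_index -> pg_index renames above (and the page_offset -> pg_offset renames later in this series) are not purely cosmetic: <linux/pagemap.h> already provides page_index() and page_offset() as helpers, so locals with those names shadow the helpers inside any function that declares them. That clash is, as far as I can tell, what the cleanup removes; a sketch:

	#include <linux/pagemap.h>

	static unsigned long example(struct page *page)
	{
		unsigned long pg_index = 0;	/* renamed loop counter */

		/* If the local were still called page_index, the call
		 * below would not compile: the variable would shadow
		 * the pagemap.h helper for the rest of the function. */
		return pg_index + page_index(page);
	}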
......@@ -102,7 +102,7 @@ void btrfs_free_path(struct btrfs_path *p)
{
if (!p)
return;
btrfs_release_path(NULL, p);
btrfs_release_path(p);
kmem_cache_free(btrfs_path_cachep, p);
}
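The btrfs_release_path() change that dominates the rest of this merge starts here: the function never used its btrfs_root argument, so the series drops the parameter and mechanically updates every caller (many of which already passed NULL). Before/after, as a sketch:

	/* before: the root argument was accepted but ignored */
	void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);

	/* after: one parameter, identical behavior */
	void btrfs_release_path(struct btrfs_path *p);

	btrfs_release_path(root, path);	/* old call site */
	btrfs_release_path(NULL, path);	/* old, root often unknown */
	btrfs_release_path(path);	/* new */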
......@@ -112,7 +112,7 @@ void btrfs_free_path(struct btrfs_path *p)
*
* It is safe to call this on paths that hold no locks or extent buffers.
*/
noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
noinline void btrfs_release_path(struct btrfs_path *p)
{
int i;
......@@ -1323,7 +1323,7 @@ static noinline int reada_for_balance(struct btrfs_root *root,
ret = -EAGAIN;
/* release the whole path */
btrfs_release_path(root, path);
btrfs_release_path(path);
/* read the blocks */
if (block1)
......@@ -1470,7 +1470,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
return 0;
}
free_extent_buffer(tmp);
btrfs_release_path(NULL, p);
btrfs_release_path(p);
return -EIO;
}
}
......@@ -1489,7 +1489,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
if (p->reada)
reada_for_search(root, p, level, slot, key->objectid);
btrfs_release_path(NULL, p);
btrfs_release_path(p);
ret = -EAGAIN;
tmp = read_tree_block(root, blocknr, blocksize, 0);
......@@ -1558,7 +1558,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
}
b = p->nodes[level];
if (!b) {
btrfs_release_path(NULL, p);
btrfs_release_path(p);
goto again;
}
BUG_ON(btrfs_header_nritems(b) == 1);
......@@ -1748,7 +1748,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
if (!p->leave_spinning)
btrfs_set_path_blocking(p);
if (ret < 0)
btrfs_release_path(root, p);
btrfs_release_path(p);
return ret;
}
......@@ -3021,7 +3021,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
struct btrfs_file_extent_item);
extent_len = btrfs_file_extent_num_bytes(leaf, fi);
}
btrfs_release_path(root, path);
btrfs_release_path(path);
path->keep_locks = 1;
path->search_for_split = 1;
......@@ -3641,7 +3641,6 @@ int setup_items_for_insert(struct btrfs_trans_handle *trans,
ret = 0;
if (slot == 0) {
struct btrfs_disk_key disk_key;
btrfs_cpu_key_to_disk(&disk_key, cpu_key);
ret = fixup_low_keys(trans, root, path, &disk_key, 1);
}
......@@ -3943,7 +3942,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
else
return 1;
btrfs_release_path(root, path);
btrfs_release_path(path);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
return ret;
......@@ -4067,7 +4066,7 @@ int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
sret = btrfs_find_next_key(root, path, min_key, level,
cache_only, min_trans);
if (sret == 0) {
btrfs_release_path(root, path);
btrfs_release_path(path);
goto again;
} else {
goto out;
......@@ -4146,7 +4145,7 @@ int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
btrfs_node_key_to_cpu(c, &cur_key, slot);
orig_lowest = path->lowest_level;
btrfs_release_path(root, path);
btrfs_release_path(path);
path->lowest_level = level;
ret = btrfs_search_slot(NULL, root, &cur_key, path,
0, 0);
......@@ -4223,7 +4222,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
again:
level = 1;
next = NULL;
btrfs_release_path(root, path);
btrfs_release_path(path);
path->keep_locks = 1;
......@@ -4279,7 +4278,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
goto again;
if (ret < 0) {
btrfs_release_path(root, path);
btrfs_release_path(path);
goto done;
}
......@@ -4318,7 +4317,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
goto again;
if (ret < 0) {
btrfs_release_path(root, path);
btrfs_release_path(path);
goto done;
}
......
......@@ -746,12 +746,12 @@ struct btrfs_space_info {
*/
unsigned long reservation_progress;
int full:1; /* indicates that we cannot allocate any more
unsigned int full:1; /* indicates that we cannot allocate any more
chunks for this space */
int chunk_alloc:1; /* set if we are allocating a chunk */
unsigned int chunk_alloc:1; /* set if we are allocating a chunk */
int force_alloc; /* set if we need to force a chunk alloc for
this space */
unsigned int force_alloc; /* set if we need to force a chunk
alloc for this space */
struct list_head list;
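The btrfs_space_info hunk above is a correctness fix dressed as a cleanup: with gcc's default of treating plain int bitfields as signed (the signedness is implementation-defined), a 1-bit field can only hold 0 and -1, so full = 1 reads back as -1 and a test like full == 1 is never true. Making the fields unsigned gives them the expected 0/1 values. A standalone illustration:

	struct demo {
		int sfull:1;		/* signed: stores 0 or -1 */
		unsigned int ufull:1;	/* unsigned: stores 0 or 1 */
	};

	struct demo d = { .sfull = 1, .ufull = 1 };
	/* d.sfull == -1, so (d.sfull == 1) is false;
	 * d.ufull ==  1, so (d.ufull == 1) is true. */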
......@@ -1463,26 +1463,12 @@ static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb,
return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr));
}
static inline void btrfs_set_stripe_offset_nr(struct extent_buffer *eb,
struct btrfs_chunk *c, int nr,
u64 val)
{
btrfs_set_stripe_offset(eb, btrfs_stripe_nr(c, nr), val);
}
static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
struct btrfs_chunk *c, int nr)
{
return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr));
}
static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb,
struct btrfs_chunk *c, int nr,
u64 val)
{
btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val);
}
/* struct btrfs_block_group_item */
BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item,
used, 64);
......@@ -1540,14 +1526,6 @@ btrfs_inode_ctime(struct btrfs_inode_item *inode_item)
return (struct btrfs_timespec *)ptr;
}
static inline struct btrfs_timespec *
btrfs_inode_otime(struct btrfs_inode_item *inode_item)
{
unsigned long ptr = (unsigned long)inode_item;
ptr += offsetof(struct btrfs_inode_item, otime);
return (struct btrfs_timespec *)ptr;
}
BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
......@@ -1898,33 +1876,6 @@ static inline u8 *btrfs_header_chunk_tree_uuid(struct extent_buffer *eb)
return (u8 *)ptr;
}
static inline u8 *btrfs_super_fsid(struct extent_buffer *eb)
{
unsigned long ptr = offsetof(struct btrfs_super_block, fsid);
return (u8 *)ptr;
}
static inline u8 *btrfs_header_csum(struct extent_buffer *eb)
{
unsigned long ptr = offsetof(struct btrfs_header, csum);
return (u8 *)ptr;
}
static inline struct btrfs_node *btrfs_buffer_node(struct extent_buffer *eb)
{
return NULL;
}
static inline struct btrfs_leaf *btrfs_buffer_leaf(struct extent_buffer *eb)
{
return NULL;
}
static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb)
{
return NULL;
}
static inline int btrfs_is_leaf(struct extent_buffer *eb)
{
return btrfs_header_level(eb) == 0;
......@@ -2078,22 +2029,6 @@ static inline struct btrfs_root *btrfs_sb(struct super_block *sb)
return sb->s_fs_info;
}
static inline int btrfs_set_root_name(struct btrfs_root *root,
const char *name, int len)
{
/* if we already have a name just free it */
kfree(root->name);
root->name = kmalloc(len+1, GFP_KERNEL);
if (!root->name)
return -ENOMEM;
memcpy(root->name, name, len);
root->name[len] = '\0';
return 0;
}
static inline u32 btrfs_level_size(struct btrfs_root *root, int level)
{
if (level == 0)
......@@ -2138,12 +2073,9 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
u64 num_bytes, u64 *refs, u64 *flags);
int btrfs_pin_extent(struct btrfs_root *root,
u64 bytenr, u64 num, int reserved);
int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *leaf);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 objectid, u64 offset, u64 bytenr);
int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy);
struct btrfs_block_group_cache *btrfs_lookup_block_group(
struct btrfs_fs_info *info,
u64 bytenr);
......@@ -2320,7 +2252,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *parent,
int start_slot, int cache_only, u64 *last_ret,
struct btrfs_key *progress);
void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
void btrfs_set_path_blocking(struct btrfs_path *p);
......@@ -2343,11 +2275,6 @@ int setup_items_for_insert(struct btrfs_trans_handle *trans,
u32 total_data, u32 total_size, int nr);
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_key *key, void *data, u32 data_size);
int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_key *cpu_key, u32 *data_size,
int nr);
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
......@@ -2393,8 +2320,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
*item);
int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
btrfs_root_item *item, struct btrfs_key *key);
int btrfs_search_root(struct btrfs_root *root, u64 search_start,
u64 *found_objectid);
int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
int btrfs_set_root_node(struct btrfs_root_item *item,
......@@ -2493,15 +2418,10 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_ordered_sum *sums);
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
struct bio *bio, u64 file_start, int contig);
int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode,
u64 start, unsigned long len);
struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
u64 bytenr, int cow);
int btrfs_csum_truncate(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_path *path,
u64 isize);
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start,
u64 end, struct list_head *list);
/* inode.c */
......@@ -2532,8 +2452,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
u32 min_type);
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
int sync);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
struct extent_state **cached_state);
int btrfs_writepages(struct address_space *mapping,
......@@ -2550,7 +2468,6 @@ unsigned long btrfs_force_ra(struct address_space *mapping,
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
void btrfs_put_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
void btrfs_dirty_inode(struct inode *inode);
struct inode *btrfs_alloc_inode(struct super_block *sb);
......@@ -2561,10 +2478,8 @@ void btrfs_destroy_cachep(void);
long btrfs_ioctl_trans_end(struct file *file);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *was_new);
int btrfs_commit_write(struct file *file, struct page *page,
unsigned from, unsigned to);
struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
size_t page_offset, u64 start, u64 end,
size_t pg_offset, u64 start, u64 end,
int create);
int btrfs_update_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
......@@ -2601,7 +2516,6 @@ void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
int btrfs_sync_file(struct file *file, int datasync);
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
int skip_pinned);
int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
extern const struct file_operations btrfs_file_operations;
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
u64 start, u64 end, u64 *hint_byte, int drop_cache);
......@@ -2621,10 +2535,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
/* sysfs.c */
int btrfs_init_sysfs(void);
void btrfs_exit_sysfs(void);
int btrfs_sysfs_add_super(struct btrfs_fs_info *fs);
int btrfs_sysfs_add_root(struct btrfs_root *root);
void btrfs_sysfs_del_root(struct btrfs_root *root);
void btrfs_sysfs_del_super(struct btrfs_fs_info *root);
/* xattr.c */
ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
......
......@@ -813,7 +813,7 @@ static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
ret = btrfs_insert_delayed_item(trans, root, path, curr);
if (ret < 0) {
btrfs_release_path(root, path);
btrfs_release_path(path);
goto insert_end;
}
......@@ -827,7 +827,7 @@ static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
btrfs_release_delayed_item(prev);
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_release_path(root, path);
btrfs_release_path(path);
mutex_unlock(&node->mutex);
goto do_again;
......@@ -925,7 +925,7 @@ static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
curr = __btrfs_next_delayed_item(prev);
btrfs_release_delayed_item(prev);
ret = 0;
btrfs_release_path(root, path);
btrfs_release_path(path);
if (curr)
goto do_again;
else
......@@ -933,12 +933,12 @@ static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
}
btrfs_batch_delete_items(trans, root, path, curr);
btrfs_release_path(root, path);
btrfs_release_path(path);
mutex_unlock(&node->mutex);
goto do_again;
delete_fail:
btrfs_release_path(root, path);
btrfs_release_path(path);
mutex_unlock(&node->mutex);
return ret;
}
......@@ -982,7 +982,7 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
key.offset = 0;
ret = btrfs_lookup_inode(trans, root, path, &key, 1);
if (ret > 0) {
btrfs_release_path(root, path);
btrfs_release_path(path);
mutex_unlock(&node->mutex);
return -ENOENT;
} else if (ret < 0) {
......@@ -997,7 +997,7 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
sizeof(struct btrfs_inode_item));
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(root, path);
btrfs_release_path(path);
btrfs_delayed_inode_release_metadata(root, node);
btrfs_release_delayed_inode(node);
......
......@@ -280,44 +280,6 @@ int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
return 1;
}
/*
* This checks to see if there are any delayed refs in the
* btree for a given bytenr. It returns one if it finds any
* and zero otherwise.
*
* If it only finds a head node, it returns 0.
*
* The idea is to use this when deciding if you can safely delete an
* extent from the extent allocation tree. There may be a pending
* ref in the rbtree that adds or removes references, so as long as this
* returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
* allocation tree.
*/
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
{
struct btrfs_delayed_ref_node *ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct rb_node *prev_node;
int ret = 0;
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
if (ref) {
prev_node = rb_prev(&ref->rb_node);
if (!prev_node)
goto out;
ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
rb_node);
if (ref->bytenr == bytenr)
ret = 1;
}
out:
spin_unlock(&delayed_refs->lock);
return ret;
}
/*
* helper function to update an extent delayed ref in the
* rbtree. existing and update must both have the same
......@@ -747,79 +709,3 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
return btrfs_delayed_node_to_head(ref);
return NULL;
}
/*
* add a delayed ref to the tree. This does all of the accounting required
* to make sure the delayed ref is eventually processed before this
* transaction commits.
*
* The main point of this call is to add and remove a backreference in a single
* shot, taking the lock only once, and only searching for the head node once.
*
* It is the same as doing a ref add and delete in two separate calls.
*/
#if 0
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 orig_parent,
u64 parent, u64 orig_ref_root, u64 ref_root,
u64 orig_ref_generation, u64 ref_generation,
u64 owner_objectid, int pin)
{
struct btrfs_delayed_ref *ref;
struct btrfs_delayed_ref *old_ref;
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
int ret;
ref = kmalloc(sizeof(*ref), GFP_NOFS);
if (!ref)
return -ENOMEM;
old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
if (!old_ref) {
kfree(ref);
return -ENOMEM;
}
/*
* the parent = 0 case comes from cases where we don't actually
* know the parent yet. It will get updated later via a add/drop
* pair.
*/
if (parent == 0)
parent = bytenr;
if (orig_parent == 0)
orig_parent = bytenr;
head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
if (!head_ref) {
kfree(ref);
kfree(old_ref);
return -ENOMEM;
}
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
/*
* insert both the head node and the new ref without dropping
* the spin lock
*/
ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
(u64)-1, 0, 0, 0,
BTRFS_UPDATE_DELAYED_HEAD, 0);
BUG_ON(ret);
ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
parent, ref_root, ref_generation,
owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
BUG_ON(ret);
ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
orig_parent, orig_ref_root,
orig_ref_generation, owner_objectid,
BTRFS_DROP_DELAYED_REF, pin);
BUG_ON(ret);
spin_unlock(&delayed_refs->lock);
return 0;
}
#endif
......@@ -166,12 +166,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes, u64 orig_parent,
u64 parent, u64 orig_ref_root, u64 ref_root,
u64 orig_ref_generation, u64 ref_generation,
u64 owner_objectid, int pin);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head);
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
......
......@@ -176,7 +176,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
ret = 0;
goto out_free;
}
btrfs_release_path(root, path);
btrfs_release_path(path);
ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir,
&disk_key, type, index);
......
......@@ -29,6 +29,7 @@
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include "compat.h"
#include "ctree.h"
......@@ -138,7 +139,7 @@ static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
* that covers the entire device
*/
static struct extent_map *btree_get_extent(struct inode *inode,
struct page *page, size_t page_offset, u64 start, u64 len,
struct page *page, size_t pg_offset, u64 start, u64 len,
int create)
{
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
......@@ -155,7 +156,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
}
read_unlock(&em_tree->lock);
em = alloc_extent_map(GFP_NOFS);
em = alloc_extent_map();
if (!em) {
em = ERR_PTR(-ENOMEM);
goto out;
......@@ -255,14 +256,12 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
memcpy(&found, result, csum_size);
read_extent_buffer(buf, &val, 0, csum_size);
if (printk_ratelimit()) {
printk(KERN_INFO "btrfs: %s checksum verify "
printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
"failed on %llu wanted %X found %X "
"level %d\n",
root->fs_info->sb->s_id,
(unsigned long long)buf->start, val, found,
btrfs_header_level(buf));
}
if (result != (char *)&inline_result)
kfree(result);
return 1;
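Several disk-io.c hunks replace the open-coded if (printk_ratelimit()) printk(...) pattern with printk_ratelimited(), which is why <linux/ratelimit.h> is now included above. Roughly (modeled on the ratelimit.h of this period, not quoted from it), the macro keeps a static per-callsite ratelimit state, so one noisy message can no longer consume the single global budget that printk_ratelimit() shares across the whole kernel:

	#define printk_ratelimited(fmt, ...)  ({			\
		static DEFINE_RATELIMIT_STATE(_rs,			\
					      DEFAULT_RATELIMIT_INTERVAL, \
					      DEFAULT_RATELIMIT_BURST);	\
									\
		if (__ratelimit(&_rs))					\
			printk(fmt, ##__VA_ARGS__);			\
	})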
......@@ -297,13 +296,11 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
ret = 0;
goto out;
}
if (printk_ratelimit()) {
printk("parent transid verify failed on %llu wanted %llu "
printk_ratelimited("parent transid verify failed on %llu wanted %llu "
"found %llu\n",
(unsigned long long)eb->start,
(unsigned long long)parent_transid,
(unsigned long long)btrfs_header_generation(eb));
}
ret = 1;
clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
out:
......@@ -381,7 +378,7 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
len = page->private >> 2;
WARN_ON(len == 0);
eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
eb = alloc_extent_buffer(tree, start, len, page);
if (eb == NULL) {
WARN_ON(1);
goto out;
......@@ -526,7 +523,7 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
len = page->private >> 2;
WARN_ON(len == 0);
eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
eb = alloc_extent_buffer(tree, start, len, page);
if (eb == NULL) {
ret = -EIO;
goto out;
......@@ -534,12 +531,10 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
found_start = btrfs_header_bytenr(eb);
if (found_start != start) {
if (printk_ratelimit()) {
printk(KERN_INFO "btrfs bad tree block start "
printk_ratelimited(KERN_INFO "btrfs bad tree block start "
"%llu %llu\n",
(unsigned long long)found_start,
(unsigned long long)eb->start);
}
ret = -EIO;
goto err;
}
......@@ -551,10 +546,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
goto err;
}
if (check_tree_block_fsid(root, eb)) {
if (printk_ratelimit()) {
printk(KERN_INFO "btrfs bad fsid on block %llu\n",
printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
(unsigned long long)eb->start);
}
ret = -EIO;
goto err;
}
......@@ -651,12 +644,6 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
return 256 * limit;
}
int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
{
return atomic_read(&info->nr_async_bios) >
btrfs_async_submit_limit(info);
}
static void run_one_async_start(struct btrfs_work *work)
{
struct async_submit_bio *async;
......@@ -964,7 +951,7 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
struct inode *btree_inode = root->fs_info->btree_inode;
struct extent_buffer *eb;
eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
bytenr, blocksize, GFP_NOFS);
bytenr, blocksize);
return eb;
}
......@@ -975,7 +962,7 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
struct extent_buffer *eb;
eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
bytenr, blocksize, NULL, GFP_NOFS);
bytenr, blocksize, NULL);
return eb;
}
......@@ -1082,7 +1069,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
root->log_transid = 0;
root->last_log_commit = 0;
extent_io_tree_init(&root->dirty_log_pages,
fs_info->btree_inode->i_mapping, GFP_NOFS);
fs_info->btree_inode->i_mapping);
memset(&root->root_key, 0, sizeof(root->root_key));
memset(&root->root_item, 0, sizeof(root->root_item));
......@@ -1285,21 +1272,6 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
return root;
}
struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
u64 root_objectid)
{
struct btrfs_root *root;
if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
return fs_info->tree_root;
if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
return fs_info->extent_root;
root = radix_tree_lookup(&fs_info->fs_roots_radix,
(unsigned long)root_objectid);
return root;
}
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
struct btrfs_key *location)
{
......@@ -1384,41 +1356,6 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
return ERR_PTR(ret);
}
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_key *location,
const char *name, int namelen)
{
return btrfs_read_fs_root_no_name(fs_info, location);
#if 0
struct btrfs_root *root;
int ret;
root = btrfs_read_fs_root_no_name(fs_info, location);
if (!root)
return NULL;
if (root->in_sysfs)
return root;
ret = btrfs_set_root_name(root, name, namelen);
if (ret) {
free_extent_buffer(root->node);
kfree(root);
return ERR_PTR(ret);
}
ret = btrfs_sysfs_add_root(root);
if (ret) {
free_extent_buffer(root->node);
kfree(root->name);
kfree(root);
return ERR_PTR(ret);
}
root->in_sysfs = 1;
return root;
#endif
}
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
......@@ -1626,7 +1563,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
GFP_NOFS);
struct btrfs_root *tree_root = btrfs_sb(sb);
struct btrfs_fs_info *fs_info = tree_root->fs_info;
struct btrfs_fs_info *fs_info = NULL;
struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
GFP_NOFS);
struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
......@@ -1638,11 +1575,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
struct btrfs_super_block *disk_super;
if (!extent_root || !tree_root || !fs_info ||
if (!extent_root || !tree_root || !tree_root->fs_info ||
!chunk_root || !dev_root || !csum_root) {
err = -ENOMEM;
goto fail;
}
fs_info = tree_root->fs_info;
ret = init_srcu_struct(&fs_info->subvol_srcu);
if (ret) {
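The fs_info change in open_ctree() above is more than churn. The old code initialized fs_info from tree_root in the declaration, before the combined NULL check ran, so a NULL tree_root (or one without a valid fs_info) would be dereferenced before it could be rejected. The new code checks first and assigns afterwards; the pattern, reduced to a sketch:

	/* before: the dereference happens in the initializer */
	struct btrfs_fs_info *fs_info = tree_root->fs_info;
	if (!tree_root || !fs_info)
		goto fail;

	/* after: validate, then take the pointer; || short-circuits,
	 * so tree_root->fs_info is only read when tree_root != NULL */
	struct btrfs_fs_info *fs_info = NULL;
	if (!tree_root || !tree_root->fs_info)
		goto fail;
	fs_info = tree_root->fs_info;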
......@@ -1733,10 +1671,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
fs_info->btree_inode->i_mapping,
GFP_NOFS);
extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
GFP_NOFS);
fs_info->btree_inode->i_mapping);
extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
......@@ -1750,9 +1686,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->block_group_cache_tree = RB_ROOT;
extent_io_tree_init(&fs_info->freed_extents[0],
fs_info->btree_inode->i_mapping, GFP_NOFS);
fs_info->btree_inode->i_mapping);
extent_io_tree_init(&fs_info->freed_extents[1],
fs_info->btree_inode->i_mapping, GFP_NOFS);
fs_info->btree_inode->i_mapping);
fs_info->pinned_extents = &fs_info->freed_extents[0];
fs_info->do_barriers = 1;
......@@ -2194,11 +2130,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
if (uptodate) {
set_buffer_uptodate(bh);
} else {
if (printk_ratelimit()) {
printk(KERN_WARNING "lost page write due to "
printk_ratelimited(KERN_WARNING "lost page write due to "
"I/O error on %s\n",
bdevname(bh->b_bdev, b));
}
/* note, we don't set_buffer_write_io_error because we have
* our own ways of dealing with the IO errors
*/
......@@ -2756,7 +2690,7 @@ int btree_lock_page_hook(struct page *page)
goto out;
len = page->private >> 2;
eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
eb = find_extent_buffer(io_tree, bytenr, len);
if (!eb)
goto out;
......
......@@ -55,36 +55,20 @@ int btrfs_commit_super(struct btrfs_root *root);
int btrfs_error_commit_super(struct btrfs_root *root);
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize);
struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
u64 root_objectid);
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_key *location,
const char *name, int namelen);
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
struct btrfs_key *location);
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
struct btrfs_key *location);
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
int btrfs_insert_dev_radix(struct btrfs_root *root,
struct block_device *bdev,
u64 device_id,
u64 block_start,
u64 num_blocks);
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
void btrfs_mark_buffer_dirty_nonblocking(struct extent_buffer *buf);
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
int wait_on_tree_block_writeback(struct btrfs_root *root,
struct extent_buffer *buf);
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
void btrfs_csum_final(u32 crc, char *result);
int btrfs_open_device(struct btrfs_device *dev);
int btrfs_verify_block_csum(struct btrfs_root *root,
struct extent_buffer *buf);
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
int metadata);
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
......@@ -92,8 +76,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
unsigned long bio_flags, u64 bio_offset,
extent_submit_bio_hook_t *submit_bio_start,
extent_submit_bio_hook_t *submit_bio_done);
int btrfs_congested_async(struct btrfs_fs_info *info, int iodone);
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
int btrfs_write_tree_block(struct extent_buffer *buf);
int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
......
......@@ -153,23 +153,14 @@ static inline int extent_compress_type(unsigned long bio_flags)
struct extent_map_tree;
static inline struct extent_state *extent_state_next(struct extent_state *state)
{
struct rb_node *node;
node = rb_next(&state->rb_node);
if (!node)
return NULL;
return rb_entry(node, struct extent_state, rb_node);
}
typedef struct extent_map *(get_extent_t)(struct inode *inode,
struct page *page,
size_t page_offset,
size_t pg_offset,
u64 start, u64 len,
int create);
void extent_io_tree_init(struct extent_io_tree *tree,
struct address_space *mapping, gfp_t mask);
struct address_space *mapping);
int try_release_extent_mapping(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask);
......@@ -215,14 +206,8 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start,
u64 end, gfp_t mask);
int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state, gfp_t mask);
int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
u64 *start_ret, u64 *end_ret, int bits);
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
......@@ -243,28 +228,17 @@ int extent_readpages(struct extent_io_tree *tree,
struct address_space *mapping,
struct list_head *pages, unsigned nr_pages,
get_extent_t get_extent);
int extent_prepare_write(struct extent_io_tree *tree,
struct inode *inode, struct page *page,
unsigned from, unsigned to, get_extent_t *get_extent);
int extent_commit_write(struct extent_io_tree *tree,
struct inode *inode, struct page *page,
unsigned from, unsigned to);
sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
get_extent_t *get_extent);
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len, get_extent_t *get_extent);
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end);
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
void set_page_extent_mapped(struct page *page);
struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
u64 start, unsigned long len,
struct page *page0,
gfp_t mask);
struct page *page0);
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
u64 start, unsigned long len,
gfp_t mask);
u64 start, unsigned long len);
void free_extent_buffer(struct extent_buffer *eb);
int read_extent_buffer_pages(struct extent_io_tree *tree,
struct extent_buffer *eb, u64 start, int wait,
......@@ -292,16 +266,11 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len);
void memset_extent_buffer(struct extent_buffer *eb, char c,
unsigned long start, unsigned long len);
int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
struct extent_buffer *eb);
int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end);
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
int clear_extent_buffer_dirty(struct extent_io_tree *tree,
struct extent_buffer *eb);
int set_extent_buffer_dirty(struct extent_io_tree *tree,
struct extent_buffer *eb);
int test_extent_buffer_dirty(struct extent_io_tree *tree,
struct extent_buffer *eb);
int set_extent_buffer_uptodate(struct extent_io_tree *tree,
struct extent_buffer *eb);
int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
......@@ -319,7 +288,6 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
unsigned long *map_start,
unsigned long *map_len, int km);
void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
int release_extent_buffer_tail_pages(struct extent_buffer *eb);
int extent_range_uptodate(struct extent_io_tree *tree,
u64 start, u64 end);
int extent_clear_unlock_delalloc(struct inode *inode,
......
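Same simplification as the extent-map changes below: alloc_extent_buffer() and find_extent_buffer() were always called with GFP_NOFS, so the mask parameter disappears from the prototypes here and from the disk-io.c call sites above. A caller shrinks from

	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);

to

	eb = alloc_extent_buffer(tree, start, len, page);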
......@@ -28,12 +28,11 @@ void extent_map_exit(void)
/**
* extent_map_tree_init - initialize extent map tree
* @tree: tree to initialize
* @mask: flags for memory allocations during tree operations
*
* Initialize the extent tree @tree. Should be called for each new inode
* or other user of the extent_map interface.
*/
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
void extent_map_tree_init(struct extent_map_tree *tree)
{
tree->map = RB_ROOT;
rwlock_init(&tree->lock);
......@@ -41,16 +40,15 @@ void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
/**
* alloc_extent_map - allocate new extent map structure
* @mask: memory allocation flags
*
* Allocate a new extent_map structure. The new structure is
* returned with a reference count of one and needs to be
* freed using free_extent_map()
*/
struct extent_map *alloc_extent_map(gfp_t mask)
struct extent_map *alloc_extent_map(void)
{
struct extent_map *em;
em = kmem_cache_alloc(extent_map_cache, mask);
em = kmem_cache_alloc(extent_map_cache, GFP_NOFS);
if (!em)
return NULL;
em->in_tree = 0;
......
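extent_map_tree_init() and alloc_extent_map() get the same treatment: every caller passed GFP_NOFS, so the gfp_t parameters are dropped and the flag is hardcoded in the kmem_cache_alloc() call above. A minimal before/after at a call site:

	/* before */
	extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
	em = alloc_extent_map(GFP_NOFS);

	/* after: the allocation context is implied */
	extent_map_tree_init(&BTRFS_I(inode)->extent_tree);
	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;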
......@@ -49,14 +49,14 @@ static inline u64 extent_map_block_end(struct extent_map *em)
return em->block_start + em->block_len;
}
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask);
void extent_map_tree_init(struct extent_map_tree *tree);
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len);
int add_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em);
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
struct extent_map *alloc_extent_map(gfp_t mask);
struct extent_map *alloc_extent_map(void);
void free_extent_map(struct extent_map *em);
int __init extent_map_init(void);
void extent_map_exit(void);
......
......@@ -193,7 +193,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
u32 item_size;
if (item)
btrfs_release_path(root, path);
btrfs_release_path(path);
item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
path, disk_bytenr, 0);
if (IS_ERR(item)) {
......@@ -214,7 +214,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
(unsigned long long)offset);
}
item = NULL;
btrfs_release_path(root, path);
btrfs_release_path(path);
goto found;
}
btrfs_item_key_to_cpu(path->nodes[0], &found_key,
......@@ -632,7 +632,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
if (key.offset < bytenr)
break;
}
btrfs_release_path(root, path);
btrfs_release_path(path);
}
out:
btrfs_free_path(path);
......@@ -723,7 +723,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
* at this point, we know the tree has an item, but it isn't big
* enough yet to put our csum in. Grow it
*/
btrfs_release_path(root, path);
btrfs_release_path(path);
ret = btrfs_search_slot(trans, root, &file_key, path,
csum_size, 1);
if (ret < 0)
......@@ -767,7 +767,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
}
insert:
btrfs_release_path(root, path);
btrfs_release_path(path);
csum_offset = 0;
if (found_next) {
u64 tmp = total_bytes + root->sectorsize;
......@@ -851,7 +851,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
}
btrfs_mark_buffer_dirty(path->nodes[0]);
if (total_bytes < sums->len) {
btrfs_release_path(root, path);
btrfs_release_path(path);
cond_resched();
goto again;
}
......
......@@ -191,9 +191,9 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
}
while (1) {
if (!split)
split = alloc_extent_map(GFP_NOFS);
split = alloc_extent_map();
if (!split2)
split2 = alloc_extent_map(GFP_NOFS);
split2 = alloc_extent_map();
BUG_ON(!split || !split2);
write_lock(&em_tree->lock);
......@@ -377,7 +377,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
search_start = max(key.offset, start);
if (recow) {
btrfs_release_path(root, path);
btrfs_release_path(path);
continue;
}
......@@ -394,7 +394,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
ret = btrfs_duplicate_item(trans, root, path,
&new_key);
if (ret == -EAGAIN) {
btrfs_release_path(root, path);
btrfs_release_path(path);
continue;
}
if (ret < 0)
......@@ -517,7 +517,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
del_nr = 0;
del_slot = 0;
btrfs_release_path(root, path);
btrfs_release_path(path);
continue;
}
......@@ -682,7 +682,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
new_key.offset = split;
ret = btrfs_duplicate_item(trans, root, path, &new_key);
if (ret == -EAGAIN) {
btrfs_release_path(root, path);
btrfs_release_path(path);
goto again;
}
BUG_ON(ret < 0);
......@@ -722,7 +722,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
ino, bytenr, orig_offset,
&other_start, &other_end)) {
if (recow) {
btrfs_release_path(root, path);
btrfs_release_path(path);
goto again;
}
extent_end = other_end;
......@@ -739,7 +739,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
ino, bytenr, orig_offset,
&other_start, &other_end)) {
if (recow) {
btrfs_release_path(root, path);
btrfs_release_path(path);
goto again;
}
key.offset = other_start;
......@@ -1376,7 +1376,7 @@ static long btrfs_fallocate(struct file *file, int mode,
while (1) {
em = btrfs_get_extent(inode, NULL, 0, cur_offset,
alloc_end - cur_offset, 0);
BUG_ON(IS_ERR(em) || !em);
BUG_ON(IS_ERR_OR_NULL(em));
last_byte = min(extent_map_end(em), alloc_end);
last_byte = (last_byte + mask) & ~mask;
if (em->block_start == EXTENT_MAP_HOLE ||
......
......@@ -53,7 +53,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
if (ret < 0)
return ERR_PTR(ret);
if (ret > 0) {
btrfs_release_path(root, path);
btrfs_release_path(path);
return ERR_PTR(-ENOENT);
}
......@@ -62,7 +62,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
struct btrfs_free_space_header);
btrfs_free_space_key(leaf, header, &disk_key);
btrfs_disk_key_to_cpu(&location, &disk_key);
btrfs_release_path(root, path);
btrfs_release_path(path);
inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
if (!inode)
......@@ -140,7 +140,7 @@ int __create_free_space_inode(struct btrfs_root *root,
btrfs_set_inode_transid(leaf, inode_item, trans->transid);
btrfs_set_inode_block_group(leaf, inode_item, offset);
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(root, path);
btrfs_release_path(path);
key.objectid = BTRFS_FREE_SPACE_OBJECTID;
key.offset = offset;
......@@ -149,7 +149,7 @@ int __create_free_space_inode(struct btrfs_root *root,
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(struct btrfs_free_space_header));
if (ret < 0) {
btrfs_release_path(root, path);
btrfs_release_path(path);
return ret;
}
leaf = path->nodes[0];
......@@ -158,7 +158,7 @@ int __create_free_space_inode(struct btrfs_root *root,
memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
btrfs_set_free_space_key(leaf, header, &disk_key);
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(root, path);
btrfs_release_path(path);
return 0;
}
......@@ -266,7 +266,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
if (ret < 0)
goto out;
else if (ret > 0) {
btrfs_release_path(root, path);
btrfs_release_path(path);
ret = 0;
goto out;
}
......@@ -279,7 +279,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
num_entries = btrfs_free_space_entries(leaf, header);
num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
generation = btrfs_free_space_generation(leaf, header);
btrfs_release_path(root, path);
btrfs_release_path(path);
if (BTRFS_I(inode)->generation != generation) {
printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
......@@ -842,7 +842,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING, 0, 0, NULL,
GFP_NOFS);
btrfs_release_path(root, path);
btrfs_release_path(path);
goto out_free;
}
}
......@@ -852,7 +852,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
btrfs_set_free_space_generation(leaf, header, trans->transid);
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(root, path);
btrfs_release_path(path);
ret = 1;
......@@ -1504,7 +1504,7 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
return ret;
}
bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
struct btrfs_free_space *info, bool update_stat)
{
struct btrfs_free_space *left_info;
......@@ -1984,8 +1984,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
while(1) {
if (entry->bytes < bytes ||
(!entry->bitmap && entry->offset < min_start)) {
struct rb_node *node;
node = rb_next(&entry->offset_index);
if (!node)
break;
......@@ -1999,7 +1997,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
cluster, entry, bytes,
min_start);
if (ret == 0) {
struct rb_node *node;
node = rb_next(&entry->offset_index);
if (!node)
break;
......
......@@ -86,7 +86,7 @@ static int caching_kthread(void *data)
* in the next search.
*/
btrfs_item_key_to_cpu(leaf, &key, 0);
btrfs_release_path(root, path);
btrfs_release_path(path);
root->cache_progress = last;
mutex_unlock(&root->fs_commit_mutex);
schedule_timeout(1);
......
......@@ -1402,7 +1402,7 @@ static noinline int search_ioctl(struct inode *inode,
}
ret = copy_to_sk(root, path, &key, sk, args->buf,
&sk_offset, &num_found);
btrfs_release_path(root, path);
btrfs_release_path(path);
if (ret || num_found >= sk->nr_items)
break;
......@@ -1509,7 +1509,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
break;
btrfs_release_path(root, path);
btrfs_release_path(path);
key.objectid = key.offset;
key.offset = (u64)-1;
dirid = key.objectid;
......@@ -1988,7 +1988,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
datal = btrfs_file_extent_ram_bytes(leaf,
extent);
}
btrfs_release_path(root, path);
btrfs_release_path(path);
if (key.offset + datal <= off ||
key.offset >= off+len)
......@@ -2098,7 +2098,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
}
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(root, path);
btrfs_release_path(path);
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
......@@ -2119,12 +2119,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
btrfs_end_transaction(trans, root);
}
next:
btrfs_release_path(root, path);
btrfs_release_path(path);
key.offset++;
}
ret = 0;
out:
btrfs_release_path(root, path);
btrfs_release_path(path);
unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
out_unlock:
mutex_unlock(&src->i_mutex);
......
......@@ -185,31 +185,6 @@ int btrfs_tree_lock(struct extent_buffer *eb)
return 0;
}
/*
* Very quick trylock, this does not spin or schedule. It returns
* 1 with the spinlock held if it was able to take the lock, or it
* returns zero if it was unable to take the lock.
*
* After this call, scheduling is not safe without first calling
* btrfs_set_lock_blocking()
*/
int btrfs_try_tree_lock(struct extent_buffer *eb)
{
if (spin_trylock(&eb->lock)) {
if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
/*
* we've got the spinlock, but the real owner is
* blocking. Drop the spinlock and return failure
*/
spin_unlock(&eb->lock);
return 0;
}
return 1;
}
/* someone else has the spinlock giveup */
return 0;
}
int btrfs_tree_unlock(struct extent_buffer *eb)
{
/*
......
......@@ -21,8 +21,6 @@
int btrfs_tree_lock(struct extent_buffer *eb);
int btrfs_tree_unlock(struct extent_buffer *eb);
int btrfs_try_tree_lock(struct extent_buffer *eb);
int btrfs_try_spin_lock(struct extent_buffer *eb);
void btrfs_set_lock_blocking(struct extent_buffer *eb);
......
......@@ -23,56 +23,6 @@
#include "ref-cache.h"
#include "transaction.h"
/*
* leaf refs are used to cache the information about which extents
* a given leaf has references on. This allows us to process that leaf
* in btrfs_drop_snapshot without needing to read it back from disk.
*/
/*
* kmalloc a leaf reference struct and update the counters for the
* total ref cache size
*/
struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
int nr_extents)
{
struct btrfs_leaf_ref *ref;
size_t size = btrfs_leaf_ref_size(nr_extents);
ref = kmalloc(size, GFP_NOFS);
if (ref) {
spin_lock(&root->fs_info->ref_cache_lock);
root->fs_info->total_ref_cache_size += size;
spin_unlock(&root->fs_info->ref_cache_lock);
memset(ref, 0, sizeof(*ref));
atomic_set(&ref->usage, 1);
INIT_LIST_HEAD(&ref->list);
}
return ref;
}
/*
* free a leaf reference struct and update the counters for the
* total ref cache size
*/
void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
{
if (!ref)
return;
WARN_ON(atomic_read(&ref->usage) == 0);
if (atomic_dec_and_test(&ref->usage)) {
size_t size = btrfs_leaf_ref_size(ref->nritems);
BUG_ON(ref->in_tree);
kfree(ref);
spin_lock(&root->fs_info->ref_cache_lock);
root->fs_info->total_ref_cache_size -= size;
spin_unlock(&root->fs_info->ref_cache_lock);
}
}
static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
struct rb_node *node)
{
......@@ -116,117 +66,3 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
}
return NULL;
}
int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
int shared)
{
struct btrfs_leaf_ref *ref = NULL;
struct btrfs_leaf_ref_tree *tree = root->ref_tree;
if (shared)
tree = &root->fs_info->shared_ref_tree;
if (!tree)
return 0;
spin_lock(&tree->lock);
while (!list_empty(&tree->list)) {
ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
BUG_ON(ref->tree != tree);
if (ref->root_gen > max_root_gen)
break;
if (!xchg(&ref->in_tree, 0)) {
cond_resched_lock(&tree->lock);
continue;
}
rb_erase(&ref->rb_node, &tree->root);
list_del_init(&ref->list);
spin_unlock(&tree->lock);
btrfs_free_leaf_ref(root, ref);
cond_resched();
spin_lock(&tree->lock);
}
spin_unlock(&tree->lock);
return 0;
}
/*
* find the leaf ref for a given extent. This returns the ref struct with
* a usage reference incremented
*/
struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
u64 bytenr)
{
struct rb_node *rb;
struct btrfs_leaf_ref *ref = NULL;
struct btrfs_leaf_ref_tree *tree = root->ref_tree;
again:
if (tree) {
spin_lock(&tree->lock);
rb = tree_search(&tree->root, bytenr);
if (rb)
ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
if (ref)
atomic_inc(&ref->usage);
spin_unlock(&tree->lock);
if (ref)
return ref;
}
if (tree != &root->fs_info->shared_ref_tree) {
tree = &root->fs_info->shared_ref_tree;
goto again;
}
return NULL;
}
/*
* add a fully filled in leaf ref struct
* remove all the refs older than a given root generation
*/
int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
int shared)
{
int ret = 0;
struct rb_node *rb;
struct btrfs_leaf_ref_tree *tree = root->ref_tree;
if (shared)
tree = &root->fs_info->shared_ref_tree;
spin_lock(&tree->lock);
rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
if (rb) {
ret = -EEXIST;
} else {
atomic_inc(&ref->usage);
ref->tree = tree;
ref->in_tree = 1;
list_add_tail(&ref->list, &tree->list);
}
spin_unlock(&tree->lock);
return ret;
}
/*
* remove a single leaf ref from the tree. This drops the ref held by the tree
* only
*/
int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
{
struct btrfs_leaf_ref_tree *tree;
if (!xchg(&ref->in_tree, 0))
return 0;
tree = ref->tree;
spin_lock(&tree->lock);
rb_erase(&ref->rb_node, &tree->root);
list_del_init(&ref->list);
spin_unlock(&tree->lock);
btrfs_free_leaf_ref(root, ref);
return 0;
}
......@@ -49,28 +49,4 @@ static inline size_t btrfs_leaf_ref_size(int nr_extents)
return sizeof(struct btrfs_leaf_ref) +
sizeof(struct btrfs_extent_info) * nr_extents;
}
static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree)
{
tree->root = RB_ROOT;
INIT_LIST_HEAD(&tree->list);
spin_lock_init(&tree->lock);
}
static inline int btrfs_leaf_ref_tree_empty(struct btrfs_leaf_ref_tree *tree)
{
return RB_EMPTY_ROOT(&tree->root);
}
void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree);
struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
int nr_extents);
void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
u64 bytenr);
int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
int shared);
int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
int shared);
int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
#endif
......@@ -508,6 +508,7 @@ static int update_backref_cache(struct btrfs_trans_handle *trans,
return 1;
}
static int should_ignore_root(struct btrfs_root *root)
{
struct btrfs_root *reloc_root;
......@@ -530,7 +531,6 @@ static int should_ignore_root(struct btrfs_root *root)
*/
return 1;
}
/*
* find reloc tree by address of tree root
*/
......@@ -962,7 +962,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
lower = upper;
upper = NULL;
}
btrfs_release_path(root, path2);
btrfs_release_path(path2);
next:
if (ptr < end) {
ptr += btrfs_extent_inline_ref_size(key.type);
......@@ -975,7 +975,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
if (ptr >= end)
path1->slots[0]++;
}
btrfs_release_path(rc->extent_root, path1);
btrfs_release_path(path1);
cur->checked = 1;
WARN_ON(exist);
......@@ -1750,7 +1750,7 @@ int replace_path(struct btrfs_trans_handle *trans,
btrfs_node_key_to_cpu(path->nodes[level], &key,
path->slots[level]);
btrfs_release_path(src, path);
btrfs_release_path(path);
path->lowest_level = level;
ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
......@@ -2499,7 +2499,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
path->locks[upper->level] = 0;
slot = path->slots[upper->level];
btrfs_release_path(NULL, path);
btrfs_release_path(path);
} else {
ret = btrfs_bin_search(upper->eb, key, upper->level,
&slot);
......@@ -2740,7 +2740,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
} else {
path->lowest_level = node->level;
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
btrfs_release_path(root, path);
btrfs_release_path(path);
if (ret > 0)
ret = 0;
}
......@@ -2873,7 +2873,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
struct extent_map *em;
int ret = 0;
em = alloc_extent_map(GFP_NOFS);
em = alloc_extent_map();
if (!em)
return -ENOMEM;
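alloc_extent_map() likewise lost its gfp_t parameter in this series, as does
extent_io_tree_init() further down; callers now only check for NULL (sketch):

	struct extent_map *em;

	em = alloc_extent_map();	/* allocation mask now chosen internally */
	if (!em)
		return -ENOMEM;
	/* ... fill in em->start, em->len, em->block_start ... */
	free_extent_map(em);		/* drop the map's initial reference */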
......@@ -3122,7 +3122,7 @@ static int add_tree_block(struct reloc_control *rc,
#endif
}
btrfs_release_path(rc->extent_root, path);
btrfs_release_path(path);
BUG_ON(level == -1);
......@@ -3223,7 +3223,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
key.offset = 0;
inode = btrfs_iget(fs_info->sb, &key, root, NULL);
if (!inode || IS_ERR(inode) || is_bad_inode(inode)) {
if (IS_ERR_OR_NULL(inode) || is_bad_inode(inode)) {
if (inode && !IS_ERR(inode))
iput(inode);
return -ENOENT;
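IS_ERR_OR_NULL() folds the removed two-part test into one predicate. A small
userspace model of the pointer-encoding convention it relies on (the
lowercase names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

static bool is_err_or_null(const void *ptr)
{
	/* NULL, or a value in the top MAX_ERRNO bytes of the address space */
	return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}

int main(void)
{
	int obj;
	void *good = &obj;
	void *err = (void *)(unsigned long)-2;	/* models ERR_PTR(-ENOENT) */

	printf("%d %d %d\n", is_err_or_null(NULL), is_err_or_null(err),
	       is_err_or_null(good));		/* prints: 1 1 0 */
	return 0;
}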
......@@ -3508,7 +3508,7 @@ int add_data_references(struct reloc_control *rc,
}
path->slots[0]++;
}
btrfs_release_path(rc->extent_root, path);
btrfs_release_path(path);
if (err)
free_block_list(blocks);
return err;
......@@ -3571,7 +3571,7 @@ int find_next_extent(struct btrfs_trans_handle *trans,
EXTENT_DIRTY);
if (ret == 0 && start <= key.objectid) {
btrfs_release_path(rc->extent_root, path);
btrfs_release_path(path);
rc->search_start = end + 1;
} else {
rc->search_start = key.objectid + key.offset;
......@@ -3579,7 +3579,7 @@ int find_next_extent(struct btrfs_trans_handle *trans,
return 0;
}
}
btrfs_release_path(rc->extent_root, path);
btrfs_release_path(path);
return ret;
}
......@@ -3716,7 +3716,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
flags = BTRFS_EXTENT_FLAG_DATA;
if (path_change) {
btrfs_release_path(rc->extent_root, path);
btrfs_release_path(path);
path->search_commit_root = 1;
path->skip_locking = 1;
......@@ -3739,7 +3739,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
(flags & BTRFS_EXTENT_FLAG_DATA)) {
ret = add_data_references(rc, &key, path, &blocks);
} else {
btrfs_release_path(rc->extent_root, path);
btrfs_release_path(path);
ret = 0;
}
if (ret < 0) {
......@@ -3802,7 +3802,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
}
}
btrfs_release_path(rc->extent_root, path);
btrfs_release_path(path);
clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
GFP_NOFS);
......@@ -3870,7 +3870,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
BTRFS_INODE_PREALLOC);
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(root, path);
btrfs_release_path(path);
out:
btrfs_free_path(path);
return ret;
......@@ -3938,7 +3938,7 @@ static struct reloc_control *alloc_reloc_control(void)
INIT_LIST_HEAD(&rc->reloc_roots);
backref_cache_init(&rc->backref_cache);
mapping_tree_init(&rc->reloc_root_tree);
extent_io_tree_init(&rc->processed_blocks, NULL, GFP_NOFS);
extent_io_tree_init(&rc->processed_blocks, NULL);
return rc;
}
......@@ -4112,7 +4112,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
}
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
btrfs_release_path(root->fs_info->tree_root, path);
btrfs_release_path(path);
if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
key.type != BTRFS_ROOT_ITEM_KEY)
......@@ -4144,7 +4144,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
key.offset--;
}
btrfs_release_path(root->fs_info->tree_root, path);
btrfs_release_path(path);
if (list_empty(&reloc_roots))
goto out;
......
......@@ -21,53 +21,6 @@
#include "disk-io.h"
#include "print-tree.h"
/*
 * search forward for a root, starting with objectid 'search_start'.
 * If a root key is found, the objectid we find is filled into
 * 'found_objectid' and 0 is returned.  < 0 is returned on error,
 * 1 if there is nothing left in the tree.
*/
int btrfs_search_root(struct btrfs_root *root, u64 search_start,
u64 *found_objectid)
{
struct btrfs_path *path;
struct btrfs_key search_key;
int ret;
root = root->fs_info->tree_root;
search_key.objectid = search_start;
search_key.type = (u8)-1;
search_key.offset = (u64)-1;
path = btrfs_alloc_path();
BUG_ON(!path);
again:
ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
if (ret < 0)
goto out;
if (ret == 0) {
ret = 1;
goto out;
}
if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
ret = btrfs_next_leaf(root, path);
if (ret)
goto out;
}
btrfs_item_key_to_cpu(path->nodes[0], &search_key, path->slots[0]);
if (search_key.type != BTRFS_ROOT_ITEM_KEY) {
search_key.offset++;
btrfs_release_path(root, path);
goto again;
}
ret = 0;
*found_objectid = search_key.objectid;
out:
btrfs_free_path(path);
return ret;
}
/*
* lookup the root with the highest offset for a given objectid. The key we do
* find is copied into 'key'. If we find something return 0, otherwise 1, < 0
......@@ -230,7 +183,7 @@ int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid)
memcpy(&found_key, &key, sizeof(key));
key.offset++;
btrfs_release_path(root, path);
btrfs_release_path(path);
dead_root =
btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
&found_key);
......@@ -292,7 +245,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
}
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
btrfs_release_path(tree_root, path);
btrfs_release_path(path);
if (key.objectid != BTRFS_ORPHAN_OBJECTID ||
key.type != BTRFS_ORPHAN_ITEM_KEY)
......@@ -390,7 +343,7 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans,
err = -ENOENT;
if (key.type == BTRFS_ROOT_BACKREF_KEY) {
btrfs_release_path(tree_root, path);
btrfs_release_path(path);
key.objectid = ref_id;
key.type = BTRFS_ROOT_REF_KEY;
key.offset = root_id;
......@@ -463,7 +416,7 @@ int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
if (key.type == BTRFS_ROOT_BACKREF_KEY) {
btrfs_release_path(tree_root, path);
btrfs_release_path(path);
key.objectid = ref_id;
key.type = BTRFS_ROOT_REF_KEY;
key.offset = root_id;
......
......@@ -740,7 +740,7 @@ static int btrfs_set_super(struct super_block *s, void *data)
* for multiple device setup. Make sure to keep it in sync.
*/
static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
const char *dev_name, void *data)
const char *device_name, void *data)
{
struct block_device *bdev = NULL;
struct super_block *s;
......@@ -763,7 +763,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
if (error)
return ERR_PTR(error);
error = btrfs_scan_one_device(dev_name, mode, fs_type, &fs_devices);
error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
if (error)
goto error_free_subvol_name;
......
......@@ -174,86 +174,9 @@ static const struct sysfs_ops btrfs_root_attr_ops = {
.store = btrfs_root_attr_store,
};
static struct kobj_type btrfs_root_ktype = {
.default_attrs = btrfs_root_attrs,
.sysfs_ops = &btrfs_root_attr_ops,
.release = btrfs_root_release,
};
static struct kobj_type btrfs_super_ktype = {
.default_attrs = btrfs_super_attrs,
.sysfs_ops = &btrfs_super_attr_ops,
.release = btrfs_super_release,
};
/* /sys/fs/btrfs/ entry */
static struct kset *btrfs_kset;
int btrfs_sysfs_add_super(struct btrfs_fs_info *fs)
{
int error;
char *name;
char c;
int len = strlen(fs->sb->s_id) + 1;
int i;
name = kmalloc(len, GFP_NOFS);
if (!name) {
error = -ENOMEM;
goto fail;
}
	/*
	 * copy s_id, mangling path separators sysfs would reject; len
	 * includes the trailing NUL, so the loop copies the terminator
	 * and nothing is written past the kmalloc'd buffer
	 */
	for (i = 0; i < len; i++) {
		c = fs->sb->s_id[i];
		if (c == '/' || c == '\\')
			c = '!';
		name[i] = c;
	}
fs->super_kobj.kset = btrfs_kset;
error = kobject_init_and_add(&fs->super_kobj, &btrfs_super_ktype,
NULL, "%s", name);
kfree(name);
if (error)
goto fail;
return 0;
fail:
printk(KERN_ERR "btrfs: sysfs creation for super failed\n");
return error;
}
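A userspace model of the name mangling above (sysfs object names must be a
single path component, so separators become '!'); the helper name is
hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *sanitize_kobj_name(const char *s_id)
{
	size_t len = strlen(s_id) + 1;
	char *name = malloc(len);
	size_t i;

	if (!name)
		return NULL;
	for (i = 0; i < len; i++) {	/* copies the trailing NUL too */
		char c = s_id[i];
		name[i] = (c == '/' || c == '\\') ? '!' : c;
	}
	return name;
}

int main(void)
{
	char *name = sanitize_kobj_name("dm-0/fs");

	printf("%s\n", name);	/* prints: dm-0!fs */
	free(name);
	return 0;
}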
int btrfs_sysfs_add_root(struct btrfs_root *root)
{
int error;
error = kobject_init_and_add(&root->root_kobj, &btrfs_root_ktype,
&root->fs_info->super_kobj,
"%s", root->name);
if (error)
goto fail;
return 0;
fail:
printk(KERN_ERR "btrfs: sysfs creation for root failed\n");
return error;
}
void btrfs_sysfs_del_root(struct btrfs_root *root)
{
kobject_put(&root->root_kobj);
wait_for_completion(&root->kobj_unregister);
}
void btrfs_sysfs_del_super(struct btrfs_fs_info *fs)
{
kobject_put(&fs->super_kobj);
wait_for_completion(&fs->kobj_unregister);
}
int btrfs_init_sysfs(void)
{
btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj);
......
......@@ -81,8 +81,7 @@ static noinline int join_transaction(struct btrfs_root *root)
INIT_LIST_HEAD(&cur_trans->pending_snapshots);
list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
extent_io_tree_init(&cur_trans->dirty_pages,
root->fs_info->btree_inode->i_mapping,
GFP_NOFS);
root->fs_info->btree_inode->i_mapping);
spin_lock(&root->fs_info->new_trans_lock);
root->fs_info->running_transaction = cur_trans;
spin_unlock(&root->fs_info->new_trans_lock);
......@@ -348,49 +347,6 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
return ret;
}
#if 0
/*
* rate limit against the drop_snapshot code. This helps to slow down new
* operations if the drop_snapshot code isn't able to keep up.
*/
static void throttle_on_drops(struct btrfs_root *root)
{
struct btrfs_fs_info *info = root->fs_info;
int harder_count = 0;
harder:
if (atomic_read(&info->throttles)) {
DEFINE_WAIT(wait);
int thr;
thr = atomic_read(&info->throttle_gen);
do {
prepare_to_wait(&info->transaction_throttle,
&wait, TASK_UNINTERRUPTIBLE);
if (!atomic_read(&info->throttles)) {
finish_wait(&info->transaction_throttle, &wait);
break;
}
schedule();
finish_wait(&info->transaction_throttle, &wait);
} while (thr == atomic_read(&info->throttle_gen));
harder_count++;
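		/* escalate: the bigger the ref cache has grown, the more
		 * times we loop back and wait before letting the caller run */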
if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
harder_count < 2)
goto harder;
if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
harder_count < 10)
goto harder;
if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
harder_count < 20)
goto harder;
}
}
#endif
void btrfs_throttle(struct btrfs_root *root)
{
mutex_lock(&root->fs_info->trans_mutex);
......@@ -837,97 +793,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
return ret;
}
#if 0
/*
* when dropping snapshots, we generate a ton of delayed refs, and it makes
* sense not to join the transaction while it is trying to flush the current
* queue of delayed refs out.
*
* This is used by the drop snapshot code only
*/
static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
{
DEFINE_WAIT(wait);
mutex_lock(&info->trans_mutex);
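	/*
	 * classic prepare_to_wait()/schedule()/finish_wait() sleep loop;
	 * trans_mutex is dropped across schedule() so the flusher can
	 * finish and wake transaction_wait
	 */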
while (info->running_transaction &&
info->running_transaction->delayed_refs.flushing) {
prepare_to_wait(&info->transaction_wait, &wait,
TASK_UNINTERRUPTIBLE);
mutex_unlock(&info->trans_mutex);
schedule();
mutex_lock(&info->trans_mutex);
finish_wait(&info->transaction_wait, &wait);
}
mutex_unlock(&info->trans_mutex);
return 0;
}
/*
* Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
* all of them
*/
int btrfs_drop_dead_root(struct btrfs_root *root)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *tree_root = root->fs_info->tree_root;
unsigned long nr;
int ret;
while (1) {
/*
* we don't want to jump in and create a bunch of
* delayed refs if the transaction is starting to close
*/
wait_transaction_pre_flush(tree_root->fs_info);
trans = btrfs_start_transaction(tree_root, 1);
/*
* we've joined a transaction, make sure it isn't
* closing right now
*/
if (trans->transaction->delayed_refs.flushing) {
btrfs_end_transaction(trans, tree_root);
continue;
}
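		/*
		 * drop_snapshot returns -EAGAIN when it bails out so the
		 * transaction can commit; record progress in the root item
		 * below and take a fresh transaction on the next pass
		 */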
ret = btrfs_drop_snapshot(trans, root);
if (ret != -EAGAIN)
break;
ret = btrfs_update_root(trans, tree_root,
&root->root_key,
&root->root_item);
if (ret)
break;
nr = trans->blocks_used;
ret = btrfs_end_transaction(trans, tree_root);
BUG_ON(ret);
btrfs_btree_balance_dirty(tree_root, nr);
cond_resched();
}
BUG_ON(ret);
ret = btrfs_del_root(trans, tree_root, &root->root_key);
BUG_ON(ret);
nr = trans->blocks_used;
ret = btrfs_end_transaction(trans, tree_root);
BUG_ON(ret);
free_extent_buffer(root->node);
free_extent_buffer(root->commit_root);
kfree(root);
btrfs_btree_balance_dirty(tree_root, nr);
return ret;
}
#endif
/*
* new snapshots need to be created at a very specific time in the
* transaction commit. This does the actual creation
......
......@@ -101,11 +101,8 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_drop_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
int btrfs_clean_old_snapshots(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
......
......@@ -97,7 +97,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
ret = 0;
goto out;
}
btrfs_release_path(root, path);
btrfs_release_path(path);
wret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (wret < 0) {
......
......@@ -38,7 +38,6 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const char *name, int name_len,
struct inode *inode, u64 dirid);
int btrfs_join_running_log_trans(struct btrfs_root *root);
int btrfs_end_log_trans(struct btrfs_root *root);
int btrfs_pin_log_trans(struct btrfs_root *root);
int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
......
......@@ -196,7 +196,6 @@ void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
int mirror_num, int async_submit);
int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder);
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
......@@ -209,8 +208,6 @@ int btrfs_add_device(struct btrfs_trans_handle *trans,
int btrfs_rm_device(struct btrfs_root *root, char *device_path);
int btrfs_cleanup_fs_uuids(void);
int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len);
int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
u64 logical, struct page *page);
int btrfs_grow_device(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 new_size);
struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
......@@ -218,8 +215,6 @@ struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
int btrfs_init_new_device(struct btrfs_root *root, char *path);
int btrfs_balance(struct btrfs_root *dev_root);
void btrfs_unlock_volumes(void);
void btrfs_lock_volumes(void);
int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
int find_free_dev_extent(struct btrfs_trans_handle *trans,
struct btrfs_device *device, u64 num_bytes,
......
......@@ -120,13 +120,13 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
ret = btrfs_delete_one_dir_name(trans, root, path, di);
BUG_ON(ret);
btrfs_release_path(root, path);
btrfs_release_path(path);
/* if we don't have a value then we are removing the xattr */
if (!value)
goto out;
} else {
btrfs_release_path(root, path);
btrfs_release_path(path);
if (flags & XATTR_REPLACE) {
/* we couldn't find the attr to replace */
......