Commit 9613bebb authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs fixes and features from Chris Mason:
 "We've merged in the error handling patches from SuSE.  These are
  already shipping in the SLES kernel, and they give btrfs the ability
  to abort transactions and go read-only on errors.  It involves a lot of
  churn as they clarify BUG_ONs, and remove the ones we now properly
  deal with.

  Josef reworked the way our metadata interacts with the page cache.
  page->private now points to the btrfs extent_buffer object, which
  makes everything faster.  He changed it so we write a whole extent
  buffer at a time instead of allowing individual pages to go down,
  which will be important for the raid5/6 code (for the 3.5 merge
  window ;)

  Josef also made us more aggressive about dropping pages for metadata
  blocks that were freed due to COW.  Overall, our metadata caching is
  much faster now.

  We've integrated my patch for metadata bigger than the page size.
  This allows metadata blocks up to 64KB in size.  In practice 16K and
  32K seem to work best.  For workloads with lots of metadata, this cuts
  down the size of the extent allocation tree dramatically and fragments
  much less.

  Scrub was updated to support the larger block sizes, which ended up
  being a fairly large change (thanks Stefan Behrens).

  We also have an assortment of fixes and updates, especially to the
  balancing code (Ilya Dryomov), the back ref walker (Jan Schmidt) and
  the defragging code (Liu Bo)."

Fixed up trivial conflicts in fs/btrfs/scrub.c that were just due to
removal of the second argument to k[un]map_atomic() in commit
7ac687d9.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (75 commits)
  Btrfs: update the checks for mixed block groups with big metadata blocks
  Btrfs: update to the right index of defragment
  Btrfs: do not bother to defrag an extent if it is a big real extent
  Btrfs: add a check to decide if we should defrag the range
  Btrfs: fix recursive defragment with autodefrag option
  Btrfs: fix the mismatch of page->mapping
  Btrfs: fix race between direct io and autodefrag
  Btrfs: fix deadlock during allocating chunks
  Btrfs: show useful info in space reservation tracepoint
  Btrfs: don't use crc items bigger than 4KB
  Btrfs: flush out and clean up any block device pages during mount
  btrfs: disallow unequal data/metadata blocksize for mixed block groups
  Btrfs: enhance superblock sanity checks
  Btrfs: change scrub to support big blocks
  Btrfs: minor cleanup in scrub
  Btrfs: introduce common define for max number of mirrors
  Btrfs: fix infinite loop in btrfs_shrink_device()
  Btrfs: fix memory leak in resolver code
  Btrfs: allow dup for data chunks in mixed mode
  Btrfs: validate target profiles only if we are going to use them
  ...
parents 40380f1c bc3f116f
@@ -171,11 +171,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 spin_unlock_irqrestore(&workers->lock, flags);
 }
-static noinline int run_ordered_completions(struct btrfs_workers *workers,
+static noinline void run_ordered_completions(struct btrfs_workers *workers,
 struct btrfs_work *work)
 {
 if (!workers->ordered)
-return 0;
+return;
 set_bit(WORK_DONE_BIT, &work->flags);
@@ -213,7 +213,6 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
 }
 spin_unlock(&workers->order_lock);
-return 0;
 }
 static void put_worker(struct btrfs_worker_thread *worker)
@@ -399,7 +398,7 @@ static int worker_loop(void *arg)
 /*
 * this will wait for all the worker threads to shutdown
 */
-int btrfs_stop_workers(struct btrfs_workers *workers)
+void btrfs_stop_workers(struct btrfs_workers *workers)
 {
 struct list_head *cur;
 struct btrfs_worker_thread *worker;
@@ -427,7 +426,6 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
 put_worker(worker);
 }
 spin_unlock_irq(&workers->lock);
-return 0;
 }
 /*
@@ -615,14 +613,14 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
 * it was taken from. It is intended for use with long running work functions
 * that make some progress and want to give the cpu up for others.
 */
-int btrfs_requeue_work(struct btrfs_work *work)
+void btrfs_requeue_work(struct btrfs_work *work)
 {
 struct btrfs_worker_thread *worker = work->worker;
 unsigned long flags;
 int wake = 0;
 if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
-goto out;
+return;
 spin_lock_irqsave(&worker->lock, flags);
 if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
@@ -649,9 +647,6 @@ int btrfs_requeue_work(struct btrfs_work *work)
 if (wake)
 wake_up_process(worker->task);
 spin_unlock_irqrestore(&worker->lock, flags);
-out:
-return 0;
 }
 void btrfs_set_work_high_prio(struct btrfs_work *work)
...
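The requeue path above leans on `test_and_set_bit(WORK_QUEUED_BIT, &work->flags)` to make requeueing idempotent: the first caller to set the bit queues the work, everyone else bails out. Below is a minimal userspace model of that guard using C11 atomics instead of the kernel's bitops; the `work` struct, the bit name and the `queue_work()` stub are illustrative stand-ins, not the btrfs code.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define WORK_QUEUED_BIT 0UL              /* bit index, mirroring the flag above */

struct work {
	atomic_ulong flags;              /* models work->flags */
};

/* Models test_and_set_bit(): sets the bit, returns its previous value. */
static bool test_and_set_bit(unsigned long nr, atomic_ulong *flags)
{
	unsigned long old = atomic_fetch_or(flags, 1UL << nr);
	return (old >> nr) & 1;
}

static void queue_work(struct work *w)
{
	printf("queued %p\n", (void *)w);
}

/* Mirrors the shape of btrfs_requeue_work(): first caller queues, later ones return. */
static void requeue_work(struct work *w)
{
	if (test_and_set_bit(WORK_QUEUED_BIT, &w->flags))
		return;                  /* already queued, nothing to do */
	queue_work(w);
}

int main(void)
{
	struct work w = { .flags = 0 };
	requeue_work(&w);                /* queues */
	requeue_work(&w);                /* no-op: bit already set */
	return 0;
}
```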
@@ -111,9 +111,9 @@ struct btrfs_workers {
 void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
 int btrfs_start_workers(struct btrfs_workers *workers);
-int btrfs_stop_workers(struct btrfs_workers *workers);
+void btrfs_stop_workers(struct btrfs_workers *workers);
 void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
 struct btrfs_workers *async_starter);
-int btrfs_requeue_work(struct btrfs_work *work);
+void btrfs_requeue_work(struct btrfs_work *work);
 void btrfs_set_work_high_prio(struct btrfs_work *work);
 #endif
@@ -116,6 +116,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
 * to a logical address
 */
 static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+int search_commit_root,
 struct __prelim_ref *ref,
 struct ulist *parents)
 {
@@ -131,6 +132,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 path = btrfs_alloc_path();
 if (!path)
 return -ENOMEM;
+path->search_commit_root = !!search_commit_root;
 root_key.objectid = ref->root_id;
 root_key.type = BTRFS_ROOT_ITEM_KEY;
@@ -188,6 +190,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 * resolve all indirect backrefs from the list
 */
 static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
+int search_commit_root,
 struct list_head *head)
 {
 int err;
@@ -212,7 +215,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 continue;
 if (ref->count == 0)
 continue;
-err = __resolve_indirect_ref(fs_info, ref, parents);
+err = __resolve_indirect_ref(fs_info, search_commit_root,
+ref, parents);
 if (err) {
 if (ret == 0)
 ret = err;
@@ -586,6 +590,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 struct btrfs_delayed_ref_head *head;
 int info_level = 0;
 int ret;
+int search_commit_root = (trans == BTRFS_BACKREF_SEARCH_COMMIT_ROOT);
 struct list_head prefs_delayed;
 struct list_head prefs;
 struct __prelim_ref *ref;
@@ -600,6 +605,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 path = btrfs_alloc_path();
 if (!path)
 return -ENOMEM;
+path->search_commit_root = !!search_commit_root;
 /*
 * grab both a lock on the path and a lock on the delayed ref head.
@@ -614,8 +620,10 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 goto out;
 BUG_ON(ret == 0);
+if (trans != BTRFS_BACKREF_SEARCH_COMMIT_ROOT) {
 /*
-* look if there are updates for this ref queued and lock the head
+* look if there are updates for this ref queued and lock the
+* head
 */
 delayed_refs = &trans->transaction->delayed_refs;
 spin_lock(&delayed_refs->lock);
@@ -636,13 +644,15 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 btrfs_put_delayed_ref(&head->node);
 goto again;
 }
-ret = __add_delayed_refs(head, seq, &info_key, &prefs_delayed);
+ret = __add_delayed_refs(head, seq, &info_key,
+&prefs_delayed);
 if (ret) {
 spin_unlock(&delayed_refs->lock);
 goto out;
 }
 }
 spin_unlock(&delayed_refs->lock);
+}
 if (path->slots[0]) {
 struct extent_buffer *leaf;
@@ -679,7 +689,7 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 if (ret)
 goto out;
-ret = __resolve_indirect_refs(fs_info, &prefs);
+ret = __resolve_indirect_refs(fs_info, search_commit_root, &prefs);
 if (ret)
 goto out;
@@ -1074,8 +1084,7 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
 return 0;
 }
-static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
-struct btrfs_path *path, u64 logical,
+static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, u64 logical,
 u64 orig_extent_item_objectid,
 u64 extent_item_pos, u64 root,
 iterate_extent_inodes_t *iterate, void *ctx)
@@ -1143,35 +1152,38 @@ static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
-* path is guaranteed to be in released state when iterate() is called.
 */
 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
-struct btrfs_path *path,
 u64 extent_item_objectid, u64 extent_item_pos,
+int search_commit_root,
 iterate_extent_inodes_t *iterate, void *ctx)
 {
 int ret;
 struct list_head data_refs = LIST_HEAD_INIT(data_refs);
 struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
 struct btrfs_trans_handle *trans;
-struct ulist *refs;
+struct ulist *refs = NULL;
-struct ulist *roots;
+struct ulist *roots = NULL;
 struct ulist_node *ref_node = NULL;
 struct ulist_node *root_node = NULL;
 struct seq_list seq_elem;
-struct btrfs_delayed_ref_root *delayed_refs;
+struct btrfs_delayed_ref_root *delayed_refs = NULL;
+pr_debug("resolving all inodes for extent %llu\n",
+extent_item_objectid);
+if (search_commit_root) {
+trans = BTRFS_BACKREF_SEARCH_COMMIT_ROOT;
+} else {
 trans = btrfs_join_transaction(fs_info->extent_root);
 if (IS_ERR(trans))
 return PTR_ERR(trans);
-pr_debug("resolving all inodes for extent %llu\n",
-extent_item_objectid);
 delayed_refs = &trans->transaction->delayed_refs;
 spin_lock(&delayed_refs->lock);
 btrfs_get_delayed_seq(delayed_refs, &seq_elem);
 spin_unlock(&delayed_refs->lock);
+}
 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
 extent_item_pos, seq_elem.seq,
@@ -1188,7 +1200,7 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 while (!ret && (root_node = ulist_next(roots, root_node))) {
 pr_debug("root %llu references leaf %llu\n",
 root_node->val, ref_node->val);
-ret = iterate_leaf_refs(fs_info, path, ref_node->val,
+ret = iterate_leaf_refs(fs_info, ref_node->val,
 extent_item_objectid,
 extent_item_pos, root_node->val,
 iterate, ctx);
@@ -1198,8 +1210,11 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 ulist_free(refs);
 ulist_free(roots);
 out:
+if (!search_commit_root) {
 btrfs_put_delayed_seq(delayed_refs, &seq_elem);
 btrfs_end_transaction(trans, fs_info->extent_root);
+}
 return ret;
 }
@@ -1210,6 +1225,7 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
 int ret;
 u64 extent_item_pos;
 struct btrfs_key found_key;
+int search_commit_root = path->search_commit_root;
 ret = extent_from_logical(fs_info, logical, path,
 &found_key);
@@ -1220,8 +1236,9 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
 return ret;
 extent_item_pos = logical - found_key.objectid;
-ret = iterate_extent_inodes(fs_info, path, found_key.objectid,
-extent_item_pos, iterate, ctx);
+ret = iterate_extent_inodes(fs_info, found_key.objectid,
+extent_item_pos, search_commit_root,
+iterate, ctx);
 return ret;
 }
@@ -1342,12 +1359,6 @@ int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
 inode_to_path, ipath);
 }
-/*
-* allocates space to return multiple file system paths for an inode.
-* total_bytes to allocate are passed, note that space usable for actual path
-* information will be total_bytes - sizeof(struct inode_fs_paths).
-* the returned pointer must be freed with free_ipath() in the end.
-*/
 struct btrfs_data_container *init_data_container(u32 total_bytes)
 {
 struct btrfs_data_container *data;
@@ -1403,5 +1414,6 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
 void free_ipath(struct inode_fs_paths *ipath)
 {
+kfree(ipath->fspath);
 kfree(ipath);
 }
@@ -22,6 +22,8 @@
 #include "ioctl.h"
 #include "ulist.h"
+#define BTRFS_BACKREF_SEARCH_COMMIT_ROOT ((struct btrfs_trans_handle *)0)
 struct inode_fs_paths {
 struct btrfs_path *btrfs_path;
 struct btrfs_root *fs_root;
@@ -44,9 +46,8 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
 u64 *out_root, u8 *out_level);
 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
-struct btrfs_path *path,
 u64 extent_item_objectid,
-u64 extent_offset,
+u64 extent_offset, int search_commit_root,
 iterate_extent_inodes_t *iterate, void *ctx);
 int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
...
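The new `BTRFS_BACKREF_SEARCH_COMMIT_ROOT` define above is simply a named null `btrfs_trans_handle` pointer: a caller that wants backrefs resolved against the commit root passes it instead of a real transaction handle, and `find_parent_nodes()` compares against it to skip the delayed-ref walk. Here is a small hedged sketch of that sentinel-pointer idiom outside the kernel; the `trans_handle` type and the `resolve()` body are invented for illustration only.

```c
#include <stdio.h>

struct trans_handle { int id; };

/* Named sentinel: "no live transaction, look at committed state only". */
#define SEARCH_COMMIT_ROOT ((struct trans_handle *)0)

static int resolve(struct trans_handle *trans, unsigned long long bytenr)
{
	int search_commit_root = (trans == SEARCH_COMMIT_ROOT);

	if (!search_commit_root) {
		/* with a real handle we could also consult in-flight (delayed) updates */
		printf("resolving %llu inside transaction %d\n", bytenr, trans->id);
	} else {
		printf("resolving %llu against the commit root only\n", bytenr);
	}
	return 0;
}

int main(void)
{
	struct trans_handle t = { .id = 42 };

	resolve(&t, 4096);                  /* normal, transaction-joined path */
	resolve(SEARCH_COMMIT_ROOT, 4096);  /* scrub-style commit-root lookup */
	return 0;
}
```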
@@ -226,7 +226,7 @@ static void end_compressed_bio_read(struct bio *bio, int err)
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
-static noinline int end_compressed_writeback(struct inode *inode, u64 start,
+static noinline void end_compressed_writeback(struct inode *inode, u64 start,
 unsigned long ram_size)
 {
 unsigned long index = start >> PAGE_CACHE_SHIFT;
@@ -253,7 +253,6 @@ static noinline int end_compressed_writeback(struct inode *inode, u64 start,
 index += ret;
 }
 /* the inode may be gone now */
-return 0;
 }
 /*
@@ -392,16 +391,16 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 */
 atomic_inc(&cb->pending_bios);
 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
-BUG_ON(ret);
+BUG_ON(ret); /* -ENOMEM */
 if (!skip_sum) {
 ret = btrfs_csum_one_bio(root, inode, bio,
 start, 1);
-BUG_ON(ret);
+BUG_ON(ret); /* -ENOMEM */
 }
 ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
-BUG_ON(ret);
+BUG_ON(ret); /* -ENOMEM */
 bio_put(bio);
@@ -421,15 +420,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 bio_get(bio);
 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
-BUG_ON(ret);
+BUG_ON(ret); /* -ENOMEM */
 if (!skip_sum) {
 ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
-BUG_ON(ret);
+BUG_ON(ret); /* -ENOMEM */
 }
 ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
-BUG_ON(ret);
+BUG_ON(ret); /* -ENOMEM */
 bio_put(bio);
 return 0;
@@ -497,7 +496,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 * sure they map to this compressed extent on disk.
 */
 set_page_extent_mapped(page);
-lock_extent(tree, last_offset, end, GFP_NOFS);
+lock_extent(tree, last_offset, end);
 read_lock(&em_tree->lock);
 em = lookup_extent_mapping(em_tree, last_offset,
 PAGE_CACHE_SIZE);
@@ -507,7 +506,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
 (em->block_start >> 9) != cb->orig_bio->bi_sector) {
 free_extent_map(em);
-unlock_extent(tree, last_offset, end, GFP_NOFS);
+unlock_extent(tree, last_offset, end);
 unlock_page(page);
 page_cache_release(page);
 break;
@@ -535,7 +534,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 nr_pages++;
 page_cache_release(page);
 } else {
-unlock_extent(tree, last_offset, end, GFP_NOFS);
+unlock_extent(tree, last_offset, end);
 unlock_page(page);
 page_cache_release(page);
 break;
@@ -662,7 +661,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 bio_get(comp_bio);
 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
-BUG_ON(ret);
+BUG_ON(ret); /* -ENOMEM */
 /*
 * inc the count before we submit the bio so
@@ -675,14 +674,14 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 ret = btrfs_lookup_bio_sums(root, inode,
 comp_bio, sums);
-BUG_ON(ret);
+BUG_ON(ret); /* -ENOMEM */
 }
 sums += (comp_bio->bi_size + root->sectorsize - 1) /
 root->sectorsize;
 ret = btrfs_map_bio(root, READ, comp_bio,
 mirror_num, 0);
-BUG_ON(ret);
+BUG_ON(ret); /* -ENOMEM */
 bio_put(comp_bio);
@@ -698,15 +697,15 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 bio_get(comp_bio);
 ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, 0);
-BUG_ON(ret);
+BUG_ON(ret); /* -ENOMEM */
 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
 ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
-BUG_ON(ret);
+BUG_ON(ret); /* -ENOMEM */
 }
 ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
-BUG_ON(ret);
+BUG_ON(ret); /* -ENOMEM */
 bio_put(comp_bio);
 return 0;
@@ -734,7 +733,7 @@ struct btrfs_compress_op *btrfs_compress_op[] = {
 &btrfs_lzo_compress,
 };
-int __init btrfs_init_compress(void)
+void __init btrfs_init_compress(void)
 {
 int i;
@@ -744,7 +743,6 @@ int __init btrfs_init_compress(void)
 atomic_set(&comp_alloc_workspace[i], 0);
 init_waitqueue_head(&comp_workspace_wait[i]);
 }
-return 0;
 }
 /*
...
@@ -19,7 +19,7 @@
 #ifndef __BTRFS_COMPRESSION_
 #define __BTRFS_COMPRESSION_
-int btrfs_init_compress(void);
+void btrfs_init_compress(void);
 void btrfs_exit_compress(void);
 int btrfs_compress_pages(int type, struct address_space *mapping,
...
@@ -115,6 +115,7 @@ static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
 return NULL;
 }
+/* Will return either the node or PTR_ERR(-ENOMEM) */
 static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 struct inode *inode)
 {
@@ -836,10 +837,8 @@ static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
 btrfs_clear_path_blocking(path, NULL, 0);
 /* insert the keys of the items */
-ret = setup_items_for_insert(trans, root, path, keys, data_size,
+setup_items_for_insert(trans, root, path, keys, data_size,
 total_data_size, total_size, nitems);
-if (ret)
-goto error;
 /* insert the dir index items */
 slot = path->slots[0];
@@ -1108,16 +1107,25 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
 return 0;
 }
-/* Called when committing the transaction. */
+/*
+* Called when committing the transaction.
+* Returns 0 on success.
+* Returns < 0 on error and returns with an aborted transaction with any
+* outstanding delayed items cleaned up.
+*/
 int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
 struct btrfs_root *root)
 {
+struct btrfs_root *curr_root = root;
 struct btrfs_delayed_root *delayed_root;
 struct btrfs_delayed_node *curr_node, *prev_node;
 struct btrfs_path *path;
 struct btrfs_block_rsv *block_rsv;
 int ret = 0;
+if (trans->aborted)
+return -EIO;
 path = btrfs_alloc_path();
 if (!path)
 return -ENOMEM;
@@ -1130,17 +1138,18 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
 curr_node = btrfs_first_delayed_node(delayed_root);
 while (curr_node) {
-root = curr_node->root;
+curr_root = curr_node->root;
-ret = btrfs_insert_delayed_items(trans, path, root,
+ret = btrfs_insert_delayed_items(trans, path, curr_root,
 curr_node);
 if (!ret)
-ret = btrfs_delete_delayed_items(trans, path, root,
+ret = btrfs_delete_delayed_items(trans, path,
-curr_node);
+curr_root, curr_node);
 if (!ret)
-ret = btrfs_update_delayed_inode(trans, root, path,
+ret = btrfs_update_delayed_inode(trans, curr_root,
-curr_node);
+path, curr_node);
 if (ret) {
 btrfs_release_delayed_node(curr_node);
+btrfs_abort_transaction(trans, root, ret);
 break;
 }
@@ -1151,6 +1160,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
 btrfs_free_path(path);
 trans->block_rsv = block_rsv;
 return ret;
 }
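The reworked `btrfs_run_delayed_items()` above shows the new error-handling pattern from the SuSE series: bail out immediately if the transaction is already marked aborted, and on any failure call `btrfs_abort_transaction()` and stop rather than `BUG_ON()`. A rough userspace model of that control flow follows; the `transaction` type, the step function and the error values are assumptions for illustration, not btrfs APIs.

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct transaction {
	bool aborted;
	int abort_errno;
};

/* Models the role of btrfs_abort_transaction(): record the error and give up. */
static void abort_transaction(struct transaction *trans, int err)
{
	trans->aborted = true;
	trans->abort_errno = err;
	fprintf(stderr, "transaction aborted: %d\n", err);
}

static int run_one_item(struct transaction *trans, int item)
{
	(void)trans;
	return (item == 3) ? -EIO : 0;   /* pretend item 3 hits an I/O error */
}

static int run_delayed_items(struct transaction *trans)
{
	if (trans->aborted)              /* someone already gave up on this transaction */
		return -EIO;

	for (int item = 0; item < 5; item++) {
		int ret = run_one_item(trans, item);
		if (ret) {
			abort_transaction(trans, ret);
			return ret;      /* stop instead of BUG_ON() */
		}
	}
	return 0;
}

int main(void)
{
	struct transaction trans = { 0 };

	printf("first pass: %d\n", run_delayed_items(&trans));
	printf("second pass: %d\n", run_delayed_items(&trans)); /* early out, already aborted */
	return 0;
}
```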
@@ -1371,6 +1381,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
 btrfs_wq_run_delayed_node(delayed_root, root, 0);
 }
+/* Will return 0 or -ENOMEM */
 int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
 struct btrfs_root *root, const char *name,
 int name_len, struct inode *dir,
...
@@ -420,7 +420,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
-static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 struct btrfs_trans_handle *trans,
 struct btrfs_delayed_ref_node *ref,
 u64 bytenr, u64 num_bytes,
@@ -487,20 +487,19 @@ static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,
 * we've updated the existing ref, free the newly
 * allocated ref
 */
-kfree(ref);
+kfree(head_ref);
 } else {
 delayed_refs->num_heads++;
 delayed_refs->num_heads_ready++;
 delayed_refs->num_entries++;
 trans->delayed_ref_updates++;
 }
-return 0;
 }
 /*
 * helper to insert a delayed tree ref into the rbtree.
 */
-static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 struct btrfs_trans_handle *trans,
 struct btrfs_delayed_ref_node *ref,
 u64 bytenr, u64 num_bytes, u64 parent,
@@ -549,18 +548,17 @@ static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 * we've updated the existing ref, free the newly
 * allocated ref
 */
-kfree(ref);
+kfree(full_ref);
 } else {
 delayed_refs->num_entries++;
 trans->delayed_ref_updates++;
 }
-return 0;
 }
 /*
 * helper to insert a delayed data ref into the rbtree.
 */
-static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 struct btrfs_trans_handle *trans,
 struct btrfs_delayed_ref_node *ref,
 u64 bytenr, u64 num_bytes, u64 parent,
@@ -611,12 +609,11 @@ static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 * we've updated the existing ref, free the newly
 * allocated ref
 */
-kfree(ref);
+kfree(full_ref);
 } else {
 delayed_refs->num_entries++;
 trans->delayed_ref_updates++;
 }
-return 0;
 }
 /*
@@ -634,7 +631,6 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 struct btrfs_delayed_tree_ref *ref;
 struct btrfs_delayed_ref_head *head_ref;
 struct btrfs_delayed_ref_root *delayed_refs;
-int ret;
 BUG_ON(extent_op && extent_op->is_data);
 ref = kmalloc(sizeof(*ref), GFP_NOFS);
@@ -656,14 +652,12 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 * insert both the head node and the new ref without dropping
 * the spin lock
 */
-ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
 num_bytes, action, 0);
-BUG_ON(ret);
-ret = add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
+add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
 num_bytes, parent, ref_root, level, action,
 for_cow);
-BUG_ON(ret);
 if (!need_ref_seq(for_cow, ref_root) &&
 waitqueue_active(&delayed_refs->seq_wait))
 wake_up(&delayed_refs->seq_wait);
@@ -685,7 +679,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 struct btrfs_delayed_data_ref *ref;
 struct btrfs_delayed_ref_head *head_ref;
 struct btrfs_delayed_ref_root *delayed_refs;
-int ret;
 BUG_ON(extent_op && !extent_op->is_data);
 ref = kmalloc(sizeof(*ref), GFP_NOFS);
@@ -707,14 +700,12 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 * insert both the head node and the new ref without dropping
 * the spin lock
 */
-ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
 num_bytes, action, 1);
-BUG_ON(ret);
-ret = add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
+add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
 num_bytes, parent, ref_root, owner, offset,
 action, for_cow);
-BUG_ON(ret);
 if (!need_ref_seq(for_cow, ref_root) &&
 waitqueue_active(&delayed_refs->seq_wait))
 wake_up(&delayed_refs->seq_wait);
@@ -729,7 +720,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
 {
 struct btrfs_delayed_ref_head *head_ref;
 struct btrfs_delayed_ref_root *delayed_refs;
-int ret;
 head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
 if (!head_ref)
@@ -740,10 +730,9 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
 delayed_refs = &trans->transaction->delayed_refs;
 spin_lock(&delayed_refs->lock);
-ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
 num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
 extent_op->is_data);
-BUG_ON(ret);
 if (waitqueue_active(&delayed_refs->seq_wait))
 wake_up(&delayed_refs->seq_wait);
...
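The `kfree(ref)` → `kfree(head_ref)`/`kfree(full_ref)` changes above fix which pointer is freed when an equivalent ref already exists: what was allocated is the containing structure, and `ref` is only the node embedded inside it, so the free has to use the container's address. A small userspace illustration of that container_of-style rule is below; the struct names are made up, and the only point is that the outer allocation, not the embedded member, must be passed to the allocator.

```c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct node {                 /* embedded node, in the spirit of btrfs_delayed_ref_node */
	int key;
};

struct full_ref {             /* the real allocation, in the spirit of btrfs_delayed_tree_ref */
	long payload;
	struct node node;     /* note: not the first member */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void drop_duplicate(struct node *ref)
{
	struct full_ref *full_ref = container_of(ref, struct full_ref, node);

	/* free(ref) here would hand an interior pointer to the allocator: undefined
	 * behaviour. The allocation started at full_ref, so that is what gets freed. */
	free(full_ref);
}

int main(void)
{
	struct full_ref *r = malloc(sizeof(*r));
	if (!r)
		return 1;
	r->node.key = 7;
	drop_duplicate(&r->node);   /* frees the whole object via its container */
	puts("freed container, not the embedded node");
	return 0;
}
```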
@@ -49,9 +49,8 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
 di = btrfs_match_dir_item_name(root, path, name, name_len);
 if (di)
 return ERR_PTR(-EEXIST);
-ret = btrfs_extend_item(trans, root, path, data_size);
+btrfs_extend_item(trans, root, path, data_size);
-}
+} else if (ret < 0)
-if (ret < 0)
 return ERR_PTR(ret);
 WARN_ON(ret > 0);
 leaf = path->nodes[0];
@@ -116,6 +115,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
 * 'location' is the key to stuff into the directory item, 'type' is the
 * type of the inode we're pointing to, and 'index' is the sequence number
 * to use for the second index (if one is created).
+* Will return 0 or -ENOMEM
 */
 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
 *root, const char *name, int name_len,
@@ -383,7 +383,7 @@ int btrfs_delete_one_dir_name(struct btrfs_trans_handle *trans,
 start = btrfs_item_ptr_offset(leaf, path->slots[0]);
 memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
 item_len - (ptr + sub_item_len - start));
-ret = btrfs_truncate_item(trans, root, path,
+btrfs_truncate_item(trans, root, path,
 item_len - sub_item_len, 1);
 }
 return ret;
...
@@ -44,7 +44,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
 int mirror_num, struct extent_buffer **eb);
 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
 u64 bytenr, u32 blocksize);
-int clean_tree_block(struct btrfs_trans_handle *trans,
+void clean_tree_block(struct btrfs_trans_handle *trans,
 struct btrfs_root *root, struct extent_buffer *buf);
 int open_ctree(struct super_block *sb,
 struct btrfs_fs_devices *fs_devices,
@@ -64,7 +64,7 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
 void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
-int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
+void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
 int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
@@ -85,6 +85,10 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
 struct btrfs_fs_info *fs_info);
 int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
 struct btrfs_root *root);
+int btrfs_cleanup_transaction(struct btrfs_root *root);
+void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
+struct btrfs_root *root);
+void btrfs_abort_devices(struct btrfs_root *root);
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void btrfs_init_lockdep(void);
...
@@ -193,7 +193,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
 if (ret < 0)
 goto fail;
-BUG_ON(ret == 0);
+BUG_ON(ret == 0); /* Key with offset of -1 found */
 if (path->slots[0] == 0) {
 ret = -ENOENT;
 goto fail;
...
@@ -35,6 +35,10 @@
 #define EXTENT_BUFFER_DIRTY 2
 #define EXTENT_BUFFER_CORRUPT 3
 #define EXTENT_BUFFER_READAHEAD 4 /* this got triggered by readahead */
+#define EXTENT_BUFFER_TREE_REF 5
+#define EXTENT_BUFFER_STALE 6
+#define EXTENT_BUFFER_WRITEBACK 7
+#define EXTENT_BUFFER_IOERR 8
 /* these are flags for extent_clear_unlock_delalloc */
 #define EXTENT_CLEAR_UNLOCK_PAGE 0x1
@@ -54,6 +58,7 @@
 #define EXTENT_PAGE_PRIVATE_FIRST_PAGE 3
 struct extent_state;
+struct btrfs_root;
 typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
 struct bio *bio, int mirror_num,
@@ -69,9 +74,7 @@ struct extent_io_ops {
 size_t size, struct bio *bio,
 unsigned long bio_flags);
 int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
-int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
-u64 start, u64 end, int failed_mirror,
-struct extent_state *state);
+int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
 int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
 u64 start, u64 end,
 struct extent_state *state);
@@ -97,6 +100,7 @@ struct extent_io_tree {
 struct radix_tree_root buffer;
 struct address_space *mapping;
 u64 dirty_bytes;
+int track_uptodate;
 spinlock_t lock;
 spinlock_t buffer_lock;
 struct extent_io_ops *ops;
@@ -119,16 +123,21 @@ struct extent_state {
 struct list_head leak_list;
 };
+#define INLINE_EXTENT_BUFFER_PAGES 16
+#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE)
 struct extent_buffer {
 u64 start;
 unsigned long len;
 unsigned long map_start;
 unsigned long map_len;
-struct page *first_page;
 unsigned long bflags;
+struct extent_io_tree *tree;
+spinlock_t refs_lock;
+atomic_t refs;
+atomic_t io_pages;
+int failed_mirror;
 struct list_head leak_list;
 struct rcu_head rcu_head;
-atomic_t refs;
 pid_t lock_owner;
 /* count of read lock holders on the extent buffer */
@@ -152,6 +161,9 @@ struct extent_buffer {
 * to unlock
 */
 wait_queue_head_t read_lock_wq;
+wait_queue_head_t lock_wq;
+struct page *inline_pages[INLINE_EXTENT_BUFFER_PAGES];
+struct page **pages;
 };
 static inline void extent_set_compress_type(unsigned long *bio_flags,
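The new `INLINE_EXTENT_BUFFER_PAGES`/`MAX_INLINE_EXTENT_BUFFER_SIZE` defines and the `inline_pages[]`/`pages` members are what let a single extent_buffer span several pages, which is how the "metadata blocks up to 64KB" feature from the pull message is carried. A quick back-of-the-envelope check of the arithmetic, assuming 4KB pages (the page size is an assumption, not something this header fixes):

```c
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL                 /* assumed x86-64 page size */
#define INLINE_EXTENT_BUFFER_PAGES 16
#define MAX_INLINE_EXTENT_BUFFER_SIZE \
	(INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE)

int main(void)
{
	unsigned long nodesizes[] = { 4096, 16384, 32768, 65536 };

	printf("max inline extent buffer: %lu bytes\n", MAX_INLINE_EXTENT_BUFFER_SIZE);
	for (unsigned i = 0; i < sizeof(nodesizes) / sizeof(nodesizes[0]); i++) {
		unsigned long pages = nodesizes[i] / PAGE_CACHE_SIZE;
		printf("nodesize %6lu -> %2lu page(s) per extent buffer\n",
		       nodesizes[i], pages);
	}
	return 0;
}
```

With 4KB pages the 16-page inline array covers exactly the 64KB maximum metadata block mentioned in the pull message; 16KB and 32KB nodes need 4 and 8 pages respectively.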
@@ -178,18 +190,17 @@ void extent_io_tree_init(struct extent_io_tree *tree,
 int try_release_extent_mapping(struct extent_map_tree *map,
 struct extent_io_tree *tree, struct page *page,
 gfp_t mask);
-int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page);
+int try_release_extent_buffer(struct page *page, gfp_t mask);
 int try_release_extent_state(struct extent_map_tree *map,
 struct extent_io_tree *tree, struct page *page,
 gfp_t mask);
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-int bits, struct extent_state **cached, gfp_t mask);
+int bits, struct extent_state **cached);
-int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
 struct extent_state **cached, gfp_t mask);
-int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
-gfp_t mask);
+int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
 get_extent_t *get_extent, int mirror_num);
 int __init extent_io_init(void);
@@ -210,7 +221,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 int bits, gfp_t mask);
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-int bits, int exclusive_bits, u64 *failed_start,
+int bits, u64 *failed_start,
 struct extent_state **cached_state, gfp_t mask);
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
 struct extent_state **cached_state, gfp_t mask);
@@ -240,6 +251,8 @@ int extent_writepages(struct extent_io_tree *tree,
 struct address_space *mapping,
 get_extent_t *get_extent,
 struct writeback_control *wbc);
+int btree_write_cache_pages(struct address_space *mapping,
+struct writeback_control *wbc);
 int extent_readpages(struct extent_io_tree *tree,
 struct address_space *mapping,
 struct list_head *pages, unsigned nr_pages,
@@ -251,11 +264,11 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
 void set_page_extent_mapped(struct page *page);
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
-u64 start, unsigned long len,
-struct page *page0);
+u64 start, unsigned long len);
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
 u64 start, unsigned long len);
 void free_extent_buffer(struct extent_buffer *eb);
+void free_extent_buffer_stale(struct extent_buffer *eb);
 #define WAIT_NONE 0
 #define WAIT_COMPLETE 1
 #define WAIT_PAGE_LOCK 2
@@ -287,19 +300,12 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
 unsigned long src_offset, unsigned long len);
 void memset_extent_buffer(struct extent_buffer *eb, char c,
 unsigned long start, unsigned long len);
-int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
-int clear_extent_buffer_dirty(struct extent_io_tree *tree,
-struct extent_buffer *eb);
-int set_extent_buffer_dirty(struct extent_io_tree *tree,
-struct extent_buffer *eb);
-int set_extent_buffer_uptodate(struct extent_io_tree *tree,
-struct extent_buffer *eb);
-int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
-struct extent_buffer *eb,
-struct extent_state **cached_state);
-int extent_buffer_uptodate(struct extent_io_tree *tree,
-struct extent_buffer *eb,
-struct extent_state *cached_state);
+void wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
+void clear_extent_buffer_dirty(struct extent_buffer *eb);
+int set_extent_buffer_dirty(struct extent_buffer *eb);
+int set_extent_buffer_uptodate(struct extent_buffer *eb);
+int clear_extent_buffer_uptodate(struct extent_buffer *eb);
+int extent_buffer_uptodate(struct extent_buffer *eb);
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
 unsigned long min_len, char **map,
 unsigned long *map_start,
@@ -320,4 +326,6 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
 u64 length, u64 logical, struct page *page,
 int mirror_num);
 int end_extent_writepage(struct page *page, int err, u64 start, u64 end);
+int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
+int mirror_num);
 #endif
@@ -25,10 +25,12 @@
 #include "transaction.h"
 #include "print-tree.h"
-#define MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
+#define __MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
 sizeof(struct btrfs_item) * 2) / \
 size) - 1))
+#define MAX_CSUM_ITEMS(r, size) (min(__MAX_CSUM_ITEMS(r, size), PAGE_CACHE_SIZE))
 #define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
 sizeof(struct btrfs_ordered_sum)) / \
 sizeof(struct btrfs_sector_sum) * \
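The split into `__MAX_CSUM_ITEMS` plus a `min(..., PAGE_CACHE_SIZE)` cap is the "don't use crc items bigger than 4KB" change from the shortlog: the raw formula scales with the leaf size, so with big metadata blocks it would allow very large checksum items, and the new macro clamps the per-item count. A rough userspace evaluation of the arithmetic follows; the leaf data sizes, the 25-byte item header and the 4-byte checksum size are assumptions for illustration, not values taken from this diff.

```c
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL                  /* assumed 4KB pages */
#define CSUM_SIZE 4UL                           /* crc32c checksum, 4 bytes */
#define ITEM_HEADER 25UL                        /* assumed sizeof(struct btrfs_item) */

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Mirrors the shape of __MAX_CSUM_ITEMS(r, size): csums that fit in one leaf item. */
static unsigned long max_csum_items_raw(unsigned long leaf_data, unsigned long size)
{
	return (leaf_data - ITEM_HEADER * 2) / size - 1;
}

int main(void)
{
	/* leaf data = assumed nodesize minus an assumed 101-byte header */
	unsigned long leaves[] = { 4096 - 101, 16384 - 101, 65536 - 101 };

	for (unsigned i = 0; i < sizeof(leaves) / sizeof(leaves[0]); i++) {
		unsigned long raw = max_csum_items_raw(leaves[i], CSUM_SIZE);
		unsigned long capped = MIN(raw, PAGE_CACHE_SIZE);
		printf("leaf data %5lu: raw max %5lu csums, capped max %4lu\n",
		       leaves[i], raw, capped);
	}
	return 0;
}
```

Under these assumptions the cap only bites for the 32KB and 64KB leaf sizes, which is exactly where oversized checksum items would otherwise appear.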
@@ -59,7 +61,7 @@ int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
 sizeof(*item));
 if (ret < 0)
 goto out;
-BUG_ON(ret);
+BUG_ON(ret); /* Can't happen */
 leaf = path->nodes[0];
 item = btrfs_item_ptr(leaf, path->slots[0],
 struct btrfs_file_extent_item);
@@ -284,6 +286,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 struct btrfs_ordered_sum *sums;
 struct btrfs_sector_sum *sector_sum;
 struct btrfs_csum_item *item;
+LIST_HEAD(tmplist);
 unsigned long offset;
 int ret;
 size_t size;
@@ -358,7 +361,10 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 MAX_ORDERED_SUM_BYTES(root));
 sums = kzalloc(btrfs_ordered_sum_size(root, size),
 GFP_NOFS);
-BUG_ON(!sums);
+if (!sums) {
+ret = -ENOMEM;
+goto fail;
+}
 sector_sum = sums->sums;
 sums->bytenr = start;
@@ -380,12 +386,19 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
 offset += csum_size;
 sector_sum++;
 }
-list_add_tail(&sums->list, list);
+list_add_tail(&sums->list, &tmplist);
 }
 path->slots[0]++;
 }
 ret = 0;
 fail:
+while (ret < 0 && !list_empty(&tmplist)) {
+sums = list_entry(&tmplist, struct btrfs_ordered_sum, list);
+list_del(&sums->list);
+kfree(sums);
+}
+list_splice_tail(&tmplist, list);
 btrfs_free_path(path);
 return ret;
 }
...@@ -420,7 +433,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, ...@@ -420,7 +433,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
offset = page_offset(bvec->bv_page) + bvec->bv_offset; offset = page_offset(bvec->bv_page) + bvec->bv_offset;
ordered = btrfs_lookup_ordered_extent(inode, offset); ordered = btrfs_lookup_ordered_extent(inode, offset);
BUG_ON(!ordered); BUG_ON(!ordered); /* Logic error */
sums->bytenr = ordered->start; sums->bytenr = ordered->start;
while (bio_index < bio->bi_vcnt) { while (bio_index < bio->bi_vcnt) {
...@@ -439,11 +452,11 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, ...@@ -439,11 +452,11 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left), sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
GFP_NOFS); GFP_NOFS);
BUG_ON(!sums); BUG_ON(!sums); /* -ENOMEM */
sector_sum = sums->sums; sector_sum = sums->sums;
sums->len = bytes_left; sums->len = bytes_left;
ordered = btrfs_lookup_ordered_extent(inode, offset); ordered = btrfs_lookup_ordered_extent(inode, offset);
BUG_ON(!ordered); BUG_ON(!ordered); /* Logic error */
sums->bytenr = ordered->start; sums->bytenr = ordered->start;
} }
...@@ -483,7 +496,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, ...@@ -483,7 +496,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
* This calls btrfs_truncate_item with the correct args based on the * This calls btrfs_truncate_item with the correct args based on the
* overlap, and fixes up the key as required. * overlap, and fixes up the key as required.
*/ */
static noinline int truncate_one_csum(struct btrfs_trans_handle *trans, static noinline void truncate_one_csum(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct btrfs_root *root,
struct btrfs_path *path, struct btrfs_path *path,
struct btrfs_key *key, struct btrfs_key *key,
...@@ -494,7 +507,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans, ...@@ -494,7 +507,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
u64 csum_end; u64 csum_end;
u64 end_byte = bytenr + len; u64 end_byte = bytenr + len;
u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits; u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;
int ret;
leaf = path->nodes[0]; leaf = path->nodes[0];
csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size; csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
...@@ -510,7 +522,7 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans, ...@@ -510,7 +522,7 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
*/ */
u32 new_size = (bytenr - key->offset) >> blocksize_bits; u32 new_size = (bytenr - key->offset) >> blocksize_bits;
new_size *= csum_size; new_size *= csum_size;
ret = btrfs_truncate_item(trans, root, path, new_size, 1); btrfs_truncate_item(trans, root, path, new_size, 1);
} else if (key->offset >= bytenr && csum_end > end_byte && } else if (key->offset >= bytenr && csum_end > end_byte &&
end_byte > key->offset) { end_byte > key->offset) {
/* /*
...@@ -522,15 +534,13 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans, ...@@ -522,15 +534,13 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
u32 new_size = (csum_end - end_byte) >> blocksize_bits; u32 new_size = (csum_end - end_byte) >> blocksize_bits;
new_size *= csum_size; new_size *= csum_size;
ret = btrfs_truncate_item(trans, root, path, new_size, 0); btrfs_truncate_item(trans, root, path, new_size, 0);
key->offset = end_byte; key->offset = end_byte;
ret = btrfs_set_item_key_safe(trans, root, path, key); btrfs_set_item_key_safe(trans, root, path, key);
BUG_ON(ret);
} else { } else {
BUG(); BUG();
} }
return 0;
} }
/* /*
...@@ -635,13 +645,14 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, ...@@ -635,13 +645,14 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
* item changed size or key * item changed size or key
*/ */
ret = btrfs_split_item(trans, root, path, &key, offset); ret = btrfs_split_item(trans, root, path, &key, offset);
BUG_ON(ret && ret != -EAGAIN); if (ret && ret != -EAGAIN) {
btrfs_abort_transaction(trans, root, ret);
goto out;
}
key.offset = end_byte - 1; key.offset = end_byte - 1;
} else { } else {
ret = truncate_one_csum(trans, root, path, truncate_one_csum(trans, root, path, &key, bytenr, len);
&key, bytenr, len);
BUG_ON(ret);
if (key.offset < bytenr) if (key.offset < bytenr)
break; break;
} }
...@@ -772,7 +783,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, ...@@ -772,7 +783,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
if (diff != csum_size) if (diff != csum_size)
goto insert; goto insert;
ret = btrfs_extend_item(trans, root, path, diff); btrfs_extend_item(trans, root, path, diff);
goto csum; goto csum;
} }
......
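The btrfs_lookup_csums_range() change above swaps a BUG_ON on allocation failure for a local tmplist: checksum records accumulate on the temporary list and are only spliced onto the caller's list once the whole range has been walked, so a mid-loop -ENOMEM can free exactly what this call allocated and leave the caller's list untouched. The following is a minimal userspace sketch of that collect-then-splice pattern, not the kernel code: the names (sum_node, collect_sums) and the hand-rolled singly linked list standing in for list_head are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct btrfs_ordered_sum: one record per chunk. */
struct sum_node {
	unsigned long bytenr;
	struct sum_node *next;
};

/*
 * Build records for [start, end) on a private list first.  On allocation
 * failure, free only what this call created and leave *out untouched; on
 * success, append the private list to *out in one step (the splice).
 */
static int collect_sums(unsigned long start, unsigned long end,
			unsigned long step, struct sum_node **out)
{
	struct sum_node *tmp_head = NULL, *tmp_tail = NULL;
	unsigned long cur;

	for (cur = start; cur < end; cur += step) {
		struct sum_node *n = calloc(1, sizeof(*n));

		if (!n) {
			/* Unwind the temporary list; the caller sees no change. */
			while (tmp_head) {
				struct sum_node *victim = tmp_head;

				tmp_head = tmp_head->next;
				free(victim);
			}
			return -1;	/* stands in for -ENOMEM */
		}
		n->bytenr = cur;
		if (tmp_tail)
			tmp_tail->next = n;
		else
			tmp_head = n;
		tmp_tail = n;
	}

	/* Splice: append everything we built onto the caller's list. */
	if (tmp_head) {
		if (!*out) {
			*out = tmp_head;
		} else {
			struct sum_node *t = *out;

			while (t->next)
				t = t->next;
			t->next = tmp_head;
		}
	}
	return 0;
}

int main(void)
{
	struct sum_node *list = NULL, *n;

	if (collect_sums(0, 4096, 512, &list) == 0)
		for (n = list; n; n = n->next)
			printf("checksum record at %lu\n", n->bytenr);

	while (list) {
		n = list;
		list = list->next;
		free(n);
	}
	return 0;
}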
@@ -19,6 +19,7 @@
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
+#include "print-tree.h"
 
 static int find_name_in_backref(struct btrfs_path *path, const char *name,
				int name_len, struct btrfs_inode_ref **ref_ret)
@@ -128,13 +129,14 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
	memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
			      item_size - (ptr + sub_item_len - item_start));
-	ret = btrfs_truncate_item(trans, root, path,
-				  item_size - sub_item_len, 1);
+	btrfs_truncate_item(trans, root, path,
+			    item_size - sub_item_len, 1);
 out:
	btrfs_free_path(path);
	return ret;
 }
 
+/* Will return 0, -ENOMEM, -EMLINK, or -EEXIST or anything from the CoW path */
 int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   const char *name, int name_len,
@@ -165,7 +167,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
		goto out;
 
	old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
-	ret = btrfs_extend_item(trans, root, path, ins_len);
+	btrfs_extend_item(trans, root, path, ins_len);
	ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
			     struct btrfs_inode_ref);
	ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
...
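The inode-ref hunks follow the same convention shift as the file-item.c changes above: btrfs_extend_item() and btrfs_truncate_item() no longer report a failure their callers could act on, so call sites simply stop capturing a return value (and, elsewhere in this series, drop the BUG_ON(ret) that guarded it). A tiny sketch of what that refactor looks like on both sides of such a call, using made-up names (struct item, item_truncate) rather than the btrfs API:

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Toy item: a fixed buffer whose used length can only shrink here. */
struct item {
	char data[64];
	size_t len;
};

/*
 * Before the refactor this kind of helper returned an int that could never
 * be non-zero, and callers dutifully checked it.  Returning void documents
 * that it cannot fail and lets the dead error handling at every call site
 * be deleted.
 */
static void item_truncate(struct item *it, size_t new_len)
{
	assert(new_len <= it->len);	/* a caller bug, not a runtime error */
	memset(it->data + new_len, 0, it->len - new_len);
	it->len = new_len;
}

int main(void)
{
	struct item it = { .len = 0 };

	memcpy(it.data, "hello, world", 12);
	it.len = 12;

	/* Call site after the refactor: no ret, nothing to check. */
	item_truncate(&it, 5);
	return it.len == 5 ? 0 : 1;
}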