nexedi / linux / Commits / f0f76413

Commit f0f76413 authored Dec 23, 2015 by Chris Mason

    Merge branch 'freespace-4.5' into for-linus-4.5

Parents: a53fe257 f7d3d2f9

Showing 16 changed files with 2934 additions and 113 deletions (+2934 -113)
fs/btrfs/Makefile                          +3     -2
fs/btrfs/ctree.h                           +155   -2
fs/btrfs/disk-io.c                         +38    -0
fs/btrfs/extent-tree.c                     +71    -30
fs/btrfs/extent_io.c                       +170   -13
fs/btrfs/extent_io.h                       +9     -1
fs/btrfs/free-space-tree.c                 +1584  -0
fs/btrfs/free-space-tree.h                 +72    -0
fs/btrfs/super.c                           +47    -9
fs/btrfs/tests/btrfs-tests.c               +58    -0
fs/btrfs/tests/btrfs-tests.h               +10    -0
fs/btrfs/tests/extent-io-tests.c           +136   -2
fs/btrfs/tests/free-space-tests.c          +2     -39
fs/btrfs/tests/free-space-tree-tests.c     +571   -0
fs/btrfs/tests/qgroup-tests.c              +6     -14
include/trace/events/btrfs.h               +2     -1
fs/btrfs/Makefile  View file @ f0f76413
...
@@ -9,11 +9,12 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   export.o tree-log.o free-space-cache.o zlib.o lzo.o \
 	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
 	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
-	   uuid-tree.o props.o hash.o
+	   uuid-tree.o props.o hash.o free-space-tree.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
 
 btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
 	tests/extent-buffer-tests.o tests/btrfs-tests.o \
-	tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o
+	tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o \
+	tests/free-space-tree-tests.o
fs/btrfs/ctree.h  View file @ f0f76413
...
@@ -96,6 +96,9 @@ struct btrfs_ordered_sum;
 /* for storing items that use the BTRFS_UUID_KEY* types */
 #define BTRFS_UUID_TREE_OBJECTID 9ULL
 
+/* tracks free space in block groups. */
+#define BTRFS_FREE_SPACE_TREE_OBJECTID 10ULL
+
 /* for storing balance parameters in the root tree */
 #define BTRFS_BALANCE_OBJECTID -4ULL
...
@@ -500,6 +503,8 @@ struct btrfs_super_block {
  * Compat flags that we support. If any incompat flags are set other than the
  * ones specified below then we will fail to mount
  */
+#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE	(1ULL << 0)
+
 #define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF	(1ULL << 0)
 #define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL	(1ULL << 1)
 #define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS	(1ULL << 2)
...
@@ -526,7 +531,10 @@ struct btrfs_super_block {
 #define BTRFS_FEATURE_COMPAT_SUPP		0ULL
 #define BTRFS_FEATURE_COMPAT_SAFE_SET		0ULL
 #define BTRFS_FEATURE_COMPAT_SAFE_CLEAR		0ULL
-#define BTRFS_FEATURE_COMPAT_RO_SUPP		0ULL
+#define BTRFS_FEATURE_COMPAT_RO_SUPP			\
+	(BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE)
+
 #define BTRFS_FEATURE_COMPAT_RO_SAFE_SET	0ULL
 #define BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR	0ULL
...
@@ -1088,6 +1096,13 @@ struct btrfs_block_group_item {
 	__le64 flags;
 } __attribute__ ((__packed__));
 
+struct btrfs_free_space_info {
+	__le32 extent_count;
+	__le32 flags;
+} __attribute__ ((__packed__));
+
+#define BTRFS_FREE_SPACE_USING_BITMAPS (1ULL << 0)
+
 #define BTRFS_QGROUP_LEVEL_SHIFT	48
 static inline u64 btrfs_qgroup_level(u64 qgroupid)
 {
...
@@ -1296,6 +1311,9 @@ struct btrfs_caching_control {
 	atomic_t count;
 };
 
+/* Once caching_thread() finds this much free space, it will wake up waiters. */
+#define CACHING_CTL_WAKE_UP (1024 * 1024 * 2)
+
 struct btrfs_io_ctl {
 	void *cur, *orig;
 	struct page *page;
...
@@ -1321,8 +1339,20 @@ struct btrfs_block_group_cache {
 	u64 delalloc_bytes;
 	u64 bytes_super;
 	u64 flags;
-	u64 sectorsize;
 	u64 cache_generation;
+	u32 sectorsize;
+
+	/*
+	 * If the free space extent count exceeds this number, convert the block
+	 * group to bitmaps.
+	 */
+	u32 bitmap_high_thresh;
+
+	/*
+	 * If the free space extent count drops below this number, convert the
+	 * block group back to extents.
+	 */
+	u32 bitmap_low_thresh;
 
 	/*
 	 * It is just used for the delayed data space allocation because
...
@@ -1378,6 +1408,15 @@ struct btrfs_block_group_cache {
 	struct list_head io_list;
 
 	struct btrfs_io_ctl io_ctl;
+
+	/* Lock for free space tree operations. */
+	struct mutex free_space_lock;
+
+	/*
+	 * Does the block group need to be added to the free space tree?
+	 * Protected by free_space_lock.
+	 */
+	int needs_free_space;
 };
 
 /* delayed seq elem */
...
...
@@ -1429,6 +1468,7 @@ struct btrfs_fs_info {
 	struct btrfs_root *csum_root;
 	struct btrfs_root *quota_root;
 	struct btrfs_root *uuid_root;
+	struct btrfs_root *free_space_root;
 
 	/* the log root tree is a directory of all the other log roots */
 	struct btrfs_root *log_root_tree;
...
@@ -2092,6 +2132,27 @@ struct btrfs_ioctl_defrag_range_args {
  */
 #define BTRFS_BLOCK_GROUP_ITEM_KEY 192
 
+/*
+ * Every block group is represented in the free space tree by a free space info
+ * item, which stores some accounting information. It is keyed on
+ * (block_group_start, FREE_SPACE_INFO, block_group_length).
+ */
+#define BTRFS_FREE_SPACE_INFO_KEY 198
+
+/*
+ * A free space extent tracks an extent of space that is free in a block group.
+ * It is keyed on (start, FREE_SPACE_EXTENT, length).
+ */
+#define BTRFS_FREE_SPACE_EXTENT_KEY 199
+
+/*
+ * When a block group becomes very fragmented, we convert it to use bitmaps
+ * instead of extents. A free space bitmap is keyed on
+ * (start, FREE_SPACE_BITMAP, length); the corresponding item is a bitmap with
+ * (length / sectorsize) bits.
+ */
+#define BTRFS_FREE_SPACE_BITMAP_KEY 200
+
 #define BTRFS_DEV_EXTENT_KEY	204
 #define BTRFS_DEV_ITEM_KEY	216
 #define BTRFS_CHUNK_ITEM_KEY	228
...
@@ -2184,6 +2245,7 @@ struct btrfs_ioctl_defrag_range_args {
 #define BTRFS_MOUNT_RESCAN_UUID_TREE	(1 << 23)
 #define BTRFS_MOUNT_FRAGMENT_DATA	(1 << 24)
 #define BTRFS_MOUNT_FRAGMENT_METADATA	(1 << 25)
+#define BTRFS_MOUNT_FREE_SPACE_TREE	(1 << 26)
 
 #define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
 #define BTRFS_DEFAULT_MAX_INLINE	(8192)
...
@@ -2506,6 +2568,11 @@ BTRFS_SETGET_FUNCS(disk_block_group_flags,
 BTRFS_SETGET_STACK_FUNCS(block_group_flags,
			struct btrfs_block_group_item, flags, 64);
 
+/* struct btrfs_free_space_info */
+BTRFS_SETGET_FUNCS(free_space_extent_count, struct btrfs_free_space_info,
+		   extent_count, 32);
+BTRFS_SETGET_FUNCS(free_space_flags, struct btrfs_free_space_info, flags, 32);
+
 /* struct btrfs_inode_ref */
 BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16);
 BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64);
...
@@ -3573,6 +3640,9 @@ void btrfs_end_write_no_snapshoting(struct btrfs_root *root);
 void check_system_chunk(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			const u64 type);
+u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
+		       struct btrfs_fs_info *info, u64 start, u64 end);
 
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot);
...
...
@@ -3737,6 +3807,7 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info)
 	kfree(fs_info->csum_root);
 	kfree(fs_info->quota_root);
 	kfree(fs_info->uuid_root);
+	kfree(fs_info->free_space_root);
 	kfree(fs_info->super_copy);
 	kfree(fs_info->super_for_commit);
 	security_free_mnt_opts(&fs_info->security_opts);
...
@@ -4246,6 +4317,30 @@ static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
 	}
 }
 
+#define btrfs_clear_fs_incompat(__fs_info, opt) \
+	__btrfs_clear_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
+
+static inline void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info,
+					     u64 flag)
+{
+	struct btrfs_super_block *disk_super;
+	u64 features;
+
+	disk_super = fs_info->super_copy;
+	features = btrfs_super_incompat_flags(disk_super);
+	if (features & flag) {
+		spin_lock(&fs_info->super_lock);
+		features = btrfs_super_incompat_flags(disk_super);
+		if (features & flag) {
+			features &= ~flag;
+			btrfs_set_super_incompat_flags(disk_super, features);
+			btrfs_info(fs_info, "clearing %llu feature flag", flag);
+		}
+		spin_unlock(&fs_info->super_lock);
+	}
+}
+
 #define btrfs_fs_incompat(fs_info, opt) \
 	__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
...
...
@@ -4256,6 +4351,64 @@ static inline int __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
 	return !!(btrfs_super_incompat_flags(disk_super) & flag);
 }
 
+#define btrfs_set_fs_compat_ro(__fs_info, opt) \
+	__btrfs_set_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
+
+static inline void __btrfs_set_fs_compat_ro(struct btrfs_fs_info *fs_info,
+					    u64 flag)
+{
+	struct btrfs_super_block *disk_super;
+	u64 features;
+
+	disk_super = fs_info->super_copy;
+	features = btrfs_super_compat_ro_flags(disk_super);
+	if (!(features & flag)) {
+		spin_lock(&fs_info->super_lock);
+		features = btrfs_super_compat_ro_flags(disk_super);
+		if (!(features & flag)) {
+			features |= flag;
+			btrfs_set_super_compat_ro_flags(disk_super, features);
+			btrfs_info(fs_info, "setting %llu ro feature flag", flag);
+		}
+		spin_unlock(&fs_info->super_lock);
+	}
+}
+
+#define btrfs_clear_fs_compat_ro(__fs_info, opt) \
+	__btrfs_clear_fs_compat_ro((__fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
+
+static inline void __btrfs_clear_fs_compat_ro(struct btrfs_fs_info *fs_info,
+					      u64 flag)
+{
+	struct btrfs_super_block *disk_super;
+	u64 features;
+
+	disk_super = fs_info->super_copy;
+	features = btrfs_super_compat_ro_flags(disk_super);
+	if (features & flag) {
+		spin_lock(&fs_info->super_lock);
+		features = btrfs_super_compat_ro_flags(disk_super);
+		if (features & flag) {
+			features &= ~flag;
+			btrfs_set_super_compat_ro_flags(disk_super, features);
+			btrfs_info(fs_info, "clearing %llu ro feature flag", flag);
+		}
+		spin_unlock(&fs_info->super_lock);
+	}
+}
+
+#define btrfs_fs_compat_ro(fs_info, opt) \
+	__btrfs_fs_compat_ro((fs_info), BTRFS_FEATURE_COMPAT_RO_##opt)
+
+static inline int __btrfs_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag)
+{
+	struct btrfs_super_block *disk_super;
+
+	disk_super = fs_info->super_copy;
+	return !!(btrfs_super_compat_ro_flags(disk_super) & flag);
+}
+
 /*
  * Call btrfs_abort_transaction as early as possible when an error condition is
  * detected, that way the exact line number is reported.
...
...
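The three key types introduced in ctree.h above fully describe the free space of one block group. As a concrete illustration (not part of the patch), the following userspace sketch prints the (objectid, type, offset) triples that would describe a hypothetical 256MiB block group starting at 1GiB containing a single 1MiB free extent; the addresses and sizes are made up.

#include <stdio.h>
#include <stdint.h>

/* Key type values copied from the ctree.h hunk above. */
#define FREE_SPACE_INFO_KEY	198
#define FREE_SPACE_EXTENT_KEY	199

struct key { uint64_t objectid; uint8_t type; uint64_t offset; };

static void print_key(const char *what, struct key k)
{
	printf("%-20s (%llu, %u, %llu)\n", what,
	       (unsigned long long)k.objectid, (unsigned)k.type,
	       (unsigned long long)k.offset);
}

int main(void)
{
	uint64_t bg_start = 1024ULL << 20;	/* hypothetical block group at 1GiB */
	uint64_t bg_len = 256ULL << 20;		/* 256MiB long */

	/* One info item per block group, keyed on (start, INFO, length). */
	struct key info = { bg_start, FREE_SPACE_INFO_KEY, bg_len };

	/* A 1MiB free extent 16MiB into the group, keyed on (start, EXTENT, length). */
	struct key extent = { bg_start + (16ULL << 20), FREE_SPACE_EXTENT_KEY,
			      1ULL << 20 };

	print_key("free space info:", info);
	print_key("free space extent:", extent);
	return 0;
}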
fs/btrfs/disk-io.c  View file @ f0f76413
...
@@ -42,6 +42,7 @@
 #include "locking.h"
 #include "tree-log.h"
 #include "free-space-cache.h"
+#include "free-space-tree.h"
 #include "inode-map.h"
 #include "check-integrity.h"
 #include "rcu-string.h"
...
@@ -1650,6 +1651,9 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
 	if (location->objectid == BTRFS_UUID_TREE_OBJECTID)
 		return fs_info->uuid_root ? fs_info->uuid_root :
					    ERR_PTR(-ENOENT);
+	if (location->objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
+		return fs_info->free_space_root ? fs_info->free_space_root :
+						  ERR_PTR(-ENOENT);
 again:
 	root = btrfs_lookup_fs_root(fs_info, location->objectid);
 	if (root) {
...
@@ -2148,6 +2152,7 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
 	free_root_extent_buffers(info->uuid_root);
 	if (chunk_root)
 		free_root_extent_buffers(info->chunk_root);
+	free_root_extent_buffers(info->free_space_root);
 }
 
 void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info)
...
@@ -2448,6 +2453,15 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info,
 		fs_info->uuid_root = root;
 	}
 
+	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
+		location.objectid = BTRFS_FREE_SPACE_TREE_OBJECTID;
+		root = btrfs_read_tree_root(tree_root, &location);
+		if (IS_ERR(root))
+			return PTR_ERR(root);
+		set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
+		fs_info->free_space_root = root;
+	}
+
 	return 0;
 }
...
@@ -3076,6 +3090,30 @@ int open_ctree(struct super_block *sb,
 	btrfs_qgroup_rescan_resume(fs_info);
 
+	if (btrfs_test_opt(tree_root, CLEAR_CACHE) &&
+	    btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
+		pr_info("BTRFS: clearing free space tree\n");
+		ret = btrfs_clear_free_space_tree(fs_info);
+		if (ret) {
+			pr_warn("BTRFS: failed to clear free space tree %d\n",
+				ret);
+			close_ctree(tree_root);
+			return ret;
+		}
+	}
+
+	if (btrfs_test_opt(tree_root, FREE_SPACE_TREE) &&
+	    !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
+		pr_info("BTRFS: creating free space tree\n");
+		ret = btrfs_create_free_space_tree(fs_info);
+		if (ret) {
+			pr_warn("BTRFS: failed to create free space tree %d\n",
+				ret);
+			close_ctree(tree_root);
+			return ret;
+		}
+	}
+
 	if (!fs_info->uuid_root) {
 		pr_info("BTRFS: creating UUID tree\n");
 		ret = btrfs_create_uuid_tree(fs_info);
...
...
fs/btrfs/extent-tree.c  View file @ f0f76413
...
@@ -33,6 +33,7 @@
 #include "raid56.h"
 #include "locking.h"
 #include "free-space-cache.h"
+#include "free-space-tree.h"
 #include "math.h"
 #include "sysfs.h"
 #include "qgroup.h"
...
@@ -357,7 +358,7 @@ static void fragment_free_space(struct btrfs_root *root,
  * we need to check the pinned_extents for any extents that can't be used yet
  * since their free space will be released as soon as the transaction commits.
  */
-static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
-			      struct btrfs_fs_info *info, u64 start, u64 end)
+u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
+		       struct btrfs_fs_info *info, u64 start, u64 end)
 {
 	u64 extent_start, extent_end, size, total_added = 0;
...
@@ -395,11 +396,10 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 	return total_added;
 }
 
-static noinline void caching_thread(struct btrfs_work *work)
+static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
 {
 	struct btrfs_block_group_cache *block_group;
 	struct btrfs_fs_info *fs_info;
-	struct btrfs_caching_control *caching_ctl;
 	struct btrfs_root *extent_root;
 	struct btrfs_path *path;
 	struct extent_buffer *leaf;
...
@@ -407,17 +407,16 @@ static noinline void caching_thread(struct btrfs_work *work)
 	u64 total_found = 0;
 	u64 last = 0;
 	u32 nritems;
-	int ret = -ENOMEM;
+	int ret;
 	bool wakeup = true;
 
-	caching_ctl = container_of(work, struct btrfs_caching_control, work);
 	block_group = caching_ctl->block_group;
 	fs_info = block_group->fs_info;
 	extent_root = fs_info->extent_root;
 
 	path = btrfs_alloc_path();
 	if (!path)
-		goto out;
+		return -ENOMEM;
 
 	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
...
...
@@ -443,15 +442,11 @@ static noinline void caching_thread(struct btrfs_work *work)
 	key.objectid = last;
 	key.offset = 0;
 	key.type = BTRFS_EXTENT_ITEM_KEY;
 
-again:
-	mutex_lock(&caching_ctl->mutex);
-	/* need to make sure the commit_root doesn't disappear */
-	down_read(&fs_info->commit_root_sem);
-
 next:
 	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
 	if (ret < 0)
-		goto err;
+		goto out;
 
 	leaf = path->nodes[0];
 	nritems = btrfs_header_nritems(leaf);
...
@@ -477,12 +472,14 @@ static noinline void caching_thread(struct btrfs_work *work)
 			up_read(&fs_info->commit_root_sem);
 			mutex_unlock(&caching_ctl->mutex);
 			cond_resched();
-			goto again;
+			mutex_lock(&caching_ctl->mutex);
+			down_read(&fs_info->commit_root_sem);
+			goto next;
 		}
 
 		ret = btrfs_next_leaf(extent_root, path);
 		if (ret < 0)
-			goto err;
+			goto out;
 		if (ret)
 			break;
 		leaf = path->nodes[0];
...
@@ -521,7 +518,7 @@ static noinline void caching_thread(struct btrfs_work *work)
 		else
 			last = key.objectid + key.offset;
 
-		if (total_found > (1024 * 1024 * 2)) {
+		if (total_found > CACHING_CTL_WAKE_UP) {
 			total_found = 0;
 			if (wakeup)
 				wake_up(&caching_ctl->wait);
...
@@ -534,9 +531,35 @@ static noinline void caching_thread(struct btrfs_work *work)
 	total_found += add_new_free_space(block_group, fs_info, last,
 					  block_group->key.objectid +
 					  block_group->key.offset);
 	caching_ctl->progress = (u64)-1;
 
+out:
+	btrfs_free_path(path);
+	return ret;
+}
+
+static noinline void caching_thread(struct btrfs_work *work)
+{
+	struct btrfs_block_group_cache *block_group;
+	struct btrfs_fs_info *fs_info;
+	struct btrfs_caching_control *caching_ctl;
+	int ret;
+
+	caching_ctl = container_of(work, struct btrfs_caching_control, work);
+	block_group = caching_ctl->block_group;
+	fs_info = block_group->fs_info;
+
+	mutex_lock(&caching_ctl->mutex);
+	down_read(&fs_info->commit_root_sem);
+
+	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
+		ret = load_free_space_tree(caching_ctl);
+	else
+		ret = load_extent_tree_free(caching_ctl);
+
 	spin_lock(&block_group->lock);
 	block_group->caching_ctl = NULL;
-	block_group->cached = BTRFS_CACHE_FINISHED;
+	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
 	spin_unlock(&block_group->lock);
 
 #ifdef CONFIG_BTRFS_DEBUG
...
@@ -555,20 +578,11 @@ static noinline void caching_thread(struct btrfs_work *work)
 #endif
 
 	caching_ctl->progress = (u64)-1;
-err:
-	btrfs_free_path(path);
-	up_read(&fs_info->commit_root_sem);
-
-	free_excluded_extents(extent_root, block_group);
 
+	up_read(&fs_info->commit_root_sem);
+	free_excluded_extents(fs_info->extent_root, block_group);
 	mutex_unlock(&caching_ctl->mutex);
-out:
-	if (ret) {
-		spin_lock(&block_group->lock);
-		block_group->caching_ctl = NULL;
-		block_group->cached = BTRFS_CACHE_ERROR;
-		spin_unlock(&block_group->lock);
-	}
+
 	wake_up(&caching_ctl->wait);
 
 	put_caching_control(caching_ctl);
...
...
@@ -680,8 +694,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 		}
 	} else {
 		/*
-		 * We are not going to do the fast caching, set cached to the
-		 * appropriate value and wakeup any waiters.
+		 * We're either using the free space tree or no caching at all.
+		 * Set cached to the appropriate value and wakeup any waiters.
 		 */
 		spin_lock(&cache->lock);
 		if (load_cache_only) {
...
@@ -6676,6 +6690,13 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
 			}
 		}
 
+		ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
+					     num_bytes);
+		if (ret) {
+			btrfs_abort_transaction(trans, extent_root, ret);
+			goto out;
+		}
+
 		ret = update_block_group(trans, root, bytenr, num_bytes, 0);
 		if (ret) {
 			btrfs_abort_transaction(trans, extent_root, ret);
...
@@ -7687,6 +7708,11 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(path->nodes[0]);
 	btrfs_free_path(path);
 
+	ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
+					  ins->offset);
+	if (ret)
+		return ret;
+
 	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
 	if (ret) { /* -ENOENT, logic error */
 		btrfs_err(fs_info, "update block group failed for %llu %llu",
...
@@ -7767,6 +7793,11 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(leaf);
 	btrfs_free_path(path);
 
+	ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
+					  num_bytes);
+	if (ret)
+		return ret;
+
 	ret = update_block_group(trans, root, ins->objectid, root->nodesize,
				 1);
 	if (ret) { /* -ENOENT, logic error */
...
@@ -9671,6 +9702,8 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
 	cache->full_stripe_len = btrfs_full_stripe_len(root,
					       &root->fs_info->mapping_tree,
					       start);
+	set_free_space_tree_thresholds(cache);
+
 	atomic_set(&cache->count, 1);
 	spin_lock_init(&cache->lock);
 	init_rwsem(&cache->data_rwsem);
...
@@ -9682,6 +9715,7 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
 	INIT_LIST_HEAD(&cache->io_list);
 	btrfs_init_free_space_ctl(cache);
 	atomic_set(&cache->trimming, 0);
+	mutex_init(&cache->free_space_lock);
 
 	return cache;
 }
...
@@ -9892,6 +9926,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
					       key.objectid, key.offset);
 		if (ret)
 			btrfs_abort_transaction(trans, extent_root, ret);
+		add_block_group_free_space(trans, root->fs_info, block_group);
+		/* already aborted the transaction if it failed. */
 next:
 		list_del_init(&block_group->bg_list);
 	}
...
@@ -9922,6 +9958,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 	cache->flags = type;
 	cache->last_byte_to_unpin = (u64)-1;
 	cache->cached = BTRFS_CACHE_FINISHED;
+	cache->needs_free_space = 1;
 	ret = exclude_super_stripes(root, cache);
 	if (ret) {
 		/*
...
@@ -10292,6 +10329,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	unlock_chunks(root);
 
+	ret = remove_block_group_free_space(trans, root->fs_info, block_group);
+	if (ret)
+		goto out;
+
 	btrfs_put_block_group(block_group);
 	btrfs_put_block_group(block_group);
...
...
fs/btrfs/extent_io.c  View file @ f0f76413
...
@@ -4702,24 +4702,14 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
 	return new;
 }
 
-struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
-						u64 start)
+struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
+						  u64 start, unsigned long len)
 {
 	struct extent_buffer *eb;
-	unsigned long len;
 	unsigned long num_pages;
 	unsigned long i;
 
-	if (!fs_info) {
-		/*
-		 * Called only from tests that don't always have a fs_info
-		 * available, but we know that nodesize is 4096
-		 */
-		len = 4096;
-	} else {
-		len = fs_info->tree_root->nodesize;
-	}
-	num_pages = num_extent_pages(0, len);
+	num_pages = num_extent_pages(start, len);
 
 	eb = __alloc_extent_buffer(fs_info, start, len);
 	if (!eb)
...
@@ -4742,6 +4732,24 @@ struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
 	return NULL;
 }
 
+struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
+						u64 start)
+{
+	unsigned long len;
+
+	if (!fs_info) {
+		/*
+		 * Called only from tests that don't always have a fs_info
+		 * available, but we know that nodesize is 4096
+		 */
+		len = 4096;
+	} else {
+		len = fs_info->tree_root->nodesize;
+	}
+
+	return __alloc_dummy_extent_buffer(fs_info, start, len);
+}
+
 static void check_buffer_tree_ref(struct extent_buffer *eb)
 {
 	int refs;
...
...
@@ -5497,6 +5505,155 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
 	}
 }
 
+/*
+ * The extent buffer bitmap operations are done with byte granularity because
+ * bitmap items are not guaranteed to be aligned to a word and therefore a
+ * single word in a bitmap may straddle two pages in the extent buffer.
+ */
+#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
+#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
+#define BITMAP_FIRST_BYTE_MASK(start) \
+	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
+#define BITMAP_LAST_BYTE_MASK(nbits) \
+	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))
+
+/*
+ * eb_bitmap_offset() - calculate the page and offset of the byte containing the
+ * given bit number
+ * @eb: the extent buffer
+ * @start: offset of the bitmap item in the extent buffer
+ * @nr: bit number
+ * @page_index: return index of the page in the extent buffer that contains the
+ * given bit number
+ * @page_offset: return offset into the page given by page_index
+ *
+ * This helper hides the ugliness of finding the byte in an extent buffer which
+ * contains a given bit.
+ */
+static inline void eb_bitmap_offset(struct extent_buffer *eb,
+				    unsigned long start, unsigned long nr,
+				    unsigned long *page_index,
+				    size_t *page_offset)
+{
+	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
+	size_t byte_offset = BIT_BYTE(nr);
+	size_t offset;
+
+	/*
+	 * The byte we want is the offset of the extent buffer + the offset of
+	 * the bitmap item in the extent buffer + the offset of the byte in the
+	 * bitmap item.
+	 */
+	offset = start_offset + start + byte_offset;
+
+	*page_index = offset >> PAGE_CACHE_SHIFT;
+	*page_offset = offset & (PAGE_CACHE_SIZE - 1);
+}
+
+/**
+ * extent_buffer_test_bit - determine whether a bit in a bitmap item is set
+ * @eb: the extent buffer
+ * @start: offset of the bitmap item in the extent buffer
+ * @nr: bit number to test
+ */
+int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
+			   unsigned long nr)
+{
+	char *kaddr;
+	struct page *page;
+	unsigned long i;
+	size_t offset;
+
+	eb_bitmap_offset(eb, start, nr, &i, &offset);
+	page = eb->pages[i];
+	WARN_ON(!PageUptodate(page));
+	kaddr = page_address(page);
+	return 1U & (kaddr[offset] >> (nr & (BITS_PER_BYTE - 1)));
+}
+
+/**
+ * extent_buffer_bitmap_set - set an area of a bitmap
+ * @eb: the extent buffer
+ * @start: offset of the bitmap item in the extent buffer
+ * @pos: bit number of the first bit
+ * @len: number of bits to set
+ */
+void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
+			      unsigned long pos, unsigned long len)
+{
+	char *kaddr;
+	struct page *page;
+	unsigned long i;
+	size_t offset;
+	const unsigned int size = pos + len;
+	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
+	unsigned int mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
+
+	eb_bitmap_offset(eb, start, pos, &i, &offset);
+	page = eb->pages[i];
+	WARN_ON(!PageUptodate(page));
+	kaddr = page_address(page);
+
+	while (len >= bits_to_set) {
+		kaddr[offset] |= mask_to_set;
+		len -= bits_to_set;
+		bits_to_set = BITS_PER_BYTE;
+		mask_to_set = ~0U;
+		if (++offset >= PAGE_CACHE_SIZE && len > 0) {
+			offset = 0;
+			page = eb->pages[++i];
+			WARN_ON(!PageUptodate(page));
+			kaddr = page_address(page);
+		}
+	}
+	if (len) {
+		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
+		kaddr[offset] |= mask_to_set;
+	}
+}
+
+/**
+ * extent_buffer_bitmap_clear - clear an area of a bitmap
+ * @eb: the extent buffer
+ * @start: offset of the bitmap item in the extent buffer
+ * @pos: bit number of the first bit
+ * @len: number of bits to clear
+ */
+void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
+				unsigned long pos, unsigned long len)
+{
+	char *kaddr;
+	struct page *page;
+	unsigned long i;
+	size_t offset;
+	const unsigned int size = pos + len;
+	int bits_to_clear = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
+	unsigned int mask_to_clear = BITMAP_FIRST_BYTE_MASK(pos);
+
+	eb_bitmap_offset(eb, start, pos, &i, &offset);
+	page = eb->pages[i];
+	WARN_ON(!PageUptodate(page));
+	kaddr = page_address(page);
+
+	while (len >= bits_to_clear) {
+		kaddr[offset] &= ~mask_to_clear;
+		len -= bits_to_clear;
+		bits_to_clear = BITS_PER_BYTE;
+		mask_to_clear = ~0U;
+		if (++offset >= PAGE_CACHE_SIZE && len > 0) {
+			offset = 0;
+			page = eb->pages[++i];
+			WARN_ON(!PageUptodate(page));
+			kaddr = page_address(page);
+		}
+	}
+	if (len) {
+		mask_to_clear &= BITMAP_LAST_BYTE_MASK(size);
+		kaddr[offset] &= ~mask_to_clear;
+	}
+}
+
 static inline bool areas_overlap(unsigned long src, unsigned long dst, unsigned long len)
 {
 	unsigned long distance = (src > dst) ? src - dst : dst - src;
...
...
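The extent_buffer_bitmap_set()/_clear() helpers added above work a byte at a time so that a bitmap item can straddle page boundaries inside the extent buffer. As an illustration (not part of the patch), the userspace sketch below reproduces just the first-byte/last-byte mask arithmetic on a flat buffer, with the page hopping removed; the buffer size and bit range are arbitrary.

#include <stdio.h>
#include <stddef.h>

#define BITS_PER_BYTE 8
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)
#define BYTE_MASK ((1 << BITS_PER_BYTE) - 1)
#define BITMAP_FIRST_BYTE_MASK(start) \
	((BYTE_MASK << ((start) & (BITS_PER_BYTE - 1))) & BYTE_MASK)
#define BITMAP_LAST_BYTE_MASK(nbits) \
	(BYTE_MASK >> (-(nbits) & (BITS_PER_BYTE - 1)))

/* Set bits [pos, pos + len) in a flat byte buffer, one byte at a time. */
static void bitmap_set_bytes(unsigned char *buf, unsigned long pos,
			     unsigned long len)
{
	const unsigned int size = pos + len;
	int bits_to_set = BITS_PER_BYTE - (pos % BITS_PER_BYTE);
	unsigned int mask_to_set = BITMAP_FIRST_BYTE_MASK(pos);
	size_t offset = BIT_BYTE(pos);

	while (len >= bits_to_set) {
		buf[offset] |= mask_to_set;	/* fill the rest of this byte */
		len -= bits_to_set;
		bits_to_set = BITS_PER_BYTE;
		mask_to_set = BYTE_MASK;	/* whole bytes from here on */
		offset++;
	}
	if (len) {
		/* partial final byte: keep only the bits below 'size' */
		mask_to_set &= BITMAP_LAST_BYTE_MASK(size);
		buf[offset] |= mask_to_set;
	}
}

int main(void)
{
	unsigned char buf[4] = {0};
	int i;

	bitmap_set_bytes(buf, 5, 13);	/* set bits 5..17 across three bytes */
	for (i = 0; i < 4; i++)
		printf("%02x ", buf[i]);
	printf("\n");			/* prints: e0 ff 03 00 */
	return 0;
}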
fs/btrfs/extent_io.h  View file @ f0f76413
...
@@ -350,6 +350,8 @@ void set_page_extent_mapped(struct page *page);
 
 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
					  u64 start);
+struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
+						  u64 start, unsigned long len);
 struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
						u64 start);
 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
...
@@ -396,6 +398,12 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len);
 void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len);
+int extent_buffer_test_bit(struct extent_buffer *eb, unsigned long start,
+			   unsigned long pos);
+void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start,
+			      unsigned long pos, unsigned long len);
+void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start,
+				unsigned long pos, unsigned long len);
 void clear_extent_buffer_dirty(struct extent_buffer *eb);
 int set_extent_buffer_dirty(struct extent_buffer *eb);
 void set_extent_buffer_uptodate(struct extent_buffer *eb)
...
...
fs/btrfs/free-space-tree.c  0 → 100644 (new file)  View file @ f0f76413

/*
 * Copyright (C) 2015 Facebook. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include "ctree.h"
#include "disk-io.h"
#include "locking.h"
#include "free-space-tree.h"
#include "transaction.h"

static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
					struct btrfs_fs_info *fs_info,
					struct btrfs_block_group_cache *block_group,
					struct btrfs_path *path);

void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache)
{
	u32 bitmap_range;
	size_t bitmap_size;
	u64 num_bitmaps, total_bitmap_size;

	/*
	 * We convert to bitmaps when the disk space required for using extents
	 * exceeds that required for using bitmaps.
	 */
	bitmap_range = cache->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1,
			      bitmap_range);
	bitmap_size = sizeof(struct btrfs_item) + BTRFS_FREE_SPACE_BITMAP_SIZE;
	total_bitmap_size = num_bitmaps * bitmap_size;
	cache->bitmap_high_thresh = div_u64(total_bitmap_size,
					    sizeof(struct btrfs_item));

	/*
	 * We allow for a small buffer between the high threshold and low
	 * threshold to avoid thrashing back and forth between the two formats.
	 */
	if (cache->bitmap_high_thresh > 100)
		cache->bitmap_low_thresh = cache->bitmap_high_thresh - 100;
	else
		cache->bitmap_low_thresh = 0;
}
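To make the threshold formula above concrete, here is a userspace sketch for one hypothetical 1GiB block group with 4KiB sectors. The values used for BTRFS_FREE_SPACE_BITMAP_SIZE (256 bytes per bitmap item) and sizeof(struct btrfs_item) (25 bytes) are assumptions taken from elsewhere in the series, not from this file, so treat the printed numbers as illustrative only.

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE	8
#define BITMAP_SIZE	256	/* assumed BTRFS_FREE_SPACE_BITMAP_SIZE */
#define BITMAP_BITS	(BITMAP_SIZE * BITS_PER_BYTE)
#define ITEM_SIZE	25	/* assumed sizeof(struct btrfs_item) */

int main(void)
{
	uint64_t bg_len = 1024ULL << 20;	/* hypothetical 1GiB block group */
	uint32_t sectorsize = 4096;

	/* One bitmap item covers sectorsize * BITMAP_BITS bytes of space. */
	uint64_t bitmap_range = (uint64_t)sectorsize * BITMAP_BITS;
	uint64_t num_bitmaps = (bg_len + bitmap_range - 1) / bitmap_range;
	uint64_t total_bitmap_size = num_bitmaps * (ITEM_SIZE + BITMAP_SIZE);

	/* Convert to bitmaps once extents would cost more leaf space. */
	uint32_t high = (uint32_t)(total_bitmap_size / ITEM_SIZE);
	uint32_t low = high > 100 ? high - 100 : 0;

	/* 1GiB / 8MiB = 128 bitmaps; 128 * 281 / 25 = 1438 extents. */
	printf("bitmap_high_thresh = %u, bitmap_low_thresh = %u\n", high, low);
	return 0;
}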
static int add_new_free_space_info(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_block_group_cache *block_group,
				   struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = block_group->key.objectid;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->key.offset;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*info));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	info = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_free_space_info);
	btrfs_set_free_space_extent_count(leaf, info, 0);
	btrfs_set_free_space_flags(leaf, info, 0);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}

struct btrfs_free_space_info *
search_free_space_info(struct btrfs_trans_handle *trans,
		       struct btrfs_fs_info *fs_info,
		       struct btrfs_block_group_cache *block_group,
		       struct btrfs_path *path, int cow)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	int ret;

	key.objectid = block_group->key.objectid;
	key.type = BTRFS_FREE_SPACE_INFO_KEY;
	key.offset = block_group->key.offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, cow);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret != 0) {
		btrfs_warn(fs_info, "missing free space info for %llu\n",
			   block_group->key.objectid);
		ASSERT(0);
		return ERR_PTR(-ENOENT);
	}

	return btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_free_space_info);
}

/*
 * btrfs_search_slot() but we're looking for the greatest key less than the
 * passed key.
 */
static int btrfs_search_prev_slot(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key, struct btrfs_path *p,
				  int ins_len, int cow)
{
	int ret;

	ret = btrfs_search_slot(trans, root, key, p, ins_len, cow);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		ASSERT(0);
		return -EIO;
	}

	if (p->slots[0] == 0) {
		ASSERT(0);
		return -EIO;
	}
	p->slots[0]--;

	return 0;
}

static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
{
	return DIV_ROUND_UP((u32)div_u64(size, sectorsize), BITS_PER_BYTE);
}

static unsigned long *alloc_bitmap(u32 bitmap_size)
{
	return __vmalloc(bitmap_size, GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO,
			 PAGE_KERNEL);
}

int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	unsigned long *bitmap;
	char *bitmap_cursor;
	u64 start, end;
	u64 bitmap_range, i;
	u32 bitmap_size, flags, expected_extent_count;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     block_group->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY) {
				u64 first, last;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				first = div_u64(found_key.objectid - start,
						block_group->sectorsize);
				last = div_u64(found_key.objectid + found_key.offset - start,
					       block_group->sectorsize);
				bitmap_set(bitmap, first, last - first);

				extent_count++;
				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags |= BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	bitmap_cursor = (char *)bitmap;
	bitmap_range = block_group->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS;
	i = start;
	while (i < end) {
		unsigned long ptr;
		u64 extent_size;
		u32 data_size;

		extent_size = min(end - i, bitmap_range);
		data_size = free_space_bitmap_size(extent_size,
						   block_group->sectorsize);

		key.objectid = i;
		key.type = BTRFS_FREE_SPACE_BITMAP_KEY;
		key.offset = extent_size;

		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      data_size);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, bitmap_cursor, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
		btrfs_release_path(path);

		i += extent_size;
		bitmap_cursor += data_size;
	}

	ret = 0;
out:
	vfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
	return ret;
}
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_free_space_info *info;
	struct btrfs_key key, found_key;
	struct extent_buffer *leaf;
	unsigned long *bitmap;
	u64 start, end;
	/* Initialize to silence GCC. */
	u64 extent_start = 0;
	u64 offset;
	u32 bitmap_size, flags, expected_extent_count;
	int prev_bit = 0, bit, bitnr;
	u32 extent_count = 0;
	int done = 0, nr;
	int ret;

	bitmap_size = free_space_bitmap_size(block_group->key.offset,
					     block_group->sectorsize);
	bitmap = alloc_bitmap(bitmap_size);
	if (!bitmap) {
		ret = -ENOMEM;
		goto out;
	}

	start = block_group->key.objectid;
	end = block_group->key.objectid + block_group->key.offset;

	key.objectid = end - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	while (!done) {
		ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
		if (ret)
			goto out;

		leaf = path->nodes[0];
		nr = 0;
		path->slots[0]++;
		while (path->slots[0] > 0) {
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);

			if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
				ASSERT(found_key.objectid == block_group->key.objectid);
				ASSERT(found_key.offset == block_group->key.offset);
				done = 1;
				break;
			} else if (found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
				unsigned long ptr;
				char *bitmap_cursor;
				u32 bitmap_pos, data_size;

				ASSERT(found_key.objectid >= start);
				ASSERT(found_key.objectid < end);
				ASSERT(found_key.objectid + found_key.offset <= end);

				bitmap_pos = div_u64(found_key.objectid - start,
						     block_group->sectorsize *
						     BITS_PER_BYTE);
				bitmap_cursor = ((char *)bitmap) + bitmap_pos;
				data_size = free_space_bitmap_size(found_key.offset,
								   block_group->sectorsize);

				ptr = btrfs_item_ptr_offset(leaf, path->slots[0] - 1);
				read_extent_buffer(leaf, bitmap_cursor, ptr,
						   data_size);

				nr++;
				path->slots[0]--;
			} else {
				ASSERT(0);
			}
		}

		ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
		if (ret)
			goto out;
		btrfs_release_path(path);
	}

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	leaf = path->nodes[0];
	flags = btrfs_free_space_flags(leaf, info);
	flags &= ~BTRFS_FREE_SPACE_USING_BITMAPS;
	btrfs_set_free_space_flags(leaf, info, flags);
	expected_extent_count = btrfs_free_space_extent_count(leaf, info);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	offset = start;
	bitnr = 0;
	while (offset < end) {
		bit = !!test_bit(bitnr, bitmap);

		if (prev_bit == 0 && bit == 1) {
			extent_start = offset;
		} else if (prev_bit == 1 && bit == 0) {
			key.objectid = extent_start;
			key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
			key.offset = offset - extent_start;

			ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
			if (ret)
				goto out;
			btrfs_release_path(path);

			extent_count++;
		}
		prev_bit = bit;
		offset += block_group->sectorsize;
		bitnr++;
	}
	if (prev_bit == 1) {
		key.objectid = extent_start;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = end - extent_start;

		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		btrfs_release_path(path);

		extent_count++;
	}

	if (extent_count != expected_extent_count) {
		btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
			  block_group->key.objectid, extent_count,
			  expected_extent_count);
		ASSERT(0);
		ret = -EIO;
		goto out;
	}

	ret = 0;
out:
	vfree(bitmap);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
	return ret;
}

static int update_free_space_extent_count(struct btrfs_trans_handle *trans,
					  struct btrfs_fs_info *fs_info,
					  struct btrfs_block_group_cache *block_group,
					  struct btrfs_path *path,
					  int new_extents)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	u32 extent_count;
	int ret = 0;

	if (new_extents == 0)
		return 0;

	info = search_free_space_info(trans, fs_info, block_group, path, 1);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out;
	}
	flags = btrfs_free_space_flags(path->nodes[0], info);
	extent_count = btrfs_free_space_extent_count(path->nodes[0], info);

	extent_count += new_extents;
	btrfs_set_free_space_extent_count(path->nodes[0], info, extent_count);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);

	if (!(flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
	    extent_count > block_group->bitmap_high_thresh) {
		ret = convert_free_space_to_bitmaps(trans, fs_info, block_group,
						    path);
	} else if ((flags & BTRFS_FREE_SPACE_USING_BITMAPS) &&
		   extent_count < block_group->bitmap_low_thresh) {
		ret = convert_free_space_to_extents(trans, fs_info, block_group,
						    path);
	}

out:
	return ret;
}
int free_space_test_bit(struct btrfs_block_group_cache *block_group,
			struct btrfs_path *path, u64 offset)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 found_start, found_end;
	unsigned long ptr, i;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(offset >= found_start && offset < found_end);

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	i = div_u64(offset - found_start, block_group->sectorsize);
	return !!extent_buffer_test_bit(leaf, ptr, i);
}

static void free_space_set_bits(struct btrfs_block_group_cache *block_group,
				struct btrfs_path *path, u64 *start, u64 *size,
				int bit)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 end = *start + *size;
	u64 found_start, found_end;
	unsigned long ptr, first, last;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(*start >= found_start && *start < found_end);
	ASSERT(end > found_start);

	if (end > found_end)
		end = found_end;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	first = div_u64(*start - found_start, block_group->sectorsize);
	last = div_u64(end - found_start, block_group->sectorsize);
	if (bit)
		extent_buffer_bitmap_set(leaf, ptr, first, last - first);
	else
		extent_buffer_bitmap_clear(leaf, ptr, first, last - first);
	btrfs_mark_buffer_dirty(leaf);

	*size -= end - *start;
	*start = end;
}

/*
 * We can't use btrfs_next_item() in modify_free_space_bitmap() because
 * btrfs_next_leaf() doesn't get the path for writing. We can forgo the fancy
 * tree walking in btrfs_next_leaf() anyways because we know exactly what we're
 * looking for.
 */
static int free_space_next_bitmap(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root, struct btrfs_path *p)
{
	struct btrfs_key key;

	if (p->slots[0] + 1 < btrfs_header_nritems(p->nodes[0])) {
		p->slots[0]++;
		return 0;
	}

	btrfs_item_key_to_cpu(p->nodes[0], &key, p->slots[0]);
	btrfs_release_path(p);

	key.objectid += key.offset;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	return btrfs_search_prev_slot(trans, root, &key, p, 0, 1);
}

/*
 * If remove is 1, then we are removing free space, thus clearing bits in the
 * bitmap. If remove is 0, then we are adding free space, thus setting bits in
 * the bitmap.
 */
static int modify_free_space_bitmap(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size, int remove)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	u64 end = start + size;
	u64 cur_start, cur_size;
	int prev_bit, next_bit;
	int new_extents;
	int ret;

	/*
	 * Read the bit for the block immediately before the extent of space if
	 * that block is within the block group.
	 */
	if (start > block_group->key.objectid) {
		u64 prev_block = start - block_group->sectorsize;

		key.objectid = prev_block;
		key.type = (u8)-1;
		key.offset = (u64)-1;

		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
		if (ret)
			goto out;

		prev_bit = free_space_test_bit(block_group, path, prev_block);

		/* The previous block may have been in the previous bitmap. */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (start >= key.objectid + key.offset) {
			ret = free_space_next_bitmap(trans, root, path);
			if (ret)
				goto out;
		}
	} else {
		key.objectid = start;
		key.type = (u8)-1;
		key.offset = (u64)-1;

		ret = btrfs_search_prev_slot(trans, root, &key, path, 0, 1);
		if (ret)
			goto out;

		prev_bit = -1;
	}

	/*
	 * Iterate over all of the bitmaps overlapped by the extent of space,
	 * clearing/setting bits as required.
	 */
	cur_start = start;
	cur_size = size;
	while (1) {
		free_space_set_bits(block_group, path, &cur_start, &cur_size,
				    !remove);
		if (cur_size == 0)
			break;
		ret = free_space_next_bitmap(trans, root, path);
		if (ret)
			goto out;
	}

	/*
	 * Read the bit for the block immediately after the extent of space if
	 * that block is within the block group.
	 */
	if (end < block_group->key.objectid + block_group->key.offset) {
		/* The next block may be in the next bitmap. */
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (end >= key.objectid + key.offset) {
			ret = free_space_next_bitmap(trans, root, path);
			if (ret)
				goto out;
		}

		next_bit = free_space_test_bit(block_group, path, end);
	} else {
		next_bit = -1;
	}

	if (remove) {
		new_extents = -1;
		if (prev_bit == 1) {
			/* Leftover on the left. */
			new_extents++;
		}
		if (next_bit == 1) {
			/* Leftover on the right. */
			new_extents++;
		}
	} else {
		new_extents = 1;
		if (prev_bit == 1) {
			/* Merging with neighbor on the left. */
			new_extents--;
		}
		if (next_bit == 1) {
			/* Merging with neighbor on the right. */
			new_extents--;
		}
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
					     new_extents);

out:
	return ret;
}
static int remove_free_space_extent(struct btrfs_trans_handle *trans,
				    struct btrfs_fs_info *fs_info,
				    struct btrfs_block_group_cache *block_group,
				    struct btrfs_path *path,
				    u64 start, u64 size)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = -1;
	int ret;

	key.objectid = start;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(start >= found_start && end <= found_end);

	/*
	 * Okay, now that we've found the free space extent which contains the
	 * free space that we are removing, there are four cases:
	 *
	 * 1. We're using the whole extent: delete the key we found and
	 * decrement the free space extent count.
	 * 2. We are using part of the extent starting at the beginning: delete
	 * the key we found and insert a new key representing the leftover at
	 * the end. There is no net change in the number of extents.
	 * 3. We are using part of the extent ending at the end: delete the key
	 * we found and insert a new key representing the leftover at the
	 * beginning. There is no net change in the number of extents.
	 * 4. We are using part of the extent in the middle: delete the key we
	 * found and insert two new keys representing the leftovers on each
	 * side. Where we used to have one extent, we now have two, so increment
	 * the extent count. We may need to convert the block group to bitmaps
	 * as a result.
	 */

	/* Delete the existing key (cases 1-4). */
	ret = btrfs_del_item(trans, root, path);
	if (ret)
		goto out;

	/* Add a key for leftovers at the beginning (cases 3 and 4). */
	if (start > found_start) {
		key.objectid = found_start;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = start - found_start;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	/* Add a key for leftovers at the end (cases 2 and 4). */
	if (end < found_end) {
		key.objectid = end;
		key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
		key.offset = found_end - end;

		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
		if (ret)
			goto out;
		new_extents++;
	}

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
					     new_extents);

out:
	return ret;
}
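The four cases enumerated in remove_free_space_extent() above reduce to two independent checks: is there leftover free space before the removed range, and is there leftover free space after it? The userspace sketch below (arbitrary numbers, not kernel code) prints the keys that would be deleted and inserted for a given existing extent and removed range.

#include <stdio.h>
#include <stdint.h>

/*
 * Given an existing free extent [found_start, found_end) and a removed
 * range [start, end) fully inside it, print the key operations.
 */
static void remove_cases(uint64_t found_start, uint64_t found_end,
			 uint64_t start, uint64_t end)
{
	/* Cases 1-4: the existing key always goes away. */
	printf("delete (objectid=%llu, EXTENT, offset=%llu)\n",
	       (unsigned long long)found_start,
	       (unsigned long long)(found_end - found_start));

	/* Cases 3 and 4: leftover at the beginning of the old extent. */
	if (start > found_start)
		printf("insert (objectid=%llu, EXTENT, offset=%llu)\n",
		       (unsigned long long)found_start,
		       (unsigned long long)(start - found_start));

	/* Cases 2 and 4: leftover at the end of the old extent. */
	if (end < found_end)
		printf("insert (objectid=%llu, EXTENT, offset=%llu)\n",
		       (unsigned long long)end,
		       (unsigned long long)(found_end - end));
}

int main(void)
{
	/* Case 4: carve 4KiB out of the middle of a 64KiB free extent. */
	remove_cases(0, 65536, 16384, 20480);
	return 0;
}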
int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
				  struct btrfs_fs_info *fs_info,
				  struct btrfs_block_group_cache *block_group,
				  struct btrfs_path *path, u64 start, u64 size)
{
	struct btrfs_free_space_info *info;
	u32 flags;
	int ret;

	if (block_group->needs_free_space) {
		ret = __add_block_group_free_space(trans, fs_info, block_group,
						   path);
		if (ret)
			return ret;
	}

	info = search_free_space_info(NULL, fs_info, block_group, path, 0);
	if (IS_ERR(info))
		return PTR_ERR(info);
	flags = btrfs_free_space_flags(path->nodes[0], info);
	btrfs_release_path(path);

	if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
		return modify_free_space_bitmap(trans, fs_info, block_group,
						path, start, size, 1);
	} else {
		return remove_free_space_extent(trans, fs_info, block_group,
						path, start, size);
	}
}

int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info,
				u64 start, u64 size)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_path *path;
	int ret;

	if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	block_group = btrfs_lookup_block_group(fs_info, start);
	if (!block_group) {
		ASSERT(0);
		ret = -ENOENT;
		goto out;
	}

	mutex_lock(&block_group->free_space_lock);
	ret = __remove_from_free_space_tree(trans, fs_info, block_group, path,
					    start, size);
	mutex_unlock(&block_group->free_space_lock);

	btrfs_put_block_group(block_group);
out:
	btrfs_free_path(path);
	if (ret)
		btrfs_abort_transaction(trans, fs_info->free_space_root, ret);
	return ret;
}

static int add_free_space_extent(struct btrfs_trans_handle *trans,
				 struct btrfs_fs_info *fs_info,
				 struct btrfs_block_group_cache *block_group,
				 struct btrfs_path *path,
				 u64 start, u64 size)
{
	struct btrfs_root *root = fs_info->free_space_root;
	struct btrfs_key key, new_key;
	u64 found_start, found_end;
	u64 end = start + size;
	int new_extents = 1;
	int ret;

	/*
	 * We are adding a new extent of free space, but we need to merge
	 * extents. There are four cases here:
	 *
	 * 1. The new extent does not have any immediate neighbors to merge
	 * with: add the new key and increment the free space extent count. We
	 * may need to convert the block group to bitmaps as a result.
	 * 2. The new extent has an immediate neighbor before it: remove the
	 * previous key and insert a new key combining both of them. There is no
	 * net change in the number of extents.
	 * 3. The new extent has an immediate neighbor after it: remove the next
	 * key and insert a new key combining both of them. There is no net
	 * change in the number of extents.
	 * 4. The new extent has immediate neighbors on both sides: remove both
	 * of the keys and insert a new key combining all of them. Where we used
	 * to have two extents, we now have one, so decrement the extent count.
	 */

	new_key.objectid = start;
	new_key.type = BTRFS_FREE_SPACE_EXTENT_KEY;
	new_key.offset = size;

	/* Search for a neighbor on the left. */
	if (start == block_group->key.objectid)
		goto right;
	key.objectid = start - 1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto right;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->key.objectid &&
	       found_end > block_group->key.objectid);
	ASSERT(found_start < start && found_end <= start);

	/*
	 * Delete the neighbor on the left and absorb it into the new key (cases
	 * 2 and 4).
	 */
	if (found_end == start) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.objectid = found_start;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

right:
	/* Search for a neighbor on the right. */
	if (end == block_group->key.objectid + block_group->key.offset)
		goto insert;
	key.objectid = end;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
	if (ret)
		goto out;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY) {
		ASSERT(key.type == BTRFS_FREE_SPACE_INFO_KEY);
		btrfs_release_path(path);
		goto insert;
	}

	found_start = key.objectid;
	found_end = key.objectid + key.offset;
	ASSERT(found_start >= block_group->key.objectid &&
	       found_end > block_group->key.objectid);
	ASSERT((found_start < start && found_end <= start) ||
	       (found_start >= end && found_end > end));

	/*
	 * Delete the neighbor on the right and absorb it into the new key
	 * (cases 3 and 4).
	 */
	if (found_start == end) {
		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;
		new_key.offset += key.offset;
		new_extents--;
	}
	btrfs_release_path(path);

insert:
	/* Insert the new key (cases 1-4). */
	ret = btrfs_insert_empty_item(trans, root, path, &new_key, 0);
	if (ret)
		goto out;

	btrfs_release_path(path);
	ret = update_free_space_extent_count(trans, fs_info, block_group, path,
					     new_extents);

out:
	return ret;
}
int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info,
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_path *path, u64 start, u64 size)
{
        struct btrfs_free_space_info *info;
        u32 flags;
        int ret;

        if (block_group->needs_free_space) {
                ret = __add_block_group_free_space(trans, fs_info, block_group,
                                                   path);
                if (ret)
                        return ret;
        }

        info = search_free_space_info(NULL, fs_info, block_group, path, 0);
        if (IS_ERR(info))
                return PTR_ERR(info);
        flags = btrfs_free_space_flags(path->nodes[0], info);
        btrfs_release_path(path);

        if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
                return modify_free_space_bitmap(trans, fs_info, block_group,
                                                path, start, size, 0);
        } else {
                return add_free_space_extent(trans, fs_info, block_group, path,
                                             start, size);
        }
}

int add_to_free_space_tree(struct btrfs_trans_handle *trans,
                           struct btrfs_fs_info *fs_info,
                           u64 start, u64 size)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_path *path;
        int ret;

        if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
                return 0;

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        block_group = btrfs_lookup_block_group(fs_info, start);
        if (!block_group) {
                ASSERT(0);
                ret = -ENOENT;
                goto out;
        }

        mutex_lock(&block_group->free_space_lock);
        ret = __add_to_free_space_tree(trans, fs_info, block_group, path,
                                       start, size);
        mutex_unlock(&block_group->free_space_lock);

        btrfs_put_block_group(block_group);
out:
        btrfs_free_path(path);
        if (ret)
                btrfs_abort_transaction(trans, fs_info->free_space_root, ret);
        return ret;
}
/*
 * Populate the free space tree by walking the extent tree. Operations on the
 * extent tree that happen as a result of writes to the free space tree will go
 * through the normal add/remove hooks.
 */
static int populate_free_space_tree(struct btrfs_trans_handle *trans,
                                    struct btrfs_fs_info *fs_info,
                                    struct btrfs_block_group_cache *block_group)
{
        struct btrfs_root *extent_root = fs_info->extent_root;
        struct btrfs_path *path, *path2;
        struct btrfs_key key;
        u64 start, end;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->reada = 1;

        path2 = btrfs_alloc_path();
        if (!path2) {
                btrfs_free_path(path);
                return -ENOMEM;
        }

        ret = add_new_free_space_info(trans, fs_info, block_group, path2);
        if (ret)
                goto out;

        /*
         * Iterate through all of the extent and metadata items in this block
         * group, adding the free space between them and the free space at the
         * end. Note that EXTENT_ITEM and METADATA_ITEM are less than
         * BLOCK_GROUP_ITEM, so an extent may precede the block group that it's
         * contained in.
         */
        key.objectid = block_group->key.objectid;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = 0;

        ret = btrfs_search_slot_for_read(extent_root, &key, path, 1, 0);
        if (ret < 0)
                goto out;
        ASSERT(ret == 0);

        start = block_group->key.objectid;
        end = block_group->key.objectid + block_group->key.offset;
        while (1) {
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

                if (key.type == BTRFS_EXTENT_ITEM_KEY ||
                    key.type == BTRFS_METADATA_ITEM_KEY) {
                        if (key.objectid >= end)
                                break;

                        if (start < key.objectid) {
                                ret = __add_to_free_space_tree(trans, fs_info,
                                                               block_group,
                                                               path2, start,
                                                               key.objectid -
                                                               start);
                                if (ret)
                                        goto out;
                        }
                        start = key.objectid;
                        if (key.type == BTRFS_METADATA_ITEM_KEY)
                                start += fs_info->tree_root->nodesize;
                        else
                                start += key.offset;
                } else if (key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
                        if (key.objectid != block_group->key.objectid)
                                break;
                }

                ret = btrfs_next_item(extent_root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;
        }
        if (start < end) {
                ret = __add_to_free_space_tree(trans, fs_info, block_group,
                                               path2, start, end - start);
                if (ret)
                        goto out;
        }

        ret = 0;
out:
        btrfs_free_path(path2);
        btrfs_free_path(path);
        return ret;
}
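populate_free_space_tree() derives the initial free space as the gaps between the sorted EXTENT_ITEM/METADATA_ITEM keys plus whatever remains at the tail of the block group. A standalone sketch of that gap-walking loop over plain arrays follows; the names and the printed output are illustrative only, not kernel code:

#include <stdio.h>

struct range { unsigned long long start, len; };

/* Report the free gaps inside [bg_start, bg_end) not covered by the sorted,
 * non-overlapping allocated ranges, mirroring populate_free_space_tree(). */
static void report_gaps(unsigned long long bg_start, unsigned long long bg_end,
                        const struct range *alloc, int n)
{
        unsigned long long cursor = bg_start;
        int i;

        for (i = 0; i < n; i++) {
                if (alloc[i].start >= bg_end)
                        break;
                if (cursor < alloc[i].start)
                        printf("free: %llu +%llu\n", cursor,
                               alloc[i].start - cursor);
                cursor = alloc[i].start + alloc[i].len;
        }
        if (cursor < bg_end)                    /* free space at the end */
                printf("free: %llu +%llu\n", cursor, bg_end - cursor);
}

int main(void)
{
        const struct range alloc[] = { { 4096, 8192 }, { 20480, 4096 } };

        /* prints: 0 +4096, 12288 +8192, 24576 +40960 */
        report_gaps(0, 65536, alloc, 2);
        return 0;
}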
int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *free_space_root;
        struct btrfs_block_group_cache *block_group;
        struct rb_node *node;
        int ret;

        trans = btrfs_start_transaction(tree_root, 0);
        if (IS_ERR(trans))
                return PTR_ERR(trans);

        free_space_root = btrfs_create_tree(trans, fs_info,
                                            BTRFS_FREE_SPACE_TREE_OBJECTID);
        if (IS_ERR(free_space_root)) {
                ret = PTR_ERR(free_space_root);
                goto abort;
        }
        fs_info->free_space_root = free_space_root;

        node = rb_first(&fs_info->block_group_cache_tree);
        while (node) {
                block_group = rb_entry(node, struct btrfs_block_group_cache,
                                       cache_node);
                ret = populate_free_space_tree(trans, fs_info, block_group);
                if (ret)
                        goto abort;
                node = rb_next(node);
        }

        btrfs_set_fs_compat_ro(fs_info, FREE_SPACE_TREE);

        ret = btrfs_commit_transaction(trans, tree_root);
        if (ret)
                return ret;

        return 0;

abort:
        btrfs_abort_transaction(trans, tree_root, ret);
        btrfs_end_transaction(trans, tree_root);
        return ret;
}

static int clear_free_space_tree(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        int nr;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->leave_spinning = 1;

        key.objectid = 0;
        key.type = 0;
        key.offset = 0;

        while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0)
                        goto out;

                nr = btrfs_header_nritems(path->nodes[0]);
                if (!nr)
                        break;

                path->slots[0] = 0;
                ret = btrfs_del_items(trans, root, path, 0, nr);
                if (ret)
                        goto out;

                btrfs_release_path(path);
        }

        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *free_space_root = fs_info->free_space_root;
        int ret;

        trans = btrfs_start_transaction(tree_root, 0);
        if (IS_ERR(trans))
                return PTR_ERR(trans);

        btrfs_clear_fs_compat_ro(fs_info, FREE_SPACE_TREE);
        fs_info->free_space_root = NULL;

        ret = clear_free_space_tree(trans, free_space_root);
        if (ret)
                goto abort;

        ret = btrfs_del_root(trans, tree_root, &free_space_root->root_key);
        if (ret)
                goto abort;

        list_del(&free_space_root->dirty_list);

        btrfs_tree_lock(free_space_root->node);
        clean_tree_block(trans, tree_root->fs_info, free_space_root->node);
        btrfs_tree_unlock(free_space_root->node);
        btrfs_free_tree_block(trans, free_space_root, free_space_root->node,
                              0, 1);

        free_extent_buffer(free_space_root->node);
        free_extent_buffer(free_space_root->commit_root);
        kfree(free_space_root);

        ret = btrfs_commit_transaction(trans, tree_root);
        if (ret)
                return ret;

        return 0;

abort:
        btrfs_abort_transaction(trans, tree_root, ret);
        btrfs_end_transaction(trans, tree_root);
        return ret;
}
static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
                                        struct btrfs_fs_info *fs_info,
                                        struct btrfs_block_group_cache *block_group,
                                        struct btrfs_path *path)
{
        u64 start, end;
        int ret;

        start = block_group->key.objectid;
        end = block_group->key.objectid + block_group->key.offset;

        block_group->needs_free_space = 0;

        ret = add_new_free_space_info(trans, fs_info, block_group, path);
        if (ret)
                return ret;

        return __add_to_free_space_tree(trans, fs_info, block_group, path,
                                        block_group->key.objectid,
                                        block_group->key.offset);
}

int add_block_group_free_space(struct btrfs_trans_handle *trans,
                               struct btrfs_fs_info *fs_info,
                               struct btrfs_block_group_cache *block_group)
{
        struct btrfs_path *path = NULL;
        int ret = 0;

        if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
                return 0;

        mutex_lock(&block_group->free_space_lock);
        if (!block_group->needs_free_space)
                goto out;

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        ret = __add_block_group_free_space(trans, fs_info, block_group, path);

out:
        btrfs_free_path(path);
        mutex_unlock(&block_group->free_space_lock);
        if (ret)
                btrfs_abort_transaction(trans, fs_info->free_space_root, ret);
        return ret;
}

int remove_block_group_free_space(struct btrfs_trans_handle *trans,
                                  struct btrfs_fs_info *fs_info,
                                  struct btrfs_block_group_cache *block_group)
{
        struct btrfs_root *root = fs_info->free_space_root;
        struct btrfs_path *path;
        struct btrfs_key key, found_key;
        struct extent_buffer *leaf;
        u64 start, end;
        int done = 0, nr;
        int ret;

        if (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
                return 0;

        if (block_group->needs_free_space) {
                /* We never added this block group to the free space tree. */
                return 0;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        start = block_group->key.objectid;
        end = block_group->key.objectid + block_group->key.offset;

        key.objectid = end - 1;
        key.type = (u8)-1;
        key.offset = (u64)-1;

        while (!done) {
                ret = btrfs_search_prev_slot(trans, root, &key, path, -1, 1);
                if (ret)
                        goto out;

                leaf = path->nodes[0];
                nr = 0;
                path->slots[0]++;
                while (path->slots[0] > 0) {
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0] - 1);

                        if (found_key.type == BTRFS_FREE_SPACE_INFO_KEY) {
                                ASSERT(found_key.objectid == block_group->key.objectid);
                                ASSERT(found_key.offset == block_group->key.offset);
                                done = 1;
                                nr++;
                                path->slots[0]--;
                                break;
                        } else if (found_key.type == BTRFS_FREE_SPACE_EXTENT_KEY ||
                                   found_key.type == BTRFS_FREE_SPACE_BITMAP_KEY) {
                                ASSERT(found_key.objectid >= start);
                                ASSERT(found_key.objectid < end);
                                ASSERT(found_key.objectid + found_key.offset <= end);
                                nr++;
                                path->slots[0]--;
                        } else {
                                ASSERT(0);
                        }
                }

                ret = btrfs_del_items(trans, root, path, path->slots[0], nr);
                if (ret)
                        goto out;
                btrfs_release_path(path);
        }

        ret = 0;
out:
        btrfs_free_path(path);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
        return ret;
}
static int load_free_space_bitmaps(struct btrfs_caching_control *caching_ctl,
                                   struct btrfs_path *path,
                                   u32 expected_extent_count)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_root *root;
        struct btrfs_key key;
        int prev_bit = 0, bit;
        /* Initialize to silence GCC. */
        u64 extent_start = 0;
        u64 end, offset;
        u64 total_found = 0;
        u32 extent_count = 0;
        int ret;

        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        root = fs_info->free_space_root;

        end = block_group->key.objectid + block_group->key.offset;

        while (1) {
                ret = btrfs_next_item(root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;

                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

                if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
                        break;

                ASSERT(key.type == BTRFS_FREE_SPACE_BITMAP_KEY);
                ASSERT(key.objectid < end && key.objectid + key.offset <= end);

                caching_ctl->progress = key.objectid;

                offset = key.objectid;
                while (offset < key.objectid + key.offset) {
                        bit = free_space_test_bit(block_group, path, offset);
                        if (prev_bit == 0 && bit == 1) {
                                extent_start = offset;
                        } else if (prev_bit == 1 && bit == 0) {
                                total_found += add_new_free_space(block_group,
                                                                  fs_info,
                                                                  extent_start,
                                                                  offset);
                                if (total_found > CACHING_CTL_WAKE_UP) {
                                        total_found = 0;
                                        wake_up(&caching_ctl->wait);
                                }
                                extent_count++;
                        }
                        prev_bit = bit;
                        offset += block_group->sectorsize;
                }
        }
        if (prev_bit == 1) {
                total_found += add_new_free_space(block_group, fs_info,
                                                  extent_start, end);
                extent_count++;
        }

        if (extent_count != expected_extent_count) {
                btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
                          block_group->key.objectid, extent_count,
                          expected_extent_count);
                ASSERT(0);
                ret = -EIO;
                goto out;
        }

        caching_ctl->progress = (u64)-1;

        ret = 0;
out:
        return ret;
}
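load_free_space_bitmaps() turns runs of set bits back into extents by watching for 0-to-1 and 1-to-0 transitions, with a final flush if the last run reaches the end of the block group. The same run-length decoding in a self-contained userspace sketch, using a byte-array bitmap and printf in place of add_new_free_space(); the names and values are illustrative only:

#include <stdio.h>

/* Print each run of set bits in `bitmap` as a (start, length) extent,
 * using the same prev_bit/bit edge detection as load_free_space_bitmaps().
 * `unit` is the number of bytes each bit represents (e.g. the sectorsize). */
static void decode_runs(const unsigned char *bitmap, unsigned long nbits,
                        unsigned long unit)
{
        int prev_bit = 0, bit;
        unsigned long i, run_start = 0;

        for (i = 0; i < nbits; i++) {
                bit = (bitmap[i / 8] >> (i % 8)) & 1;
                if (prev_bit == 0 && bit == 1)
                        run_start = i;                  /* a run begins */
                else if (prev_bit == 1 && bit == 0)
                        printf("free: %lu +%lu\n", run_start * unit,
                               (i - run_start) * unit); /* a run ends */
                prev_bit = bit;
        }
        if (prev_bit == 1)                              /* run reaches the end */
                printf("free: %lu +%lu\n", run_start * unit,
                       (nbits - run_start) * unit);
}

int main(void)
{
        const unsigned char bitmap[] = { 0x0f, 0xf0 };  /* bits 0-3 and 12-15 */

        /* prints: free: 0 +16384 and free: 49152 +16384 */
        decode_runs(bitmap, 16, 4096);
        return 0;
}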
static int load_free_space_extents(struct btrfs_caching_control *caching_ctl,
                                   struct btrfs_path *path,
                                   u32 expected_extent_count)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_root *root;
        struct btrfs_key key;
        u64 end;
        u64 total_found = 0;
        u32 extent_count = 0;
        int ret;

        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;
        root = fs_info->free_space_root;

        end = block_group->key.objectid + block_group->key.offset;

        while (1) {
                ret = btrfs_next_item(root, path);
                if (ret < 0)
                        goto out;
                if (ret)
                        break;

                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

                if (key.type == BTRFS_FREE_SPACE_INFO_KEY)
                        break;

                ASSERT(key.type == BTRFS_FREE_SPACE_EXTENT_KEY);
                ASSERT(key.objectid < end && key.objectid + key.offset <= end);

                caching_ctl->progress = key.objectid;

                total_found += add_new_free_space(block_group, fs_info,
                                                  key.objectid,
                                                  key.objectid + key.offset);
                if (total_found > CACHING_CTL_WAKE_UP) {
                        total_found = 0;
                        wake_up(&caching_ctl->wait);
                }
                extent_count++;
        }

        if (extent_count != expected_extent_count) {
                btrfs_err(fs_info, "incorrect extent count for %llu; counted %u, expected %u",
                          block_group->key.objectid, extent_count,
                          expected_extent_count);
                ASSERT(0);
                ret = -EIO;
                goto out;
        }

        caching_ctl->progress = (u64)-1;

        ret = 0;
out:
        return ret;
}
int load_free_space_tree(struct btrfs_caching_control *caching_ctl)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_fs_info *fs_info;
        struct btrfs_free_space_info *info;
        struct btrfs_path *path;
        u32 extent_count, flags;
        int ret;

        block_group = caching_ctl->block_group;
        fs_info = block_group->fs_info;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /*
         * Just like caching_thread() doesn't want to deadlock on the extent
         * tree, we don't want to deadlock on the free space tree.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 1;

        info = search_free_space_info(NULL, fs_info, block_group, path, 0);
        if (IS_ERR(info)) {
                ret = PTR_ERR(info);
                goto out;
        }
        extent_count = btrfs_free_space_extent_count(path->nodes[0], info);
        flags = btrfs_free_space_flags(path->nodes[0], info);

        /*
         * We left path pointing to the free space info item, so now
         * load_free_space_foo can just iterate through the free space tree
         * from there.
         */
        if (flags & BTRFS_FREE_SPACE_USING_BITMAPS)
                ret = load_free_space_bitmaps(caching_ctl, path, extent_count);
        else
                ret = load_free_space_extents(caching_ctl, path, extent_count);

out:
        btrfs_free_path(path);
        return ret;
}
fs/btrfs/free-space-tree.h
0 → 100644
View file @
f0f76413
/*
* Copyright (C) 2015 Facebook. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#ifndef __BTRFS_FREE_SPACE_TREE
#define __BTRFS_FREE_SPACE_TREE
/*
* The default size for new free space bitmap items. The last bitmap in a block
* group may be truncated, and none of the free space tree code assumes that
* existing bitmaps are this size.
*/
#define BTRFS_FREE_SPACE_BITMAP_SIZE 256
#define BTRFS_FREE_SPACE_BITMAP_BITS (BTRFS_FREE_SPACE_BITMAP_SIZE * BITS_PER_BYTE)
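With the default 256-byte item, one bitmap covers BTRFS_FREE_SPACE_BITMAP_BITS blocks; at a 4 KiB sectorsize (used here purely as an example value) that is 8 MiB of the block group per item. A quick standalone check of that arithmetic, not kernel code:

#include <stdio.h>

#define BITS_PER_BYTE                   8
#define BTRFS_FREE_SPACE_BITMAP_SIZE    256
#define BTRFS_FREE_SPACE_BITMAP_BITS    (BTRFS_FREE_SPACE_BITMAP_SIZE * BITS_PER_BYTE)

int main(void)
{
        unsigned long sectorsize = 4096;        /* example sectorsize */
        unsigned long covered = BTRFS_FREE_SPACE_BITMAP_BITS * sectorsize;

        /* 2048 bits * 4096 bytes per bit = 8 MiB per full bitmap item */
        printf("one bitmap item covers %lu bytes (%lu MiB)\n",
               covered, covered >> 20);
        return 0;
}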
void set_free_space_tree_thresholds(struct btrfs_block_group_cache *block_group);
int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info);
int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info);
int load_free_space_tree(struct btrfs_caching_control *caching_ctl);
int add_block_group_free_space(struct btrfs_trans_handle *trans,
                               struct btrfs_fs_info *fs_info,
                               struct btrfs_block_group_cache *block_group);
int remove_block_group_free_space(struct btrfs_trans_handle *trans,
                                  struct btrfs_fs_info *fs_info,
                                  struct btrfs_block_group_cache *block_group);
int add_to_free_space_tree(struct btrfs_trans_handle *trans,
                           struct btrfs_fs_info *fs_info,
                           u64 start, u64 size);
int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
                                struct btrfs_fs_info *fs_info,
                                u64 start, u64 size);

/* Exposed for testing. */
struct btrfs_free_space_info *
search_free_space_info(struct btrfs_trans_handle *trans,
                       struct btrfs_fs_info *fs_info,
                       struct btrfs_block_group_cache *block_group,
                       struct btrfs_path *path, int cow);
int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info,
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_path *path, u64 start, u64 size);
int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
                                  struct btrfs_fs_info *fs_info,
                                  struct btrfs_block_group_cache *block_group,
                                  struct btrfs_path *path, u64 start, u64 size);
int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
                                  struct btrfs_fs_info *fs_info,
                                  struct btrfs_block_group_cache *block_group,
                                  struct btrfs_path *path);
int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
                                  struct btrfs_fs_info *fs_info,
                                  struct btrfs_block_group_cache *block_group,
                                  struct btrfs_path *path);
int free_space_test_bit(struct btrfs_block_group_cache *block_group,
                        struct btrfs_path *path, u64 offset);

#endif
fs/btrfs/super.c
View file @
f0f76413
...
...
@@ -295,10 +295,11 @@ enum {
        Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress,
        Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
        Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
-       Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
-       Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_inode_cache,
-       Opt_no_space_cache, Opt_recovery, Opt_skip_balance,
-       Opt_check_integrity, Opt_check_integrity_including_extent_data,
+       Opt_space_cache, Opt_space_cache_version, Opt_clear_cache,
+       Opt_user_subvol_rm_allowed, Opt_enospc_debug, Opt_subvolrootid,
+       Opt_defrag, Opt_inode_cache, Opt_no_space_cache, Opt_recovery,
+       Opt_skip_balance, Opt_check_integrity,
+       Opt_check_integrity_including_extent_data,
        Opt_check_integrity_print_mask, Opt_fatal_errors, Opt_rescan_uuid_tree,
        Opt_commit_interval, Opt_barrier, Opt_nodefrag, Opt_nodiscard,
        Opt_noenospc_debug, Opt_noflushoncommit, Opt_acl, Opt_datacow,
...
...
@@ -340,6 +341,7 @@ static match_table_t tokens = {
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
        {Opt_space_cache, "space_cache"},
+       {Opt_space_cache_version, "space_cache=%s"},
        {Opt_clear_cache, "clear_cache"},
        {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
        {Opt_enospc_debug, "enospc_debug"},
...
...
@@ -383,7 +385,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
        bool compress_force = false;

        cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
-       if (cache_gen)
+       if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE))
+               btrfs_set_opt(info->mount_opt, FREE_SPACE_TREE);
+       else if (cache_gen)
                btrfs_set_opt(info->mount_opt, SPACE_CACHE);

        if (!options)
...
...
@@ -617,15 +621,35 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                                           "turning off discard");
                        break;
                case Opt_space_cache:
                case Opt_space_cache_version:
                        if (token == Opt_space_cache ||
                            strcmp(args[0].from, "v1") == 0) {
                                btrfs_clear_opt(root->fs_info->mount_opt,
                                                FREE_SPACE_TREE);
                                btrfs_set_and_info(root, SPACE_CACHE,
                                                   "enabling disk space caching");
                        } else if (strcmp(args[0].from, "v2") == 0) {
                                btrfs_clear_opt(root->fs_info->mount_opt,
                                                SPACE_CACHE);
                                btrfs_set_and_info(root, FREE_SPACE_TREE,
                                                   "enabling free space tree");
                        } else {
                                ret = -EINVAL;
                                goto out;
                        }
                        break;
                case Opt_rescan_uuid_tree:
                        btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE);
                        break;
                case Opt_no_space_cache:
                        if (btrfs_test_opt(root, SPACE_CACHE)) {
                                btrfs_clear_and_info(root, SPACE_CACHE,
                                                     "disabling disk space caching");
                        }
                        if (btrfs_test_opt(root, FREE_SPACE_TREE)) {
                                btrfs_clear_and_info(root, FREE_SPACE_TREE,
                                                     "disabling free space tree");
                        }
                        break;
                case Opt_inode_cache:
                        btrfs_set_pending_and_info(info, INODE_MAP_CACHE,
...
...
@@ -754,8 +778,17 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                }
        }
out:
        if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE) &&
            !btrfs_test_opt(root, FREE_SPACE_TREE) &&
            !btrfs_test_opt(root, CLEAR_CACHE)) {
                btrfs_err(root->fs_info, "cannot disable free space tree");
                ret = -EINVAL;
        }
        if (!ret && btrfs_test_opt(root, SPACE_CACHE))
                btrfs_info(root->fs_info, "disk space caching is enabled");
        if (!ret && btrfs_test_opt(root, FREE_SPACE_TREE))
                btrfs_info(root->fs_info, "using free space tree");
        kfree(orig);
        return ret;
}
...
...
@@ -1162,6 +1195,8 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
                seq_puts(seq, ",noacl");
        if (btrfs_test_opt(root, SPACE_CACHE))
                seq_puts(seq, ",space_cache");
        else if (btrfs_test_opt(root, FREE_SPACE_TREE))
                seq_puts(seq, ",space_cache=v2");
        else
                seq_puts(seq, ",nospace_cache");
        if (btrfs_test_opt(root, RESCAN_UUID_TREE))
...
...
@@ -2225,6 +2260,9 @@ static int btrfs_run_sanity_tests(void)
        if (ret)
                goto out;
        ret = btrfs_test_qgroups();
        if (ret)
                goto out;
        ret = btrfs_test_free_space_tree();
out:
        btrfs_destroy_test_fs();
        return ret;
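Usage note (inferred from the option parsing in the hunks above, not part of the patch itself): the new behaviour is selected at mount time with space_cache=v2, which enables the free space tree, while space_cache or space_cache=v1 keeps the existing free space cache and nospace_cache disables both; once the free space tree feature bit is set, it can only be dropped together with clear_cache, which is what the "cannot disable free space tree" check enforces.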
...
...
fs/btrfs/tests/btrfs-tests.c
View file @
f0f76413
...
...
@@ -21,6 +21,9 @@
#include <linux/magic.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../free-space-cache.h"
#include "../free-space-tree.h"
#include "../transaction.h"
#include "../volumes.h"
#include "../disk-io.h"
#include "../qgroup.h"
...
...
@@ -122,6 +125,9 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
        INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
        INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
        extent_io_tree_init(&fs_info->freed_extents[0], NULL);
        extent_io_tree_init(&fs_info->freed_extents[1], NULL);
        fs_info->pinned_extents = &fs_info->freed_extents[0];
        return fs_info;
}
...
...
@@ -169,3 +175,55 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
        kfree(root);
}

struct btrfs_block_group_cache *
btrfs_alloc_dummy_block_group(unsigned long length)
{
        struct btrfs_block_group_cache *cache;

        cache = kzalloc(sizeof(*cache), GFP_NOFS);
        if (!cache)
                return NULL;
        cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
                                        GFP_NOFS);
        if (!cache->free_space_ctl) {
                kfree(cache);
                return NULL;
        }
        cache->fs_info = btrfs_alloc_dummy_fs_info();
        if (!cache->fs_info) {
                kfree(cache->free_space_ctl);
                kfree(cache);
                return NULL;
        }

        cache->key.objectid = 0;
        cache->key.offset = length;
        cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
        cache->sectorsize = 4096;
        cache->full_stripe_len = 4096;

        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);
        INIT_LIST_HEAD(&cache->bg_list);
        btrfs_init_free_space_ctl(cache);
        mutex_init(&cache->free_space_lock);

        return cache;
}

void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache)
{
        if (!cache)
                return;
        __btrfs_remove_free_space_cache(cache->free_space_ctl);
        kfree(cache->free_space_ctl);
        kfree(cache);
}

void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans)
{
        memset(trans, 0, sizeof(*trans));
        trans->transid = 1;
        INIT_LIST_HEAD(&trans->qgroup_ref_list);
        trans->type = __TRANS_DUMMY;
}
fs/btrfs/tests/btrfs-tests.h
View file @
f0f76413
...
...
@@ -24,17 +24,23 @@
#define test_msg(fmt, ...) pr_info("BTRFS: selftest: " fmt, ##__VA_ARGS__)
struct btrfs_root;
struct btrfs_trans_handle;

int btrfs_test_free_space_cache(void);
int btrfs_test_extent_buffer_operations(void);
int btrfs_test_extent_io(void);
int btrfs_test_inodes(void);
int btrfs_test_qgroups(void);
int btrfs_test_free_space_tree(void);
int btrfs_init_test_fs(void);
void btrfs_destroy_test_fs(void);
struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
void btrfs_free_dummy_root(struct btrfs_root *root);
struct btrfs_block_group_cache *
btrfs_alloc_dummy_block_group(unsigned long length);
void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans);
#else
static inline int btrfs_test_free_space_cache(void)
{
...
...
@@ -63,6 +69,10 @@ static inline int btrfs_test_qgroups(void)
{
        return 0;
}
static inline int btrfs_test_free_space_tree(void)
{
        return 0;
}
#endif

#endif
fs/btrfs/tests/extent-io-tests.c
View file @
f0f76413
...
...
@@ -18,6 +18,7 @@
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "btrfs-tests.h"
#include "../extent_io.h"
...
...
@@ -76,6 +77,8 @@ static int test_find_delalloc(void)
        u64 found;
        int ret = -EINVAL;

        test_msg("Running find delalloc tests\n");

        inode = btrfs_new_test_inode();
        if (!inode) {
                test_msg("Failed to allocate test inode\n");
...
@@ -268,8 +271,139 @@ static int test_find_delalloc(void)
return
ret
;
}
static
int
__test_eb_bitmaps
(
unsigned
long
*
bitmap
,
struct
extent_buffer
*
eb
,
unsigned
long
len
)
{
unsigned
long
i
,
x
;
memset
(
bitmap
,
0
,
len
);
memset_extent_buffer
(
eb
,
0
,
0
,
len
);
if
(
memcmp_extent_buffer
(
eb
,
bitmap
,
0
,
len
)
!=
0
)
{
test_msg
(
"Bitmap was not zeroed
\n
"
);
return
-
EINVAL
;
}
bitmap_set
(
bitmap
,
0
,
len
*
BITS_PER_BYTE
);
extent_buffer_bitmap_set
(
eb
,
0
,
0
,
len
*
BITS_PER_BYTE
);
if
(
memcmp_extent_buffer
(
eb
,
bitmap
,
0
,
len
)
!=
0
)
{
test_msg
(
"Setting all bits failed
\n
"
);
return
-
EINVAL
;
}
bitmap_clear
(
bitmap
,
0
,
len
*
BITS_PER_BYTE
);
extent_buffer_bitmap_clear
(
eb
,
0
,
0
,
len
*
BITS_PER_BYTE
);
if
(
memcmp_extent_buffer
(
eb
,
bitmap
,
0
,
len
)
!=
0
)
{
test_msg
(
"Clearing all bits failed
\n
"
);
return
-
EINVAL
;
}
bitmap_set
(
bitmap
,
(
PAGE_CACHE_SIZE
-
sizeof
(
long
)
/
2
)
*
BITS_PER_BYTE
,
sizeof
(
long
)
*
BITS_PER_BYTE
);
extent_buffer_bitmap_set
(
eb
,
PAGE_CACHE_SIZE
-
sizeof
(
long
)
/
2
,
0
,
sizeof
(
long
)
*
BITS_PER_BYTE
);
if
(
memcmp_extent_buffer
(
eb
,
bitmap
,
0
,
len
)
!=
0
)
{
test_msg
(
"Setting straddling pages failed
\n
"
);
return
-
EINVAL
;
}
bitmap_set
(
bitmap
,
0
,
len
*
BITS_PER_BYTE
);
bitmap_clear
(
bitmap
,
(
PAGE_CACHE_SIZE
-
sizeof
(
long
)
/
2
)
*
BITS_PER_BYTE
,
sizeof
(
long
)
*
BITS_PER_BYTE
);
extent_buffer_bitmap_set
(
eb
,
0
,
0
,
len
*
BITS_PER_BYTE
);
extent_buffer_bitmap_clear
(
eb
,
PAGE_CACHE_SIZE
-
sizeof
(
long
)
/
2
,
0
,
sizeof
(
long
)
*
BITS_PER_BYTE
);
if
(
memcmp_extent_buffer
(
eb
,
bitmap
,
0
,
len
)
!=
0
)
{
test_msg
(
"Clearing straddling pages failed
\n
"
);
return
-
EINVAL
;
}
/*
* Generate a wonky pseudo-random bit pattern for the sake of not using
* something repetitive that could miss some hypothetical off-by-n bug.
*/
x
=
0
;
for
(
i
=
0
;
i
<
len
/
sizeof
(
long
);
i
++
)
{
x
=
(
0x19660dULL
*
(
u64
)
x
+
0x3c6ef35fULL
)
&
0xffffffffUL
;
bitmap
[
i
]
=
x
;
}
write_extent_buffer
(
eb
,
bitmap
,
0
,
len
);
for
(
i
=
0
;
i
<
len
*
BITS_PER_BYTE
;
i
++
)
{
int
bit
,
bit1
;
bit
=
!!
test_bit
(
i
,
bitmap
);
bit1
=
!!
extent_buffer_test_bit
(
eb
,
0
,
i
);
if
(
bit1
!=
bit
)
{
test_msg
(
"Testing bit pattern failed
\n
"
);
return
-
EINVAL
;
}
bit1
=
!!
extent_buffer_test_bit
(
eb
,
i
/
BITS_PER_BYTE
,
i
%
BITS_PER_BYTE
);
if
(
bit1
!=
bit
)
{
test_msg
(
"Testing bit pattern with offset failed
\n
"
);
return
-
EINVAL
;
}
}
return
0
;
}
static
int
test_eb_bitmaps
(
void
)
{
unsigned
long
len
=
PAGE_CACHE_SIZE
*
4
;
unsigned
long
*
bitmap
;
struct
extent_buffer
*
eb
;
int
ret
;
test_msg
(
"Running extent buffer bitmap tests
\n
"
);
bitmap
=
kmalloc
(
len
,
GFP_NOFS
);
if
(
!
bitmap
)
{
test_msg
(
"Couldn't allocate test bitmap
\n
"
);
return
-
ENOMEM
;
}
eb
=
__alloc_dummy_extent_buffer
(
NULL
,
0
,
len
);
if
(
!
eb
)
{
test_msg
(
"Couldn't allocate test extent buffer
\n
"
);
kfree
(
bitmap
);
return
-
ENOMEM
;
}
ret
=
__test_eb_bitmaps
(
bitmap
,
eb
,
len
);
if
(
ret
)
goto
out
;
/* Do it over again with an extent buffer which isn't page-aligned. */
free_extent_buffer
(
eb
);
eb
=
__alloc_dummy_extent_buffer
(
NULL
,
PAGE_CACHE_SIZE
/
2
,
len
);
if
(
!
eb
)
{
test_msg
(
"Couldn't allocate test extent buffer
\n
"
);
kfree
(
bitmap
);
return
-
ENOMEM
;
}
ret
=
__test_eb_bitmaps
(
bitmap
,
eb
,
len
);
out:
free_extent_buffer
(
eb
);
kfree
(
bitmap
);
return
ret
;
}
int
btrfs_test_extent_io
(
void
)
{
test_msg
(
"Running find delalloc tests
\n
"
);
return
test_find_delalloc
();
int
ret
;
test_msg
(
"Running extent I/O tests
\n
"
);
ret
=
test_find_delalloc
();
if
(
ret
)
goto
out
;
ret
=
test_eb_bitmaps
();
out:
test_msg
(
"Extent I/O tests finished
\n
"
);
return
ret
;
}
fs/btrfs/tests/free-space-tests.c
View file @
f0f76413
...
...
@@ -23,41 +23,6 @@
#include "../free-space-cache.h"
#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
static struct btrfs_block_group_cache *init_test_block_group(void)
{
        struct btrfs_block_group_cache *cache;

        cache = kzalloc(sizeof(*cache), GFP_NOFS);
        if (!cache)
                return NULL;
        cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
                                        GFP_NOFS);
        if (!cache->free_space_ctl) {
                kfree(cache);
                return NULL;
        }
        cache->fs_info = btrfs_alloc_dummy_fs_info();
        if (!cache->fs_info) {
                kfree(cache->free_space_ctl);
                kfree(cache);
                return NULL;
        }

        cache->key.objectid = 0;
        cache->key.offset = 1024 * 1024 * 1024;
        cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
        cache->sectorsize = 4096;
        cache->full_stripe_len = 4096;

        spin_lock_init(&cache->lock);
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);
        INIT_LIST_HEAD(&cache->bg_list);

        btrfs_init_free_space_ctl(cache);

        return cache;
}
/*
* This test just does basic sanity checking, making sure we can add an exten
...
...
@@ -891,7 +856,7 @@ int btrfs_test_free_space_cache(void)
        test_msg("Running btrfs free space cache tests\n");

-       cache = init_test_block_group();
+       cache = btrfs_alloc_dummy_block_group(1024 * 1024 * 1024);
        if (!cache) {
                test_msg("Couldn't run the tests\n");
                return 0;
...
...
@@ -922,9 +887,7 @@ int btrfs_test_free_space_cache(void)
        ret = test_steal_space_from_bitmap_to_extent(cache);
out:
-       __btrfs_remove_free_space_cache(cache->free_space_ctl);
-       kfree(cache->free_space_ctl);
-       kfree(cache);
+       btrfs_free_dummy_block_group(cache);
        btrfs_free_dummy_root(root);
        test_msg("Free space cache tests finished\n");
        return ret;
...
...
fs/btrfs/tests/free-space-tree-tests.c
0 → 100644
View file @
f0f76413
/*
* Copyright (C) 2015 Facebook. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../disk-io.h"
#include "../free-space-tree.h"
#include "../transaction.h"
struct free_space_extent {
        u64 start, length;
};
/*
* The test cases align their operations to this in order to hit some of the
* edge cases in the bitmap code.
*/
#define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * 4096)
static int __check_free_space_extents(struct btrfs_trans_handle *trans,
                                      struct btrfs_fs_info *fs_info,
                                      struct btrfs_block_group_cache *cache,
                                      struct btrfs_path *path,
                                      struct free_space_extent *extents,
                                      unsigned int num_extents)
{
        struct btrfs_free_space_info *info;
        struct btrfs_key key;
        int prev_bit = 0, bit;
        u64 extent_start = 0, offset, end;
        u32 flags, extent_count;
        unsigned int i;
        int ret;

        info = search_free_space_info(trans, fs_info, cache, path, 0);
        if (IS_ERR(info)) {
                test_msg("Could not find free space info\n");
                ret = PTR_ERR(info);
                goto out;
        }
        flags = btrfs_free_space_flags(path->nodes[0], info);
        extent_count = btrfs_free_space_extent_count(path->nodes[0], info);

        if (extent_count != num_extents) {
                test_msg("Extent count is wrong\n");
                ret = -EINVAL;
                goto out;
        }
        if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
                if (path->slots[0] != 0)
                        goto invalid;
                end = cache->key.objectid + cache->key.offset;
                i = 0;
                while (++path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
                        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                        if (key.type != BTRFS_FREE_SPACE_BITMAP_KEY)
                                goto invalid;
                        offset = key.objectid;
                        while (offset < key.objectid + key.offset) {
                                bit = free_space_test_bit(cache, path, offset);
                                if (prev_bit == 0 && bit == 1) {
                                        extent_start = offset;
                                } else if (prev_bit == 1 && bit == 0) {
                                        if (i >= num_extents)
                                                goto invalid;
                                        if (i >= num_extents ||
                                            extent_start != extents[i].start ||
                                            offset - extent_start != extents[i].length)
                                                goto invalid;
                                        i++;
                                }
                                prev_bit = bit;
                                offset += cache->sectorsize;
                        }
                }
                if (prev_bit == 1) {
                        if (i >= num_extents ||
                            extent_start != extents[i].start ||
                            end - extent_start != extents[i].length)
                                goto invalid;
                        i++;
                }
                if (i != num_extents)
                        goto invalid;
        } else {
                if (btrfs_header_nritems(path->nodes[0]) != num_extents + 1 ||
                    path->slots[0] != 0)
                        goto invalid;
                for (i = 0; i < num_extents; i++) {
                        path->slots[0]++;
                        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                        if (key.type != BTRFS_FREE_SPACE_EXTENT_KEY ||
                            key.objectid != extents[i].start ||
                            key.offset != extents[i].length)
                                goto invalid;
                }
        }

        ret = 0;
out:
        btrfs_release_path(path);
        return ret;
invalid:
        test_msg("Free space tree is invalid\n");
        ret = -EINVAL;
        goto out;
}

static int check_free_space_extents(struct btrfs_trans_handle *trans,
                                    struct btrfs_fs_info *fs_info,
                                    struct btrfs_block_group_cache *cache,
                                    struct btrfs_path *path,
                                    struct free_space_extent *extents,
                                    unsigned int num_extents)
{
        struct btrfs_free_space_info *info;
        u32 flags;
        int ret;

        info = search_free_space_info(trans, fs_info, cache, path, 0);
        if (IS_ERR(info)) {
                test_msg("Could not find free space info\n");
                btrfs_release_path(path);
                return PTR_ERR(info);
        }
        flags = btrfs_free_space_flags(path->nodes[0], info);
        btrfs_release_path(path);

        ret = __check_free_space_extents(trans, fs_info, cache, path, extents,
                                         num_extents);
        if (ret)
                return ret;

        /* Flip it to the other format and check that for good measure. */
        if (flags & BTRFS_FREE_SPACE_USING_BITMAPS) {
                ret = convert_free_space_to_extents(trans, fs_info, cache, path);
                if (ret) {
                        test_msg("Could not convert to extents\n");
                        return ret;
                }
        } else {
                ret = convert_free_space_to_bitmaps(trans, fs_info, cache, path);
                if (ret) {
                        test_msg("Could not convert to bitmaps\n");
                        return ret;
                }
        }
        return __check_free_space_extents(trans, fs_info, cache, path, extents,
                                          num_extents);
}
static int test_empty_block_group(struct btrfs_trans_handle *trans,
                                  struct btrfs_fs_info *fs_info,
                                  struct btrfs_block_group_cache *cache,
                                  struct btrfs_path *path)
{
        struct free_space_extent extents[] = {
                {cache->key.objectid, cache->key.offset},
        };

        return check_free_space_extents(trans, fs_info, cache, path,
                                        extents, ARRAY_SIZE(extents));
}

static int test_remove_all(struct btrfs_trans_handle *trans,
                           struct btrfs_fs_info *fs_info,
                           struct btrfs_block_group_cache *cache,
                           struct btrfs_path *path)
{
        struct free_space_extent extents[] = {};
        int ret;

        ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
                                            cache->key.objectid,
                                            cache->key.offset);
        if (ret) {
                test_msg("Could not remove free space\n");
                return ret;
        }

        return check_free_space_extents(trans, fs_info, cache, path,
                                        extents, ARRAY_SIZE(extents));
}

static int test_remove_beginning(struct btrfs_trans_handle *trans,
                                 struct btrfs_fs_info *fs_info,
                                 struct btrfs_block_group_cache *cache,
                                 struct btrfs_path *path)
{
        struct free_space_extent extents[] = {
                {cache->key.objectid + BITMAP_RANGE,
                        cache->key.offset - BITMAP_RANGE},
        };
        int ret;

        ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
                                            cache->key.objectid, BITMAP_RANGE);
        if (ret) {
                test_msg("Could not remove free space\n");
                return ret;
        }

        return check_free_space_extents(trans, fs_info, cache, path,
                                        extents, ARRAY_SIZE(extents));
}

static int test_remove_end(struct btrfs_trans_handle *trans,
                           struct btrfs_fs_info *fs_info,
                           struct btrfs_block_group_cache *cache,
                           struct btrfs_path *path)
{
        struct free_space_extent extents[] = {
                {cache->key.objectid, cache->key.offset - BITMAP_RANGE},
        };
        int ret;

        ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
                                            cache->key.objectid +
                                            cache->key.offset - BITMAP_RANGE,
                                            BITMAP_RANGE);
        if (ret) {
                test_msg("Could not remove free space\n");
                return ret;
        }

        return check_free_space_extents(trans, fs_info, cache, path,
                                        extents, ARRAY_SIZE(extents));
}

static int test_remove_middle(struct btrfs_trans_handle *trans,
                              struct btrfs_fs_info *fs_info,
                              struct btrfs_block_group_cache *cache,
                              struct btrfs_path *path)
{
        struct free_space_extent extents[] = {
                {cache->key.objectid, BITMAP_RANGE},
                {cache->key.objectid + 2 * BITMAP_RANGE,
                        cache->key.offset - 2 * BITMAP_RANGE},
        };
        int ret;

        ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
                                            cache->key.objectid + BITMAP_RANGE,
                                            BITMAP_RANGE);
        if (ret) {
                test_msg("Could not remove free space\n");
                return ret;
        }

        return check_free_space_extents(trans, fs_info, cache, path,
                                        extents, ARRAY_SIZE(extents));
}
static int test_merge_left(struct btrfs_trans_handle *trans,
                           struct btrfs_fs_info *fs_info,
                           struct btrfs_block_group_cache *cache,
                           struct btrfs_path *path)
{
        struct free_space_extent extents[] = {
                {cache->key.objectid, 2 * BITMAP_RANGE},
        };
        int ret;

        ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
                                            cache->key.objectid,
                                            cache->key.offset);
        if (ret) {
                test_msg("Could not remove free space\n");
                return ret;
        }

        ret = __add_to_free_space_tree(trans, fs_info, cache, path,
                                       cache->key.objectid, BITMAP_RANGE);
        if (ret) {
                test_msg("Could not add free space\n");
                return ret;
        }

        ret = __add_to_free_space_tree(trans, fs_info, cache, path,
                                       cache->key.objectid + BITMAP_RANGE,
                                       BITMAP_RANGE);
        if (ret) {
                test_msg("Could not add free space\n");
                return ret;
        }

        return check_free_space_extents(trans, fs_info, cache, path,
                                        extents, ARRAY_SIZE(extents));
}

static int test_merge_right(struct btrfs_trans_handle *trans,
                            struct btrfs_fs_info *fs_info,
                            struct btrfs_block_group_cache *cache,
                            struct btrfs_path *path)
{
        struct free_space_extent extents[] = {
                {cache->key.objectid + BITMAP_RANGE, 2 * BITMAP_RANGE},
        };
        int ret;

        ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
                                            cache->key.objectid,
                                            cache->key.offset);
        if (ret) {
                test_msg("Could not remove free space\n");
                return ret;
        }

        ret = __add_to_free_space_tree(trans, fs_info, cache, path,
                                       cache->key.objectid + 2 * BITMAP_RANGE,
                                       BITMAP_RANGE);
        if (ret) {
                test_msg("Could not add free space\n");
                return ret;
        }

        ret = __add_to_free_space_tree(trans, fs_info, cache, path,
                                       cache->key.objectid + BITMAP_RANGE,
                                       BITMAP_RANGE);
        if (ret) {
                test_msg("Could not add free space\n");
                return ret;
        }

        return check_free_space_extents(trans, fs_info, cache, path,
                                        extents, ARRAY_SIZE(extents));
}

static int test_merge_both(struct btrfs_trans_handle *trans,
                           struct btrfs_fs_info *fs_info,
                           struct btrfs_block_group_cache *cache,
                           struct btrfs_path *path)
{
        struct free_space_extent extents[] = {
                {cache->key.objectid, 3 * BITMAP_RANGE},
        };
        int ret;

        ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
                                            cache->key.objectid,
                                            cache->key.offset);
        if (ret) {
                test_msg("Could not remove free space\n");
                return ret;
        }

        ret = __add_to_free_space_tree(trans, fs_info, cache, path,
                                       cache->key.objectid, BITMAP_RANGE);
        if (ret) {
                test_msg("Could not add free space\n");
                return ret;
        }

        ret = __add_to_free_space_tree(trans, fs_info, cache, path,
                                       cache->key.objectid + 2 * BITMAP_RANGE,
                                       BITMAP_RANGE);
        if (ret) {
                test_msg("Could not add free space\n");
                return ret;
        }

        ret = __add_to_free_space_tree(trans, fs_info, cache, path,
                                       cache->key.objectid + BITMAP_RANGE,
                                       BITMAP_RANGE);
        if (ret) {
                test_msg("Could not add free space\n");
                return ret;
        }

        return check_free_space_extents(trans, fs_info, cache, path,
                                        extents, ARRAY_SIZE(extents));
}

static int test_merge_none(struct btrfs_trans_handle *trans,
                           struct btrfs_fs_info *fs_info,
                           struct btrfs_block_group_cache *cache,
                           struct btrfs_path *path)
{
        struct free_space_extent extents[] = {
                {cache->key.objectid, BITMAP_RANGE},
                {cache->key.objectid + 2 * BITMAP_RANGE, BITMAP_RANGE},
                {cache->key.objectid + 4 * BITMAP_RANGE, BITMAP_RANGE},
        };
        int ret;

        ret = __remove_from_free_space_tree(trans, fs_info, cache, path,
                                            cache->key.objectid,
                                            cache->key.offset);
        if (ret) {
                test_msg("Could not remove free space\n");
                return ret;
        }

        ret = __add_to_free_space_tree(trans, fs_info, cache, path,
                                       cache->key.objectid, BITMAP_RANGE);
        if (ret) {
                test_msg("Could not add free space\n");
                return ret;
        }

        ret = __add_to_free_space_tree(trans, fs_info, cache, path,
                                       cache->key.objectid + 4 * BITMAP_RANGE,
                                       BITMAP_RANGE);
        if (ret) {
                test_msg("Could not add free space\n");
                return ret;
        }

        ret = __add_to_free_space_tree(trans, fs_info, cache, path,
                                       cache->key.objectid + 2 * BITMAP_RANGE,
                                       BITMAP_RANGE);
        if (ret) {
                test_msg("Could not add free space\n");
                return ret;
        }

        return check_free_space_extents(trans, fs_info, cache, path,
                                        extents, ARRAY_SIZE(extents));
}
typedef int (*test_func_t)(struct btrfs_trans_handle *,
                           struct btrfs_fs_info *,
                           struct btrfs_block_group_cache *,
                           struct btrfs_path *);

static int run_test(test_func_t test_func, int bitmaps)
{
        struct btrfs_root *root = NULL;
        struct btrfs_block_group_cache *cache = NULL;
        struct btrfs_trans_handle trans;
        struct btrfs_path *path = NULL;
        int ret;

        root = btrfs_alloc_dummy_root();
        if (IS_ERR(root)) {
                test_msg("Couldn't allocate dummy root\n");
                ret = PTR_ERR(root);
                goto out;
        }

        root->fs_info = btrfs_alloc_dummy_fs_info();
        if (!root->fs_info) {
                test_msg("Couldn't allocate dummy fs info\n");
                ret = -ENOMEM;
                goto out;
        }

        btrfs_set_super_compat_ro_flags(root->fs_info->super_copy,
                                        BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE);
        root->fs_info->free_space_root = root;
        root->fs_info->tree_root = root;

        root->node = alloc_test_extent_buffer(root->fs_info, 4096);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                ret = -ENOMEM;
                goto out;
        }
        btrfs_set_header_level(root->node, 0);
        btrfs_set_header_nritems(root->node, 0);
        root->alloc_bytenr += 8192;

        cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE);
        if (!cache) {
                test_msg("Couldn't allocate dummy block group cache\n");
                ret = -ENOMEM;
                goto out;
        }
        cache->bitmap_low_thresh = 0;
        cache->bitmap_high_thresh = (u32)-1;
        cache->needs_free_space = 1;

        btrfs_init_dummy_trans(&trans);

        path = btrfs_alloc_path();
        if (!path) {
                test_msg("Couldn't allocate path\n");
                return -ENOMEM;
        }

        ret = add_block_group_free_space(&trans, root->fs_info, cache);
        if (ret) {
                test_msg("Could not add block group free space\n");
                goto out;
        }

        if (bitmaps) {
                ret = convert_free_space_to_bitmaps(&trans, root->fs_info,
                                                    cache, path);
                if (ret) {
                        test_msg("Could not convert block group to bitmaps\n");
                        goto out;
                }
        }

        ret = test_func(&trans, root->fs_info, cache, path);
        if (ret)
                goto out;

        ret = remove_block_group_free_space(&trans, root->fs_info, cache);
        if (ret) {
                test_msg("Could not remove block group free space\n");
                goto out;
        }

        if (btrfs_header_nritems(root->node) != 0) {
                test_msg("Free space tree has leftover items\n");
                ret = -EINVAL;
                goto out;
        }

        ret = 0;
out:
        btrfs_free_path(path);
        btrfs_free_dummy_block_group(cache);
        btrfs_free_dummy_root(root);
        return ret;
}

static int run_test_both_formats(test_func_t test_func)
{
        int ret;

        ret = run_test(test_func, 0);
        if (ret)
                return ret;
        return run_test(test_func, 1);
}

int btrfs_test_free_space_tree(void)
{
        test_func_t tests[] = {
                test_empty_block_group,
                test_remove_all,
                test_remove_beginning,
                test_remove_end,
                test_remove_middle,
                test_merge_left,
                test_merge_right,
                test_merge_both,
                test_merge_none,
        };
        int i;

        test_msg("Running free space tree tests\n");
        for (i = 0; i < ARRAY_SIZE(tests); i++) {
                int ret = run_test_both_formats(tests[i]);
                if (ret) {
                        test_msg("%pf failed\n", tests[i]);
                        return ret;
                }
        }

        return 0;
}
fs/btrfs/tests/qgroup-tests.c
View file @
f0f76413
...
...
@@ -23,14 +23,6 @@
#include "../qgroup.h"
#include "../backref.h"
static void init_dummy_trans(struct btrfs_trans_handle *trans)
{
        memset(trans, 0, sizeof(*trans));
        trans->transid = 1;
        INIT_LIST_HEAD(&trans->qgroup_ref_list);
        trans->type = __TRANS_DUMMY;
}
static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
                                  u64 num_bytes, u64 parent, u64 root_objectid)
{
...
...
@@ -44,7 +36,7 @@ static int insert_normal_tree_ref(struct btrfs_root *root, u64 bytenr,
        u32 size = sizeof(*item) + sizeof(*iref) + sizeof(*block_info);
        int ret;

-       init_dummy_trans(&trans);
+       btrfs_init_dummy_trans(&trans);

        ins.objectid = bytenr;
        ins.type = BTRFS_EXTENT_ITEM_KEY;
...
...
@@ -94,7 +86,7 @@ static int add_tree_ref(struct btrfs_root *root, u64 bytenr, u64 num_bytes,
        u64 refs;
        int ret;

-       init_dummy_trans(&trans);
+       btrfs_init_dummy_trans(&trans);

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
...
...
@@ -144,7 +136,7 @@ static int remove_extent_item(struct btrfs_root *root, u64 bytenr,
        struct btrfs_path *path;
        int ret;

-       init_dummy_trans(&trans);
+       btrfs_init_dummy_trans(&trans);

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
...
...
@@ -178,7 +170,7 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr,
        u64 refs;
        int ret;

-       init_dummy_trans(&trans);
+       btrfs_init_dummy_trans(&trans);

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
...
...
@@ -232,7 +224,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root)
        struct ulist *new_roots = NULL;
        int ret;

-       init_dummy_trans(&trans);
+       btrfs_init_dummy_trans(&trans);

        test_msg("Qgroup basic add\n");
        ret = btrfs_create_qgroup(NULL, fs_info, 5);
...
...
@@ -326,7 +318,7 @@ static int test_multiple_refs(struct btrfs_root *root)
        struct ulist *new_roots = NULL;
        int ret;

-       init_dummy_trans(&trans);
+       btrfs_init_dummy_trans(&trans);

        test_msg("Qgroup multiple refs test\n");
...
...
include/trace/events/btrfs.h
View file @
f0f76413
...
...
@@ -45,7 +45,8 @@ struct btrfs_qgroup_operation;
{ BTRFS_TREE_LOG_OBJECTID, "TREE_LOG" }, \
{ BTRFS_QUOTA_TREE_OBJECTID, "QUOTA_TREE" }, \
{ BTRFS_TREE_RELOC_OBJECTID, "TREE_RELOC" }, \
-{ BTRFS_UUID_TREE_OBJECTID, "UUID_RELOC" }, \
+{ BTRFS_UUID_TREE_OBJECTID, "UUID_TREE" }, \
+{ BTRFS_FREE_SPACE_TREE_OBJECTID, "FREE_SPACE_TREE" }, \
{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" })
#define show_root_type(obj) \
...
...