Commit 26ce34a9 authored by Chris Mason


Merge branch 'master' of ssh://mason@master.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
parents 3fa8749e 37d3cddd
@@ -497,6 +497,23 @@ config OCFS2_DEBUG_FS
this option for debugging only as it is likely to decrease
performance of the filesystem.
config BTRFS_FS
tristate "Btrfs filesystem (EXPERIMENTAL) Unstable disk format"
depends on EXPERIMENTAL
select LIBCRC32C
help
Btrfs is a new filesystem with extents, writable snapshotting,
support for multiple devices and many more features.
Btrfs is highly experimental, and THE DISK FORMAT IS NOT YET
FINALIZED. You should say N here unless you are interested in
testing Btrfs with non-critical data.
To compile this file system support as a module, choose M here. The
module will be called btrfs.
If unsure, say N.
endif # BLOCK
config DNOTIFY
@@ -121,4 +121,5 @@ obj-$(CONFIG_HOSTFS) += hostfs/
obj-$(CONFIG_HPPFS) += hppfs/
obj-$(CONFIG_DEBUG_FS) += debugfs/
obj-$(CONFIG_OCFS2_FS) += ocfs2/
obj-$(CONFIG_BTRFS_FS) += btrfs/
obj-$(CONFIG_GFS2_FS) += gfs2/
Install Instructions
Btrfs puts snapshots and subvolumes into the root directory of the FS. This
directory can only be changed by btrfsctl right now, and normal filesystem
operations do not work on it. The default subvolume is called 'default',
and you can create files and directories in mount_point/default.
Btrfs uses libcrc32c in the kernel for file and metadata checksums. You need
to compile the kernel with:
CONFIG_LIBCRC32C=m
libcrc32c can be static as well. Once your kernel is set up, typing make in the
btrfs module sources will build against the running kernel. When the build is
complete:
modprobe libcrc32c
insmod btrfs.ko
The Btrfs utility programs require libuuid to build. This can be found
in the e2fsprogs sources, and is usually available as libuuid or
e2fsprogs-devel from various distros.
Building the utilities is just make ; make install. The programs go
into /usr/local/bin. The commands available are:
mkfs.btrfs: create a filesystem
btrfsctl: control program to create snapshots and subvolumes:
mount /dev/sda2 /mnt
btrfsctl -s new_subvol_name /mnt
btrfsctl -s snapshot_of_default /mnt/default
btrfsctl -s snapshot_of_new_subvol /mnt/new_subvol_name
btrfsctl -s snapshot_of_a_snapshot /mnt/snapshot_of_new_subvol
ls /mnt
default snapshot_of_a_snapshot snapshot_of_new_subvol
new_subvol_name snapshot_of_default
Snapshots and subvolumes cannot be deleted right now, but you can
rm -rf all the files and directories inside them.
btrfsck: do a limited check of the FS extent trees.
debug-tree: print all of the FS metadata in text form. Example:
debug-tree /dev/sda2 >& big_output_file
ifneq ($(KERNELRELEASE),)
# kbuild part of makefile
obj-$(CONFIG_BTRFS_FS) := btrfs.o
btrfs-y := super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
file-item.o inode-item.o inode-map.o disk-io.o \
transaction.o inode.o file.o tree-defrag.o \
extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
ref-cache.o export.o tree-log.o acl.o free-space-cache.o
else
# Normal Makefile
KERNELDIR := /lib/modules/`uname -r`/build
all:
$(MAKE) -C $(KERNELDIR) M=`pwd` CONFIG_BTRFS_FS=m modules
modules_install:
$(MAKE) -C $(KERNELDIR) M=`pwd` modules_install
clean:
$(MAKE) -C $(KERNELDIR) M=`pwd` clean
endif
/*
* Copyright (C) 2007 Red Hat. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/posix_acl.h>
#include <linux/sched.h>
#include "ctree.h"
#include "btrfs_inode.h"
#include "xattr.h"
#ifdef CONFIG_FS_POSIX_ACL
static void btrfs_update_cached_acl(struct inode *inode,
struct posix_acl **p_acl,
struct posix_acl *acl)
{
spin_lock(&inode->i_lock);
if (*p_acl && *p_acl != BTRFS_ACL_NOT_CACHED)
posix_acl_release(*p_acl);
*p_acl = posix_acl_dup(acl);
spin_unlock(&inode->i_lock);
}
static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
{
int size;
const char *name;
char *value = NULL;
struct posix_acl *acl = NULL, **p_acl;
switch (type) {
case ACL_TYPE_ACCESS:
name = POSIX_ACL_XATTR_ACCESS;
p_acl = &BTRFS_I(inode)->i_acl;
break;
case ACL_TYPE_DEFAULT:
name = POSIX_ACL_XATTR_DEFAULT;
p_acl = &BTRFS_I(inode)->i_default_acl;
break;
default:
return ERR_PTR(-EINVAL);
}
spin_lock(&inode->i_lock);
if (*p_acl != BTRFS_ACL_NOT_CACHED)
acl = posix_acl_dup(*p_acl);
spin_unlock(&inode->i_lock);
if (acl)
return acl;
size = __btrfs_getxattr(inode, name, "", 0);
if (size > 0) {
value = kzalloc(size, GFP_NOFS);
if (!value)
return ERR_PTR(-ENOMEM);
size = __btrfs_getxattr(inode, name, value, size);
if (size > 0) {
acl = posix_acl_from_xattr(value, size);
btrfs_update_cached_acl(inode, p_acl, acl);
}
kfree(value);
} else if (size == -ENOENT) {
acl = NULL;
btrfs_update_cached_acl(inode, p_acl, acl);
}
return acl;
}
static int btrfs_xattr_get_acl(struct inode *inode, int type,
void *value, size_t size)
{
struct posix_acl *acl;
int ret = 0;
acl = btrfs_get_acl(inode, type);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl == NULL)
return -ENODATA;
ret = posix_acl_to_xattr(acl, value, size);
posix_acl_release(acl);
return ret;
}
/*
* Needs to be called with fs_mutex held
*/
static int btrfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
int ret, size = 0;
const char *name;
struct posix_acl **p_acl;
char *value = NULL;
mode_t mode;
if (acl) {
ret = posix_acl_valid(acl);
if (ret < 0)
return ret;
ret = 0;
}
switch (type) {
case ACL_TYPE_ACCESS:
mode = inode->i_mode;
ret = posix_acl_equiv_mode(acl, &mode);
if (ret < 0)
return ret;
ret = 0;
inode->i_mode = mode;
name = POSIX_ACL_XATTR_ACCESS;
p_acl = &BTRFS_I(inode)->i_acl;
break;
case ACL_TYPE_DEFAULT:
if (!S_ISDIR(inode->i_mode))
return acl ? -EINVAL : 0;
name = POSIX_ACL_XATTR_DEFAULT;
p_acl = &BTRFS_I(inode)->i_default_acl;
break;
default:
return -EINVAL;
}
if (acl) {
size = posix_acl_xattr_size(acl->a_count);
value = kmalloc(size, GFP_NOFS);
if (!value) {
ret = -ENOMEM;
goto out;
}
ret = posix_acl_to_xattr(acl, value, size);
if (ret < 0)
goto out;
}
ret = __btrfs_setxattr(inode, name, value, size, 0);
out:
if (value)
kfree(value);
if (!ret)
btrfs_update_cached_acl(inode, p_acl, acl);
return ret;
}
static int btrfs_xattr_set_acl(struct inode *inode, int type,
const void *value, size_t size)
{
int ret = 0;
struct posix_acl *acl = NULL;
if (value) {
acl = posix_acl_from_xattr(value, size);
if (acl == NULL) {
value = NULL;
size = 0;
} else if (IS_ERR(acl)) {
return PTR_ERR(acl);
}
}
ret = btrfs_set_acl(inode, acl, type);
posix_acl_release(acl);
return ret;
}
static int btrfs_xattr_acl_access_get(struct inode *inode, const char *name,
void *value, size_t size)
{
return btrfs_xattr_get_acl(inode, ACL_TYPE_ACCESS, value, size);
}
static int btrfs_xattr_acl_access_set(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
return btrfs_xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
}
static int btrfs_xattr_acl_default_get(struct inode *inode, const char *name,
void *value, size_t size)
{
return btrfs_xattr_get_acl(inode, ACL_TYPE_DEFAULT, value, size);
}
static int btrfs_xattr_acl_default_set(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
return btrfs_xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
}
int btrfs_check_acl(struct inode *inode, int mask)
{
struct posix_acl *acl;
int error = -EAGAIN;
acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(acl))
return PTR_ERR(acl);
if (acl) {
error = posix_acl_permission(inode, acl, mask);
posix_acl_release(acl);
}
return error;
}
/*
* btrfs_init_acl is generally called with fs_mutex held, so the locking
* here has been written to work under that assumption. If the locking
* model changes, the ACL locking needs to be re-evaluated.
*/
int btrfs_init_acl(struct inode *inode, struct inode *dir)
{
struct posix_acl *acl = NULL;
int ret = 0;
/* this happens with subvols */
if (!dir)
return 0;
if (!S_ISLNK(inode->i_mode)) {
if (IS_POSIXACL(dir)) {
acl = btrfs_get_acl(dir, ACL_TYPE_DEFAULT);
if (IS_ERR(acl))
return PTR_ERR(acl);
}
if (!acl)
inode->i_mode &= ~current->fs->umask;
}
if (IS_POSIXACL(dir) && acl) {
struct posix_acl *clone;
mode_t mode;
if (S_ISDIR(inode->i_mode)) {
ret = btrfs_set_acl(inode, acl, ACL_TYPE_DEFAULT);
if (ret)
goto failed;
}
clone = posix_acl_clone(acl, GFP_NOFS);
ret = -ENOMEM;
if (!clone)
goto failed;
mode = inode->i_mode;
ret = posix_acl_create_masq(clone, &mode);
if (ret >= 0) {
inode->i_mode = mode;
if (ret > 0) {
/* we need an acl */
ret = btrfs_set_acl(inode, clone,
ACL_TYPE_ACCESS);
}
}
}
failed:
posix_acl_release(acl);
return ret;
}
int btrfs_acl_chmod(struct inode *inode)
{
struct posix_acl *acl, *clone;
int ret = 0;
if (S_ISLNK(inode->i_mode))
return -EOPNOTSUPP;
if (!IS_POSIXACL(inode))
return 0;
acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
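/* PTR_ERR(NULL) is 0, so an inode with no ACL returns success here */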
if (IS_ERR(acl) || !acl)
return PTR_ERR(acl);
clone = posix_acl_clone(acl, GFP_KERNEL);
posix_acl_release(acl);
if (!clone)
return -ENOMEM;
ret = posix_acl_chmod_masq(clone, inode->i_mode);
if (!ret)
ret = btrfs_set_acl(inode, clone, ACL_TYPE_ACCESS);
posix_acl_release(clone);
return ret;
}
struct xattr_handler btrfs_xattr_acl_default_handler = {
.prefix = POSIX_ACL_XATTR_DEFAULT,
.get = btrfs_xattr_acl_default_get,
.set = btrfs_xattr_acl_default_set,
};
struct xattr_handler btrfs_xattr_acl_access_handler = {
.prefix = POSIX_ACL_XATTR_ACCESS,
.get = btrfs_xattr_acl_access_get,
.set = btrfs_xattr_acl_access_set,
};
#else /* CONFIG_FS_POSIX_ACL */
int btrfs_acl_chmod(struct inode *inode)
{
return 0;
}
int btrfs_init_acl(struct inode *inode, struct inode *dir)
{
return 0;
}
int btrfs_check_acl(struct inode *inode, int mask)
{
return 0;
}
#endif /* CONFIG_FS_POSIX_ACL */
/*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#include <linux/version.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
/*
* container for the kthread task pointer and the list of pending work
* One of these is allocated per thread.
*/
struct btrfs_worker_thread {
/* pool we belong to */
struct btrfs_workers *workers;
/* list of struct btrfs_work that are waiting for service */
struct list_head pending;
/* list of worker threads from struct btrfs_workers */
struct list_head worker_list;
/* kthread */
struct task_struct *task;
/* number of things on the pending list */
atomic_t num_pending;
unsigned long sequence;
/* protects the pending list. */
spinlock_t lock;
/* set to non-zero when this thread is already awake and kicking */
int working;
/* are we currently idle */
int idle;
};
/*
* helper function to move a thread onto the idle list after it
* has finished some requests.
*/
static void check_idle_worker(struct btrfs_worker_thread *worker)
{
if (!worker->idle && atomic_read(&worker->num_pending) <
worker->workers->idle_thresh / 2) {
unsigned long flags;
spin_lock_irqsave(&worker->workers->lock, flags);
worker->idle = 1;
list_move(&worker->worker_list, &worker->workers->idle_list);
spin_unlock_irqrestore(&worker->workers->lock, flags);
}
}
/*
* helper function to move a thread off the idle list after new
* pending work is added.
*/
static void check_busy_worker(struct btrfs_worker_thread *worker)
{
if (worker->idle && atomic_read(&worker->num_pending) >=
worker->workers->idle_thresh) {
unsigned long flags;
spin_lock_irqsave(&worker->workers->lock, flags);
worker->idle = 0;
list_move_tail(&worker->worker_list,
&worker->workers->worker_list);
spin_unlock_irqrestore(&worker->workers->lock, flags);
}
}
/*
* main loop for servicing work items
*/
static int worker_loop(void *arg)
{
struct btrfs_worker_thread *worker = arg;
struct list_head *cur;
struct btrfs_work *work;
do {
spin_lock_irq(&worker->lock);
while (!list_empty(&worker->pending)) {
cur = worker->pending.next;
work = list_entry(cur, struct btrfs_work, list);
list_del(&work->list);
clear_bit(0, &work->flags);
work->worker = worker;
spin_unlock_irq(&worker->lock);
work->func(work);
atomic_dec(&worker->num_pending);
spin_lock_irq(&worker->lock);
check_idle_worker(worker);
}
worker->working = 0;
if (freezing(current)) {
refrigerator();
} else {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(&worker->lock);
schedule();
__set_current_state(TASK_RUNNING);
}
} while (!kthread_should_stop());
return 0;
}
/*
* this will wait for all the worker threads to shutdown
*/
int btrfs_stop_workers(struct btrfs_workers *workers)
{
struct list_head *cur;
struct btrfs_worker_thread *worker;
list_splice_init(&workers->idle_list, &workers->worker_list);
while (!list_empty(&workers->worker_list)) {
cur = workers->worker_list.next;
worker = list_entry(cur, struct btrfs_worker_thread,
worker_list);
kthread_stop(worker->task);
list_del(&worker->worker_list);
kfree(worker);
}
return 0;
}
/*
* simple init on struct btrfs_workers
*/
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
{
workers->num_workers = 0;
INIT_LIST_HEAD(&workers->worker_list);
INIT_LIST_HEAD(&workers->idle_list);
spin_lock_init(&workers->lock);
workers->max_workers = max;
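/*
* a worker is considered busy once it has idle_thresh pending items,
* and idle again once it drops below idle_thresh / 2 (see
* check_busy_worker and check_idle_worker above)
*/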
workers->idle_thresh = 32;
workers->name = name;
}
/*
* starts new worker threads. This does not enforce the max worker
* count in case you need to temporarily go past it.
*/
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
{
struct btrfs_worker_thread *worker;
int ret = 0;
int i;
for (i = 0; i < num_workers; i++) {
worker = kzalloc(sizeof(*worker), GFP_NOFS);
if (!worker) {
ret = -ENOMEM;
goto fail;
}
INIT_LIST_HEAD(&worker->pending);
INIT_LIST_HEAD(&worker->worker_list);
spin_lock_init(&worker->lock);
atomic_set(&worker->num_pending, 0);
worker->task = kthread_run(worker_loop, worker,
"btrfs-%s-%d", workers->name,
workers->num_workers + i);
worker->workers = workers;
if (IS_ERR(worker->task)) {
kfree(worker);
ret = PTR_ERR(worker->task);
goto fail;
}
spin_lock_irq(&workers->lock);
list_add_tail(&worker->worker_list, &workers->idle_list);
worker->idle = 1;
workers->num_workers++;
spin_unlock_irq(&workers->lock);
}
return 0;
fail:
btrfs_stop_workers(workers);
return ret;
}
/*
* run through the list and find a worker thread that doesn't have a lot
* to do right now. This can return null if we aren't yet at the thread
* count limit and all of the threads are busy.
*/
static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
{
struct btrfs_worker_thread *worker;
struct list_head *next;
int enforce_min = workers->num_workers < workers->max_workers;
/*
* if we find an idle thread, don't move it to the end of the
* idle list. This improves the chance that the next submission
* will reuse the same thread, and maybe catch it while it is still
* working
*/
if (!list_empty(&workers->idle_list)) {
next = workers->idle_list.next;
worker = list_entry(next, struct btrfs_worker_thread,
worker_list);
return worker;
}
if (enforce_min || list_empty(&workers->worker_list))
return NULL;
/*
* if we pick a busy task, move the task to the end of the list.
* hopefully this will keep things somewhat evenly balanced.
* Do the move in batches based on the sequence number. This groups
* requests submitted at roughly the same time onto the same worker.
*/
next = workers->worker_list.next;
worker = list_entry(next, struct btrfs_worker_thread, worker_list);
atomic_inc(&worker->num_pending);
worker->sequence++;
if (worker->sequence % workers->idle_thresh == 0)
list_move_tail(next, &workers->worker_list);
return worker;
}
/*
* selects a worker thread to take the next job. This will either find
* an idle worker, start a new worker up to the max count, or just return
* one of the existing busy workers.
*/
static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
{
struct btrfs_worker_thread *worker;
unsigned long flags;
again:
spin_lock_irqsave(&workers->lock, flags);
worker = next_worker(workers);
spin_unlock_irqrestore(&workers->lock, flags);
if (!worker) {
spin_lock_irqsave(&workers->lock, flags);
if (workers->num_workers >= workers->max_workers) {
struct list_head *fallback = NULL;
/*
* we have failed to find any workers, just
* fall back to the first one we can find
*/
if (!list_empty(&workers->worker_list))
fallback = workers->worker_list.next;
if (!list_empty(&workers->idle_list))
fallback = workers->idle_list.next;
BUG_ON(!fallback);
worker = list_entry(fallback,
struct btrfs_worker_thread, worker_list);
spin_unlock_irqrestore(&workers->lock, flags);
} else {
spin_unlock_irqrestore(&workers->lock, flags);
/* we're below the limit, start another worker */
btrfs_start_workers(workers, 1);
goto again;
}
}
return worker;
}
/*
* btrfs_requeue_work just puts the work item back on the tail of the list
* it was taken from. It is intended for use with long running work functions
* that make some progress and want to give the cpu up for others.
*/
int btrfs_requeue_work(struct btrfs_work *work)
{
struct btrfs_worker_thread *worker = work->worker;
unsigned long flags;
if (test_and_set_bit(0, &work->flags))
goto out;
spin_lock_irqsave(&worker->lock, flags);
atomic_inc(&worker->num_pending);
list_add_tail(&work->list, &worker->pending);
/* by definition we're busy, take ourselves off the idle
* list
*/
if (worker->idle) {
spin_lock_irqsave(&worker->workers->lock, flags);
worker->idle = 0;
list_move_tail(&worker->worker_list,
&worker->workers->worker_list);
spin_unlock_irqrestore(&worker->workers->lock, flags);
}
spin_unlock_irqrestore(&worker->lock, flags);
out:
return 0;
}
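/*
* Hypothetical illustration (not part of this commit) of the requeue
* pattern described above: a long running work function does a bounded
* amount of work, then puts itself back on the tail of the queue so
* other pending items get a turn. example_make_progress() is a made-up
* helper that returns nonzero while more work remains.
*/
static void example_long_running_func(struct btrfs_work *work)
{
if (!example_make_progress(work))
return; /* all done */
/* more to do: requeue at the tail and let other work run */
btrfs_requeue_work(work);
}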
/*
* places a struct btrfs_work into the pending queue of one of the kthreads
*/
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
struct btrfs_worker_thread *worker;
unsigned long flags;
int wake = 0;
/* don't requeue something already on a list */
if (test_and_set_bit(0, &work->flags))
goto out;
worker = find_worker(workers);
spin_lock_irqsave(&worker->lock, flags);
atomic_inc(&worker->num_pending);
check_busy_worker(worker);
list_add_tail(&work->list, &worker->pending);
/*
* avoid calling into wake_up_process if this thread has already
* been kicked
*/
if (!worker->working)
wake = 1;
worker->working = 1;
spin_unlock_irqrestore(&worker->lock, flags);
if (wake)
wake_up_process(worker->task);
out:
return 0;
}
/*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifndef __BTRFS_ASYNC_THREAD_
#define __BTRFS_ASYNC_THREAD_
struct btrfs_worker_thread;
/*
* This is similar to a workqueue, but it is meant to spread the operations
* across all available cpus instead of just the CPU that was used to
* queue the work. There is also some batching introduced to try and
* cut down on context switches.
*
* By default threads are added on demand up to 2 * the number of cpus.
* Changing struct btrfs_workers->max_workers is one way to prevent
* demand creation of kthreads.
*
* the basic model of these worker threads is to embed a btrfs_work
* structure in your own data struct, and use container_of in a
* work function to get back to your data struct.
*/
struct btrfs_work {
/*
* only func should be set to the function you want called
* your work struct is passed as the only arg
*/
void (*func)(struct btrfs_work *work);
/*
* flags should be set to zero. It is used to make sure the
* struct is only inserted once into the list.
*/
unsigned long flags;
/* don't touch these */
struct btrfs_worker_thread *worker;
struct list_head list;
};
struct btrfs_workers {
/* current number of running workers */
int num_workers;
/* max number of workers allowed. changed by btrfs_start_workers */
int max_workers;
/* once a worker has this many requests or fewer, it is idle */
int idle_thresh;
/* list with all the work threads. The workers on the idle thread
* may be actively servicing jobs, but they haven't yet hit the
* idle thresh limit above.
*/
struct list_head worker_list;
struct list_head idle_list;
/* lock for finding the next worker thread to queue on */
spinlock_t lock;
/* extra name for this pool, used in the btrfs-%s-%d kthread names */
char *name;
};
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
int btrfs_stop_workers(struct btrfs_workers *workers);
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max);
int btrfs_requeue_work(struct btrfs_work *work);
#endif
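The embedding model described in the comment above can be illustrated with a
short sketch (hypothetical names, not part of this commit): a caller wraps
struct btrfs_work in its own job struct, points func at a handler that
recovers the outer struct with container_of, and submits the job with
btrfs_queue_worker.
/* hypothetical usage sketch, not part of this commit */
#include <linux/kernel.h>
#include <linux/slab.h>
#include "async-thread.h"
struct example_job {
struct btrfs_work work; /* must be embedded by value */
u64 start;
u64 len;
};
static void example_work_func(struct btrfs_work *work)
{
/* recover the containing struct from the embedded btrfs_work */
struct example_job *job = container_of(work, struct example_job, work);
/* ... process the [job->start, job->start + job->len) range ... */
kfree(job);
}
static int example_submit(struct btrfs_workers *workers, u64 start, u64 len)
{
struct example_job *job = kzalloc(sizeof(*job), GFP_NOFS);
if (!job)
return -ENOMEM;
job->work.func = example_work_func;
/* kzalloc left work.flags zeroed, as the header requires */
job->start = start;
job->len = len;
return btrfs_queue_worker(workers, &job->work);
}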
/*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifndef __BTRFS_I__
#define __BTRFS_I__
#include "extent_map.h"
#include "extent_io.h"
#include "ordered-data.h"
/* in memory btrfs inode */
struct btrfs_inode {
/* which subvolume this inode belongs to */
struct btrfs_root *root;
/* the block group preferred for allocations. This pointer is buggy
* and needs to be replaced with a bytenr instead
*/
struct btrfs_block_group_cache *block_group;
/* key used to find this inode on disk. This is used by the code
* to read in roots of subvolumes
*/
struct btrfs_key location;
/* the extent_tree has caches of all the extent mappings to disk */
struct extent_map_tree extent_tree;
/* the io_tree does range state (DIRTY, LOCKED etc) */
struct extent_io_tree io_tree;
/* special utility tree used to record which mirrors have already been
* tried when checksums fail for a given block
*/
struct extent_io_tree io_failure_tree;
/* held while inserting checksums to avoid races */
struct mutex csum_mutex;
/* held while inserting or deleting extents from files */
struct mutex extent_mutex;
/* held while logging the inode in tree-log.c */
struct mutex log_mutex;
/* used to order data wrt metadata */
struct btrfs_ordered_inode_tree ordered_tree;
/* standard acl pointers */
struct posix_acl *i_acl;
struct posix_acl *i_default_acl;
/* for keeping track of orphaned inodes */
struct list_head i_orphan;
/* list of all the delalloc inodes in the FS. There are times we need
* to write all the delalloc pages to disk, and this list is used
* to walk them all.
*/
struct list_head delalloc_inodes;
/* full 64 bit generation number, struct vfs_inode doesn't have a big
* enough field for this.
*/
u64 generation;
/*
* transid of the trans_handle that last modified this inode
*/
u64 last_trans;
/*
* transid that last logged this inode
*/
u64 logged_trans;
/*
* trans that last made a change that should be fully fsync'd. This
* gets reset to zero each time the inode is logged
*/
u64 log_dirty_trans;
/* total number of bytes pending delalloc, used by stat to calc the
* real block usage of the file
*/
u64 delalloc_bytes;
/*
* the size of the file stored in the metadata on disk. data=ordered
* means the in-memory i_size might be larger than the size on disk
* because not all the blocks are written yet.
*/
u64 disk_i_size;
/* flags field from the on disk inode */
u32 flags;
/*
* if this is a directory then index_cnt is the counter for the index
* number for new files that are created
*/
u64 index_cnt;
struct inode vfs_inode;
};
static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
{
return container_of(inode, struct btrfs_inode, vfs_inode);
}
static inline void btrfs_i_size_write(struct inode *inode, u64 size)
{
inode->i_size = size;
BTRFS_I(inode)->disk_i_size = size;
}
#endif
#ifndef _COMPAT_H_
#define _COMPAT_H_
#define btrfs_drop_nlink(inode) drop_nlink(inode)
#define btrfs_inc_nlink(inode) inc_nlink(inode)
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)
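/* d_obtain_alias() was merged in 2.6.28; provide a fallback for older kernels */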
static inline struct dentry *d_obtain_alias(struct inode *inode)
{
struct dentry *d;
if (!inode)
return NULL;
if (IS_ERR(inode))
return ERR_CAST(inode);
d = d_alloc_anon(inode);
if (!d)
iput(inode);
return d;
}
#endif
#endif /* _COMPAT_H_ */
/*
* Copyright (C) 2008 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifndef __BTRFS_CRC32C__
#define __BTRFS_CRC32C__
#include <asm/byteorder.h>
#include <linux/crc32c.h>
#include <linux/version.h>
/* #define CONFIG_BTRFS_HW_SUM 1 */
#ifdef CONFIG_BTRFS_HW_SUM
#ifdef CONFIG_X86
/*
* Using the hardware provided CRC32 instruction to accelerate CRC32C
* computation. CRC32C polynomial: 0x1EDC6F41 (BE) / 0x82F63B78 (LE)
* CRC32 is a new instruction in Intel SSE4.2, the reference can be found at:
* http://www.intel.com/products/processor/manuals/
* Intel(R) 64 and IA-32 Architectures Software Developer's Manual
* Volume 2A: Instruction Set Reference, A-M
*/
#include <asm/cpufeature.h>
#include <asm/processor.h>
#define X86_FEATURE_XMM4_2 (4*32+20) /* Streaming SIMD Extensions-4.2 */
#define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2)
#ifdef CONFIG_X86_64
#define REX_PRE "0x48, "
#define SCALE_F 8
#else
#define REX_PRE
#define SCALE_F 4
#endif
static inline u32 btrfs_crc32c_le_hw_byte(u32 crc, unsigned char const *data,
size_t length)
{
while (length--) {
__asm__ __volatile__(
".byte 0xf2, 0xf, 0x38, 0xf0, 0xf1"
:"=S"(crc)
:"0"(crc), "c"(*data)
);
data++;
}
return crc;
}
static inline u32 __pure btrfs_crc32c_le_hw(u32 crc, unsigned char const *p,
size_t len)
{
unsigned int iquotient = len / SCALE_F;
unsigned int iremainder = len % SCALE_F;
#ifdef CONFIG_X86_64
u64 *ptmp = (u64 *)p;
#else
u32 *ptmp = (u32 *)p;
#endif
while (iquotient--) {
__asm__ __volatile__(
".byte 0xf2, " REX_PRE "0xf, 0x38, 0xf1, 0xf1;"
:"=S"(crc)
:"0"(crc), "c"(*ptmp)
);
ptmp++;
}
if (iremainder)
crc = btrfs_crc32c_le_hw_byte(crc, (unsigned char *)ptmp,
iremainder);
return crc;
}
#endif /* CONFIG_X86 */
static inline u32 __btrfs_crc32c(u32 crc, unsigned char const *address,
size_t len)
{
#ifdef CONFIG_BTRFS_HW_SUM
if (cpu_has_xmm4_2)
return btrfs_crc32c_le_hw(crc, address, len);
#endif
return crc32c_le(crc, address, len);
}
#else
#define __btrfs_crc32c(seed, data, length) crc32c(seed, data, length)
#endif /* CONFIG_BTRFS_HW_SUM */
/*
* The implementation of crc32c_le() changed in linux-2.6.23; as of
* v0.13, btrfs-progs is using the latest version. We must work around
* older implementations of crc32c_le() found on older kernel versions.
*/
#define btrfs_crc32c(seed, data, length) \
__btrfs_crc32c(seed, (unsigned char const *)data, length)
#endif
/*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
#ifndef __DISKIO__
#define __DISKIO__
#define BTRFS_SUPER_INFO_OFFSET (16 * 1024)
#define BTRFS_SUPER_INFO_SIZE 4096
struct btrfs_device;
struct btrfs_fs_devices;
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
u32 blocksize, u64 parent_transid);
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
u64 parent_transid);
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize);
int clean_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *buf);
struct btrfs_root *open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
char *options);
int close_ctree(struct btrfs_root *root);
int write_ctree_super(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
u64 bytenr, u32 blocksize);
struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
u64 root_objectid);
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
struct btrfs_key *location,
const char *name, int namelen);
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
struct btrfs_key *location);
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
struct btrfs_key *location);
int btrfs_insert_dev_radix(struct btrfs_root *root,
struct block_device *bdev,
u64 device_id,
u64 block_start,
u64 num_blocks);
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
int wait_on_tree_block_writeback(struct btrfs_root *root,
struct extent_buffer *buf);
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
void btrfs_csum_final(u32 crc, char *result);
int btrfs_open_device(struct btrfs_device *dev);
int btrfs_verify_block_csum(struct btrfs_root *root,
struct extent_buffer *buf);
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
int metadata);
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
int rw, struct bio *bio, int mirror_num,
extent_submit_bio_hook_t *submit_bio_hook);
int btrfs_congested_async(struct btrfs_fs_info *info, int iodone);
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
int btrfs_write_tree_block(struct extent_buffer *buf);
int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
int btree_lock_page_hook(struct page *page);
#endif
#include <linux/fs.h>
#include <linux/types.h>
#include "ctree.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "export.h"
#include "compat.h"
#define BTRFS_FID_SIZE_NON_CONNECTABLE (offsetof(struct btrfs_fid, parent_objectid)/4)
#define BTRFS_FID_SIZE_CONNECTABLE (offsetof(struct btrfs_fid, parent_root_objectid)/4)
#define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid)/4)
static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
int connectable)
{
struct btrfs_fid *fid = (struct btrfs_fid *)fh;
struct inode *inode = dentry->d_inode;
int len = *max_len;
int type;
if ((len < BTRFS_FID_SIZE_NON_CONNECTABLE) ||
(connectable && len < BTRFS_FID_SIZE_CONNECTABLE))
return 255;
len = BTRFS_FID_SIZE_NON_CONNECTABLE;
type = FILEID_BTRFS_WITHOUT_PARENT;
fid->objectid = BTRFS_I(inode)->location.objectid;
fid->root_objectid = BTRFS_I(inode)->root->objectid;
fid->gen = inode->i_generation;
if (connectable && !S_ISDIR(inode->i_mode)) {
struct inode *parent;
u64 parent_root_id;
spin_lock(&dentry->d_lock);
parent = dentry->d_parent->d_inode;
fid->parent_objectid = BTRFS_I(parent)->location.objectid;
fid->parent_gen = parent->i_generation;
parent_root_id = BTRFS_I(parent)->root->objectid;
spin_unlock(&dentry->d_lock);
if (parent_root_id != fid->root_objectid) {
fid->parent_root_objectid = parent_root_id;
len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
type = FILEID_BTRFS_WITH_PARENT_ROOT;
} else {
len = BTRFS_FID_SIZE_CONNECTABLE;
type = FILEID_BTRFS_WITH_PARENT;
}
}
*max_len = len;
return type;
}
static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
u64 root_objectid, u32 generation)
{
struct btrfs_root *root;
struct inode *inode;
struct btrfs_key key;
key.objectid = root_objectid;
btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
key.offset = (u64)-1;
root = btrfs_read_fs_root_no_name(btrfs_sb(sb)->fs_info, &key);
if (IS_ERR(root))
return ERR_CAST(root);
key.objectid = objectid;
btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
key.offset = 0;
inode = btrfs_iget(sb, &key, root, NULL);
if (IS_ERR(inode))
return (void *)inode;
if (generation != inode->i_generation) {
iput(inode);
return ERR_PTR(-ESTALE);
}
return d_obtain_alias(inode);
}
static struct dentry *btrfs_fh_to_parent(struct super_block *sb, struct fid *fh,
int fh_len, int fh_type)
{
struct btrfs_fid *fid = (struct btrfs_fid *) fh;
u64 objectid, root_objectid;
u32 generation;
if (fh_type == FILEID_BTRFS_WITH_PARENT) {
if (fh_len != BTRFS_FID_SIZE_CONNECTABLE)
return NULL;
root_objectid = fid->root_objectid;
} else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) {
if (fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT)
return NULL;
root_objectid = fid->parent_root_objectid;
} else
return NULL;
objectid = fid->parent_objectid;
generation = fid->parent_gen;
return btrfs_get_dentry(sb, objectid, root_objectid, generation);
}
static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
int fh_len, int fh_type)
{
struct btrfs_fid *fid = (struct btrfs_fid *) fh;
u64 objectid, root_objectid;
u32 generation;
if ((fh_type != FILEID_BTRFS_WITH_PARENT ||
fh_len != BTRFS_FID_SIZE_CONNECTABLE) &&
(fh_type != FILEID_BTRFS_WITH_PARENT_ROOT ||
fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) &&
(fh_type != FILEID_BTRFS_WITHOUT_PARENT ||
fh_len != BTRFS_FID_SIZE_NON_CONNECTABLE))
return NULL;
objectid = fid->objectid;
root_objectid = fid->root_objectid;
generation = fid->gen;
return btrfs_get_dentry(sb, objectid, root_objectid, generation);
}
static struct dentry *btrfs_get_parent(struct dentry *child)
{
struct inode *dir = child->d_inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_key key;
struct btrfs_path *path;
struct extent_buffer *leaf;
int slot;
u64 objectid;
int ret;
path = btrfs_alloc_path();
key.objectid = dir->i_ino;
btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
key.offset = (u64)-1;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
/* Error */
btrfs_free_path(path);
return ERR_PTR(ret);
}
leaf = path->nodes[0];
slot = path->slots[0];
if (ret) {
/* btrfs_search_slot() returns the slot where we'd want to
insert a backref for parent inode #0xFFFFFFFFFFFFFFFF.
The _real_ backref, telling us what the parent inode
_actually_ is, will be in the slot _before_ the one
that btrfs_search_slot() returns. */
if (!slot) {
/* Unless there is _no_ key in the tree before... */
btrfs_free_path(path);
return ERR_PTR(-EIO);
}
slot--;
}
btrfs_item_key_to_cpu(leaf, &key, slot);
btrfs_free_path(path);
if (key.objectid != dir->i_ino || key.type != BTRFS_INODE_REF_KEY)
return ERR_PTR(-EINVAL);
objectid = key.offset;
/* If we are already at the root of a subvol, return the real root */
if (objectid == dir->i_ino)
return dget(dir->i_sb->s_root);
/* Build a new key for the inode item */
key.objectid = objectid;
btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
key.offset = 0;
return d_obtain_alias(btrfs_iget(root->fs_info->sb, &key, root, NULL));
}
const struct export_operations btrfs_export_ops = {
.encode_fh = btrfs_encode_fh,
.fh_to_dentry = btrfs_fh_to_dentry,
.fh_to_parent = btrfs_fh_to_parent,
.get_parent = btrfs_get_parent,
};
#ifndef BTRFS_EXPORT_H
#define BTRFS_EXPORT_H
#include <linux/exportfs.h>
extern const struct export_operations btrfs_export_ops;
struct btrfs_fid {
u64 objectid;
u64 root_objectid;
u32 gen;
u64 parent_objectid;
u32 parent_gen;
u64 parent_root_objectid;
} __attribute__ ((packed));
#endif
#ifndef __EXTENTIO__
#define __EXTENTIO__
#include <linux/rbtree.h>
/* bits for the extent state */
#define EXTENT_DIRTY 1
#define EXTENT_WRITEBACK (1 << 1)
#define EXTENT_UPTODATE (1 << 2)
#define EXTENT_LOCKED (1 << 3)
#define EXTENT_NEW (1 << 4)
#define EXTENT_DELALLOC (1 << 5)
#define EXTENT_DEFRAG (1 << 6)
#define EXTENT_DEFRAG_DONE (1 << 7)
#define EXTENT_BUFFER_FILLED (1 << 8)
#define EXTENT_ORDERED (1 << 9)
#define EXTENT_ORDERED_METADATA (1 << 10)
#define EXTENT_BOUNDARY (1 << 11)
#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
/*
* page->private values. Every page that is controlled by the extent
* map has page->private set to one.
*/
#define EXTENT_PAGE_PRIVATE 1
#define EXTENT_PAGE_PRIVATE_FIRST_PAGE 3
struct extent_state;
typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
struct bio *bio, int mirror_num);
struct extent_io_ops {
int (*fill_delalloc)(struct inode *inode, u64 start, u64 end);
int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
extent_submit_bio_hook_t *submit_bio_hook;
int (*merge_bio_hook)(struct page *page, unsigned long offset,
size_t size, struct bio *bio);
int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
u64 start, u64 end,
struct extent_state *state);
int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
u64 start, u64 end,
struct extent_state *state);
int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
struct extent_state *state);
int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
struct extent_state *state, int uptodate);
int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
unsigned long old, unsigned long bits);
int (*clear_bit_hook)(struct inode *inode, u64 start, u64 end,
unsigned long old, unsigned long bits);
int (*write_cache_pages_lock_hook)(struct page *page);
};
struct extent_io_tree {
struct rb_root state;
struct rb_root buffer;
struct address_space *mapping;
u64 dirty_bytes;
spinlock_t lock;
spinlock_t buffer_lock;
struct extent_io_ops *ops;
};
struct extent_state {
u64 start;
u64 end; /* inclusive */
struct rb_node rb_node;
struct extent_io_tree *tree;
wait_queue_head_t wq;
atomic_t refs;
unsigned long state;
/* for use by the FS */
u64 private;
struct list_head leak_list;
};
struct extent_buffer {
u64 start;
unsigned long len;
char *map_token;
char *kaddr;
unsigned long map_start;
unsigned long map_len;
struct page *first_page;
atomic_t refs;
int flags;
struct list_head leak_list;
struct rb_node rb_node;
struct mutex mutex;
};
struct extent_map_tree;
static inline struct extent_state *extent_state_next(struct extent_state *state)
{
struct rb_node *node;
node = rb_next(&state->rb_node);
if (!node)
return NULL;
return rb_entry(node, struct extent_state, rb_node);
}
typedef struct extent_map *(get_extent_t)(struct inode *inode,
struct page *page,
size_t page_offset,
u64 start, u64 len,
int create);
void extent_io_tree_init(struct extent_io_tree *tree,
struct address_space *mapping, gfp_t mask);
int try_release_extent_mapping(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask);
int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page);
int try_release_extent_state(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask);
int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
get_extent_t *get_extent);
int __init extent_io_init(void);
void extent_io_exit(void);
u64 count_range_bits(struct extent_io_tree *tree,
u64 *start, u64 search_end,
u64 max_bytes, unsigned long bits);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
int bits, int filled);
int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
int bits, gfp_t mask);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
int bits, int wake, int delete, gfp_t mask);
int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
int bits, gfp_t mask);
int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start,
u64 end, gfp_t mask);
int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
u64 *start_ret, u64 *end_ret, int bits);
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
u64 start, int bits);
int extent_invalidatepage(struct extent_io_tree *tree,
struct page *page, unsigned long offset);
int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
get_extent_t *get_extent,
struct writeback_control *wbc);
int extent_writepages(struct extent_io_tree *tree,
struct address_space *mapping,
get_extent_t *get_extent,
struct writeback_control *wbc);
int extent_readpages(struct extent_io_tree *tree,
struct address_space *mapping,
struct list_head *pages, unsigned nr_pages,
get_extent_t get_extent);
int extent_prepare_write(struct extent_io_tree *tree,
struct inode *inode, struct page *page,
unsigned from, unsigned to, get_extent_t *get_extent);
int extent_commit_write(struct extent_io_tree *tree,
struct inode *inode, struct page *page,
unsigned from, unsigned to);
sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
get_extent_t *get_extent);
int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end);
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
void set_page_extent_mapped(struct page *page);
struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
u64 start, unsigned long len,
struct page *page0,
gfp_t mask);
struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
u64 start, unsigned long len,
gfp_t mask);
void free_extent_buffer(struct extent_buffer *eb);
int read_extent_buffer_pages(struct extent_io_tree *tree,
struct extent_buffer *eb, u64 start, int wait,
get_extent_t *get_extent, int mirror_num);
static inline void extent_buffer_get(struct extent_buffer *eb)
{
atomic_inc(&eb->refs);
}
int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
unsigned long start,
unsigned long len);
void read_extent_buffer(struct extent_buffer *eb, void *dst,
unsigned long start,
unsigned long len);
void write_extent_buffer(struct extent_buffer *eb, const void *src,
unsigned long start, unsigned long len);
void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
unsigned long dst_offset, unsigned long src_offset,
unsigned long len);
void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len);
void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
unsigned long src_offset, unsigned long len);
void memset_extent_buffer(struct extent_buffer *eb, char c,
unsigned long start, unsigned long len);
int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
struct extent_buffer *eb);
int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end);
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
int clear_extent_buffer_dirty(struct extent_io_tree *tree,
struct extent_buffer *eb);
int set_extent_buffer_dirty(struct extent_io_tree *tree,
struct extent_buffer *eb);
int set_extent_buffer_uptodate(struct extent_io_tree *tree,
struct extent_buffer *eb);
int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
struct extent_buffer *eb);
int extent_buffer_uptodate(struct extent_io_tree *tree,
struct extent_buffer *eb);
int map_extent_buffer(struct extent_buffer *eb, unsigned long offset,
unsigned long min_len, char **token, char **map,
unsigned long *map_start,
unsigned long *map_len, int km);
int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
unsigned long min_len, char **token, char **map,
unsigned long *map_start,
unsigned long *map_len, int km);
void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
int release_extent_buffer_tail_pages(struct extent_buffer *eb);
int extent_range_uptodate(struct extent_io_tree *tree,
u64 start, u64 end);
#endif
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/version.h>
#include <linux/hardirq.h>
#include "extent_map.h"
/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
unsigned long extra_flags,
void (*ctor)(void *, struct kmem_cache *,
unsigned long));
static struct kmem_cache *extent_map_cache;
int __init extent_map_init(void)
{
extent_map_cache = btrfs_cache_create("extent_map",
sizeof(struct extent_map), 0,
NULL);
if (!extent_map_cache)
return -ENOMEM;
return 0;
}
void extent_map_exit(void)
{
if (extent_map_cache)
kmem_cache_destroy(extent_map_cache);
}
/**
* extent_map_tree_init - initialize extent map tree
* @tree: tree to initialize
* @mask: flags for memory allocations during tree operations
*
* Initialize the extent tree @tree. Should be called for each new inode
* or other user of the extent_map interface.
*/
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
tree->map.rb_node = NULL;
spin_lock_init(&tree->lock);
}
EXPORT_SYMBOL(extent_map_tree_init);
/**
* alloc_extent_map - allocate new extent map structure
* @mask: memory allocation flags
*
* Allocate a new extent_map structure. The new structure is
* returned with a reference count of one and needs to be
* freed using free_extent_map()
*/
struct extent_map *alloc_extent_map(gfp_t mask)
{
struct extent_map *em;
em = kmem_cache_alloc(extent_map_cache, mask);
if (!em || IS_ERR(em))
return em;
em->in_tree = 0;
em->flags = 0;
atomic_set(&em->refs, 1);
return em;
}
EXPORT_SYMBOL(alloc_extent_map);
/**
* free_extent_map - drop reference count of an extent_map
* @em: extent map being released
*
* Drops the reference count on @em by one and frees the structure
* if the count hits zero.
*/
void free_extent_map(struct extent_map *em)
{
if (!em)
return;
WARN_ON(atomic_read(&em->refs) == 0);
if (atomic_dec_and_test(&em->refs)) {
WARN_ON(em->in_tree);
kmem_cache_free(extent_map_cache, em);
}
}
EXPORT_SYMBOL(free_extent_map);
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
struct rb_node *node)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct extent_map *entry;
while (*p) {
parent = *p;
entry = rb_entry(parent, struct extent_map, rb_node);
WARN_ON(!entry->in_tree);
if (offset < entry->start)
p = &(*p)->rb_left;
else if (offset >= extent_map_end(entry))
p = &(*p)->rb_right;
else
return parent;
}
entry = rb_entry(node, struct extent_map, rb_node);
entry->in_tree = 1;
rb_link_node(node, parent, p);
rb_insert_color(node, root);
return NULL;
}
/*
* search through the tree for an extent_map with a given offset. If
* it can't be found, try to find some neighboring extents
*/
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
struct rb_node **prev_ret,
struct rb_node **next_ret)
{
struct rb_node *n = root->rb_node;
struct rb_node *prev = NULL;
struct rb_node *orig_prev = NULL;
struct extent_map *entry;
struct extent_map *prev_entry = NULL;
while (n) {
entry = rb_entry(n, struct extent_map, rb_node);
prev = n;
prev_entry = entry;
WARN_ON(!entry->in_tree);
if (offset < entry->start)
n = n->rb_left;
else if (offset >= extent_map_end(entry))
n = n->rb_right;
else
return n;
}
if (prev_ret) {
orig_prev = prev;
while (prev && offset >= extent_map_end(prev_entry)) {
prev = rb_next(prev);
prev_entry = rb_entry(prev, struct extent_map, rb_node);
}
*prev_ret = prev;
prev = orig_prev;
}
if (next_ret) {
prev_entry = rb_entry(prev, struct extent_map, rb_node);
while (prev && offset < prev_entry->start) {
prev = rb_prev(prev);
prev_entry = rb_entry(prev, struct extent_map, rb_node);
}
*next_ret = prev;
}
return NULL;
}
/*
* look for an offset in the tree, and if it can't be found, return
* the first offset we can find smaller than 'offset'.
*/
static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
struct rb_node *prev;
struct rb_node *ret;
ret = __tree_search(root, offset, &prev, NULL);
if (!ret)
return prev;
return ret;
}
/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
return 0;
if (extent_map_end(prev) == next->start &&
prev->flags == next->flags &&
prev->bdev == next->bdev &&
((next->block_start == EXTENT_MAP_HOLE &&
prev->block_start == EXTENT_MAP_HOLE) ||
(next->block_start == EXTENT_MAP_INLINE &&
prev->block_start == EXTENT_MAP_INLINE) ||
(next->block_start == EXTENT_MAP_DELALLOC &&
prev->block_start == EXTENT_MAP_DELALLOC) ||
(next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
next->block_start == extent_map_block_end(prev)))) {
return 1;
}
return 0;
}
/**
* add_extent_mapping - add new extent map to the extent tree
* @tree: tree to insert new map in
* @em: map to insert
*
* Insert @em into @tree or perform a simple forward/backward merge with
* existing mappings. The extent_map struct passed in will be inserted
* into the tree directly, with an additional reference taken, or a
* reference dropped if the merge attempt was successful.
*/
int add_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em)
{
int ret = 0;
struct extent_map *merge = NULL;
struct rb_node *rb;
struct extent_map *exist;
exist = lookup_extent_mapping(tree, em->start, em->len);
if (exist) {
free_extent_map(exist);
ret = -EEXIST;
goto out;
}
assert_spin_locked(&tree->lock);
rb = tree_insert(&tree->map, em->start, &em->rb_node);
if (rb) {
ret = -EEXIST;
free_extent_map(merge);
goto out;
}
atomic_inc(&em->refs);
if (em->start != 0) {
rb = rb_prev(&em->rb_node);
if (rb)
merge = rb_entry(rb, struct extent_map, rb_node);
if (rb && mergable_maps(merge, em)) {
em->start = merge->start;
em->len += merge->len;
em->block_start = merge->block_start;
merge->in_tree = 0;
rb_erase(&merge->rb_node, &tree->map);
free_extent_map(merge);
}
}
rb = rb_next(&em->rb_node);
if (rb)
merge = rb_entry(rb, struct extent_map, rb_node);
if (rb && mergable_maps(em, merge)) {
em->len += merge->len;
rb_erase(&merge->rb_node, &tree->map);
merge->in_tree = 0;
free_extent_map(merge);
}
out:
return ret;
}
EXPORT_SYMBOL(add_extent_mapping);
/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
if (start + len < start)
return (u64)-1;
return start + len;
}
/**
* lookup_extent_mapping - lookup extent_map
* @tree: tree to lookup in
* @start: byte offset to start the search
* @len: length of the lookup range
*
* Find and return the first extent_map struct in @tree that intersects the
* [start, len] range. There may be additional objects in the tree that
* intersect, so check the object returned carefully to make sure that no
* additional lookups are needed.
*/
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len)
{
struct extent_map *em;
struct rb_node *rb_node;
struct rb_node *prev = NULL;
struct rb_node *next = NULL;
u64 end = range_end(start, len);
assert_spin_locked(&tree->lock);
rb_node = __tree_search(&tree->map, start, &prev, &next);
if (!rb_node && prev) {
em = rb_entry(prev, struct extent_map, rb_node);
if (end > em->start && start < extent_map_end(em))
goto found;
}
if (!rb_node && next) {
em = rb_entry(next, struct extent_map, rb_node);
if (end > em->start && start < extent_map_end(em))
goto found;
}
if (!rb_node) {
em = NULL;
goto out;
}
if (IS_ERR(rb_node)) {
em = ERR_PTR(PTR_ERR(rb_node));
goto out;
}
em = rb_entry(rb_node, struct extent_map, rb_node);
if (end > em->start && start < extent_map_end(em))
goto found;
em = NULL;
goto out;
found:
atomic_inc(&em->refs);
out:
return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);
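/*
* Hypothetical usage sketch for the lookup interface documented above
* (not part of this commit): the tree lock must be held around the
* lookup, and the returned map carries a reference the caller drops
* with free_extent_map(). range_is_fully_mapped() is a made-up name.
*/
static int range_is_fully_mapped(struct extent_map_tree *tree,
u64 start, u64 len)
{
struct extent_map *em;
int ret = 0;
spin_lock(&tree->lock);
em = lookup_extent_mapping(tree, start, len);
spin_unlock(&tree->lock);
if (!em || IS_ERR(em))
return 0;
/* the map we got back may only partially cover the range */
if (em->start <= start && extent_map_end(em) >= start + len)
ret = 1;
free_extent_map(em); /* drop the reference taken by the lookup */
return ret;
}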
/**
* remove_extent_mapping - removes an extent_map from the extent tree
* @tree: extent tree to remove from
* @em: extent map being removed
*
* Removes @em from @tree. No reference counts are dropped, and no checks
* are done to see if the range is in use
*/
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
int ret = 0;
WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
assert_spin_locked(&tree->lock);
rb_erase(&em->rb_node, &tree->map);
em->in_tree = 0;
return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);
#ifndef __EXTENTMAP__
#define __EXTENTMAP__
#include <linux/rbtree.h>
#define EXTENT_MAP_LAST_BYTE (u64)-4
#define EXTENT_MAP_HOLE (u64)-3
#define EXTENT_MAP_INLINE (u64)-2
#define EXTENT_MAP_DELALLOC (u64)-1
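/*
* the values above are sentinels stored in extent_map->block_start for
* extents with no real on-disk mapping (holes, inline data, delalloc);
* mergable_maps() only merges extents whose sentinels match
*/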
/* bits for the flags field */
#define EXTENT_FLAG_PINNED 0 /* this entry not yet on disk, don't free it */
struct extent_map {
struct rb_node rb_node;
/* all of these are in bytes */
u64 start;
u64 len;
u64 block_start;
unsigned long flags;
struct block_device *bdev;
atomic_t refs;
int in_tree;
};
struct extent_map_tree {
struct rb_root map;
spinlock_t lock;
};
static inline u64 extent_map_end(struct extent_map *em)
{
if (em->start + em->len < em->start)
return (u64)-1;
return em->start + em->len;
}
static inline u64 extent_map_block_end(struct extent_map *em)
{
if (em->block_start + em->len < em->block_start)
return (u64)-1;
return em->block_start + em->len;
}
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask);
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len);
int add_extent_mapping(struct extent_map_tree *tree,
struct extent_map *em);
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
struct extent_map *alloc_extent_map(gfp_t mask);
void free_extent_map(struct extent_map *em);
int __init extent_map_init(void);
void extent_map_exit(void);
#endif
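The header above is the whole extent_map API: allocate a map, describe a byte
range, insert it under the tree lock, and drop references with
free_extent_map().  A minimal usage sketch, assuming GFP_NOFS allocation and
that add_extent_mapping() takes its own reference on success (the helper name
and field values are illustrative, not part of btrfs):

/* illustrative helper, assuming add_extent_mapping() grabs its own ref */
static int example_add_mapping(struct extent_map_tree *tree,
			       u64 start, u64 len, u64 disk_start)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;

	em->start = start;
	em->len = len;
	em->block_start = disk_start;	/* or EXTENT_MAP_HOLE and friends */

	spin_lock(&tree->lock);
	ret = add_extent_mapping(tree, em);
	spin_unlock(&tree->lock);

	free_extent_map(em);		/* drop the allocation reference */
	return ret;
}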
/*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#ifndef __HASH__
#define __HASH__
#include "crc32c.h"
static inline u64 btrfs_name_hash(const char *name, int len)
{
	return btrfs_crc32c((u32)~1, name, len);
}
#endif
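btrfs stores this hash in the offset field of directory item keys, so a name
lookup can build its search key directly from the name.  A minimal sketch
(dir_objectid, name and name_len are assumed context, not from this file):

	struct btrfs_key key;

	key.objectid = dir_objectid;	/* objectid of the parent directory */
	btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
	key.offset = btrfs_name_hash(name, name_len);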
/*
* Copyright (C) 2007 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License v2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*/
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
int find_name_in_backref(struct btrfs_path *path, const char *name,
			 int name_len, struct btrfs_inode_ref **ref_ret)
{
	struct extent_buffer *leaf;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long name_ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int len;

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	while (cur_offset < item_size) {
		ref = (struct btrfs_inode_ref *)(ptr + cur_offset);
		len = btrfs_inode_ref_name_len(leaf, ref);
		name_ptr = (unsigned long)(ref + 1);
		cur_offset += len + sizeof(*ref);
		if (len != name_len)
			continue;
		if (memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0) {
			*ref_ret = ref;
			return 1;
		}
	}
	return 0;
}
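/*
 * Remove one name from an inode ref item.  If the name is the only entry,
 * the whole item is deleted; otherwise the entries that follow it are
 * shifted down with memmove_extent_buffer() and the item is truncated to
 * its new size.
 */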
int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			const char *name, int name_len,
			u64 inode_objectid, u64 ref_objectid, u64 *index)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_inode_ref *ref;
	struct extent_buffer *leaf;
	unsigned long ptr;
	unsigned long item_start;
	u32 item_size;
	u32 sub_item_len;
	int ret;
	int del_len = name_len + sizeof(*ref);

	key.objectid = inode_objectid;
	key.offset = ref_objectid;
	btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	} else if (ret < 0) {
		goto out;
	}
	if (!find_name_in_backref(path, name, name_len, &ref)) {
		ret = -ENOENT;
		goto out;
	}
	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);

	if (index)
		*index = btrfs_inode_ref_index(leaf, ref);

	if (del_len == item_size) {
		ret = btrfs_del_item(trans, root, path);
		goto out;
	}
	ptr = (unsigned long)ref;
	sub_item_len = name_len + sizeof(*ref);
	item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
	memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
			      item_size - (ptr + sub_item_len - item_start));
	ret = btrfs_truncate_item(trans, root, path,
				  item_size - sub_item_len, 1);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return ret;
}
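/*
 * Add a name back reference for an inode.  If an item already exists for
 * this (inode, parent directory) pair, btrfs_insert_empty_item() returns
 * -EEXIST and the item is instead extended, with the new entry appended
 * after the existing ones.
 */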
int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   const char *name, int name_len,
			   u64 inode_objectid, u64 ref_objectid, u64 index)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	int ret;
	int ins_len = name_len + sizeof(*ref);

	key.objectid = inode_objectid;
	key.offset = ref_objectid;
	btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      ins_len);
	if (ret == -EEXIST) {
		u32 old_size;

		if (find_name_in_backref(path, name, name_len, &ref))
			goto out;

		old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
		ret = btrfs_extend_item(trans, root, path, ins_len);
		BUG_ON(ret);
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_inode_ref);
		ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
		ptr = (unsigned long)(ref + 1);
		ret = 0;
	} else if (ret < 0) {
		goto out;
	} else {
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_inode_ref);
		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
		btrfs_set_inode_ref_index(path->nodes[0], ref, index);
		ptr = (unsigned long)(ref + 1);
	}
	write_extent_buffer(path->nodes[0], name, ptr, name_len);
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_inode_item));
	if (ret == 0 && objectid > root->highest_inode)
		root->highest_inode = objectid;
	return ret;
}
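/*
 * Find the item described by @location.  When @location names a root item
 * with offset (u64)-1 there is no exact match, so if the search lands one
 * slot past an item with the same objectid and type, back up to that item
 * and return it instead.
 */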
int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
		       *root, struct btrfs_path *path,
		       struct btrfs_key *location, int mod)
{
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;

	ret = btrfs_search_slot(trans, root, location, path, ins_len, cow);
	if (ret > 0 && btrfs_key_type(location) == BTRFS_ROOT_ITEM_KEY &&
	    location->offset == (u64)-1 && path->slots[0] != 0) {
		slot = path->slots[0] - 1;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid == location->objectid &&
		    btrfs_key_type(&found_key) == btrfs_key_type(location)) {
			path->slots[0]--;
			return 0;
		}
	}
	return ret;
}
#ifndef __BTRFS_VERSION_H
#define __BTRFS_VERSION_H
#define BTRFS_BUILD_VERSION "Btrfs"
#endif