Commit 83ebade3
authored Sep 11, 2009 by Chris Mason

    Merge branch 'master' of
    git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

parents 74fca6a4 93c82d57
18 changed files with 580 additions and 317 deletions (+580 -317):

fs/btrfs/async-thread.c    +184  -46
fs/btrfs/async-thread.h     +12   -0
fs/btrfs/compression.c       +4   -4
fs/btrfs/ctree.h             +1   -1
fs/btrfs/disk-io.c          +19  -17
fs/btrfs/extent-tree.c       +2   -2
fs/btrfs/extent_io.c       +167 -126
fs/btrfs/extent_io.h         +9   -7
fs/btrfs/extent_map.c       +51   -4
fs/btrfs/extent_map.h        +2   -1
fs/btrfs/file.c              +8  -27
fs/btrfs/inode.c            +74  -38
fs/btrfs/ioctl.c             +2   -3
fs/btrfs/ordered-data.c     +17  -16
fs/btrfs/ordered-data.h      +3   -0
fs/btrfs/relocation.c        +3   -3
fs/btrfs/tree-log.c          +1   -1
fs/btrfs/volumes.c          +21  -21
fs/btrfs/async-thread.c

@@ -48,6 +48,9 @@ struct btrfs_worker_thread {
     /* number of things on the pending list */
     atomic_t num_pending;

+    /* reference counter for this struct */
+    atomic_t refs;
+
     unsigned long sequence;

     /* protects the pending list. */
@@ -93,17 +96,40 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
     }
 }

+static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
+{
+    struct btrfs_workers *workers = worker->workers;
+    unsigned long flags;
+
+    rmb();
+    if (!workers->atomic_start_pending)
+        return;
+
+    spin_lock_irqsave(&workers->lock, flags);
+    if (!workers->atomic_start_pending)
+        goto out;
+
+    workers->atomic_start_pending = 0;
+    if (workers->num_workers >= workers->max_workers)
+        goto out;
+
+    spin_unlock_irqrestore(&workers->lock, flags);
+    btrfs_start_workers(workers, 1);
+    return;
+
+out:
+    spin_unlock_irqrestore(&workers->lock, flags);
+}
+
 static noinline int run_ordered_completions(struct btrfs_workers *workers,
                         struct btrfs_work *work)
 {
-    unsigned long flags;
-
     if (!workers->ordered)
         return 0;

     set_bit(WORK_DONE_BIT, &work->flags);

-    spin_lock_irqsave(&workers->lock, flags);
+    spin_lock(&workers->order_lock);
     while (1) {
         if (!list_empty(&workers->prio_order_list)) {
@@ -126,45 +152,117 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
         if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
             break;

-        spin_unlock_irqrestore(&workers->lock, flags);
+        spin_unlock(&workers->order_lock);

         work->ordered_func(work);

         /* now take the lock again and call the freeing code */
-        spin_lock_irqsave(&workers->lock, flags);
+        spin_lock(&workers->order_lock);
         list_del(&work->order_list);
         work->ordered_free(work);
     }

-    spin_unlock_irqrestore(&workers->lock, flags);
+    spin_unlock(&workers->order_lock);
     return 0;
 }

+static void put_worker(struct btrfs_worker_thread *worker)
+{
+    if (atomic_dec_and_test(&worker->refs))
+        kfree(worker);
+}
+
+static int try_worker_shutdown(struct btrfs_worker_thread *worker)
+{
+    int freeit = 0;
+
+    spin_lock_irq(&worker->lock);
+    spin_lock_irq(&worker->workers->lock);
+    if (worker->workers->num_workers > 1 &&
+        worker->idle &&
+        !worker->working &&
+        !list_empty(&worker->worker_list) &&
+        list_empty(&worker->prio_pending) &&
+        list_empty(&worker->pending)) {
+        freeit = 1;
+        list_del_init(&worker->worker_list);
+        worker->workers->num_workers--;
+    }
+    spin_unlock_irq(&worker->workers->lock);
+    spin_unlock_irq(&worker->lock);
+
+    if (freeit)
+        put_worker(worker);
+    return freeit;
+}
+
+static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
+                                        struct list_head *prio_head,
+                                        struct list_head *head)
+{
+    struct btrfs_work *work = NULL;
+    struct list_head *cur = NULL;
+
+    if (!list_empty(prio_head))
+        cur = prio_head->next;
+
+    smp_mb();
+    if (!list_empty(&worker->prio_pending))
+        goto refill;
+
+    if (!list_empty(head))
+        cur = head->next;
+
+    if (cur)
+        goto out;
+
+refill:
+    spin_lock_irq(&worker->lock);
+    list_splice_tail_init(&worker->prio_pending, prio_head);
+    list_splice_tail_init(&worker->pending, head);
+
+    if (!list_empty(prio_head))
+        cur = prio_head->next;
+    else if (!list_empty(head))
+        cur = head->next;
+    spin_unlock_irq(&worker->lock);
+
+    if (!cur)
+        goto out_fail;
+
+out:
+    work = list_entry(cur, struct btrfs_work, list);
+
+out_fail:
+    return work;
+}
+
 /*
  * main loop for servicing work items
  */
 static int worker_loop(void *arg)
 {
     struct btrfs_worker_thread *worker = arg;
-    struct list_head *cur;
+    struct list_head head;
+    struct list_head prio_head;
     struct btrfs_work *work;
+
+    INIT_LIST_HEAD(&head);
+    INIT_LIST_HEAD(&prio_head);
+
     do {
-        spin_lock_irq(&worker->lock);
-again_locked:
+again:
         while (1) {
-            if (!list_empty(&worker->prio_pending))
-                cur = worker->prio_pending.next;
-            else if (!list_empty(&worker->pending))
-                cur = worker->pending.next;
-            else
+            work = get_next_work(worker, &prio_head, &head);
+            if (!work)
                 break;

-            work = list_entry(cur, struct btrfs_work, list);
             list_del(&work->list);
             clear_bit(WORK_QUEUED_BIT, &work->flags);

             work->worker = worker;
-            spin_unlock_irq(&worker->lock);

             work->func(work);
@@ -175,9 +273,13 @@ static int worker_loop(void *arg)
              */
             run_ordered_completions(worker->workers, work);

-            spin_lock_irq(&worker->lock);
-            check_idle_worker(worker);
+            check_pending_worker_creates(worker);
         }
+
+        spin_lock_irq(&worker->lock);
+        check_idle_worker(worker);
+
         if (freezing(current)) {
             worker->working = 0;
             spin_unlock_irq(&worker->lock);
@@ -216,8 +318,10 @@ static int worker_loop(void *arg)
                 spin_lock_irq(&worker->lock);
                 set_current_state(TASK_INTERRUPTIBLE);
                 if (!list_empty(&worker->pending) ||
-                    !list_empty(&worker->prio_pending))
-                    goto again_locked;
+                    !list_empty(&worker->prio_pending)) {
+                    spin_unlock_irq(&worker->lock);
+                    goto again;
+                }

                 /*
                  * this makes sure we get a wakeup when someone
@@ -226,8 +330,13 @@ static int worker_loop(void *arg)
                 worker->working = 0;
                 spin_unlock_irq(&worker->lock);

-                if (!kthread_should_stop())
-                    schedule();
+                if (!kthread_should_stop()) {
+                    schedule_timeout(HZ * 120);
+                    if (!worker->working &&
+                        try_worker_shutdown(worker)) {
+                        return 0;
+                    }
+                }
             }
             __set_current_state(TASK_RUNNING);
         }
@@ -242,16 +351,30 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
 {
     struct list_head *cur;
     struct btrfs_worker_thread *worker;
+    int can_stop;

+    spin_lock_irq(&workers->lock);
     list_splice_init(&workers->idle_list, &workers->worker_list);
     while (!list_empty(&workers->worker_list)) {
         cur = workers->worker_list.next;
         worker = list_entry(cur, struct btrfs_worker_thread,
                     worker_list);
-        kthread_stop(worker->task);
-        list_del(&worker->worker_list);
-        kfree(worker);
+
+        atomic_inc(&worker->refs);
+        workers->num_workers -= 1;
+        if (!list_empty(&worker->worker_list)) {
+            list_del_init(&worker->worker_list);
+            put_worker(worker);
+            can_stop = 1;
+        } else
+            can_stop = 0;
+        spin_unlock_irq(&workers->lock);
+        if (can_stop)
+            kthread_stop(worker->task);
+        spin_lock_irq(&workers->lock);
+        put_worker(worker);
     }
+    spin_unlock_irq(&workers->lock);
     return 0;
 }
@@ -266,10 +389,13 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
     INIT_LIST_HEAD(&workers->order_list);
     INIT_LIST_HEAD(&workers->prio_order_list);
     spin_lock_init(&workers->lock);
+    spin_lock_init(&workers->order_lock);
     workers->max_workers = max;
     workers->idle_thresh = 32;
     workers->name = name;
     workers->ordered = 0;
+    workers->atomic_start_pending = 0;
+    workers->atomic_worker_start = 0;
 }

 /*
@@ -293,7 +419,9 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
         INIT_LIST_HEAD(&worker->prio_pending);
         INIT_LIST_HEAD(&worker->worker_list);
         spin_lock_init(&worker->lock);
+
         atomic_set(&worker->num_pending, 0);
+        atomic_set(&worker->refs, 1);
         worker->workers = workers;
         worker->task = kthread_run(worker_loop, worker,
                        "btrfs-%s-%d", workers->name,
@@ -303,7 +431,6 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
             kfree(worker);
             goto fail;
         }
-
         spin_lock_irq(&workers->lock);
         list_add_tail(&worker->worker_list, &workers->idle_list);
         worker->idle = 1;
@@ -367,19 +494,33 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
 {
     struct btrfs_worker_thread *worker;
     unsigned long flags;
+    struct list_head *fallback;

 again:
     spin_lock_irqsave(&workers->lock, flags);
     worker = next_worker(workers);
-    spin_unlock_irqrestore(&workers->lock, flags);

     if (!worker) {
-        spin_lock_irqsave(&workers->lock, flags);
         if (workers->num_workers >= workers->max_workers) {
-            struct list_head *fallback = NULL;
-            /*
-             * we have failed to find any workers, just
-             * return the force one
-             */
-            if (!list_empty(&workers->worker_list))
-                fallback = workers->worker_list.next;
+            goto fallback;
+        } else if (workers->atomic_worker_start) {
+            workers->atomic_start_pending = 1;
+            goto fallback;
+        } else {
+            spin_unlock_irqrestore(&workers->lock, flags);
+            /* we're below the limit, start another worker */
+            btrfs_start_workers(workers, 1);
+            goto again;
+        }
+    }
+    spin_unlock_irqrestore(&workers->lock, flags);
+    return worker;
+
+fallback:
+    fallback = NULL;
+    /*
+     * we have failed to find any workers, just
+     * return the first one we can find.
+     */
+    if (!list_empty(&workers->worker_list))
+        fallback = workers->worker_list.next;
@@ -389,13 +530,6 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
     worker = list_entry(fallback,
               struct btrfs_worker_thread, worker_list);
     spin_unlock_irqrestore(&workers->lock, flags);
-        } else {
-            spin_unlock_irqrestore(&workers->lock, flags);
-            /* we're below the limit, start another worker */
-            btrfs_start_workers(workers, 1);
-            goto again;
-        }
-    }
     return worker;
 }
@@ -435,9 +569,9 @@ int btrfs_requeue_work(struct btrfs_work *work)
         worker->working = 1;
     }

-    spin_unlock_irqrestore(&worker->lock, flags);
     if (wake)
         wake_up_process(worker->task);
+    spin_unlock_irqrestore(&worker->lock, flags);
 out:

     return 0;
@@ -463,14 +597,18 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
     worker = find_worker(workers);
     if (workers->ordered) {
-        spin_lock_irqsave(&workers->lock, flags);
+        /*
+         * you're not allowed to do ordered queues from an
+         * interrupt handler
+         */
+        spin_lock(&workers->order_lock);
         if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
             list_add_tail(&work->order_list,
                       &workers->prio_order_list);
         } else {
             list_add_tail(&work->order_list, &workers->order_list);
         }
-        spin_unlock_irqrestore(&workers->lock, flags);
+        spin_unlock(&workers->order_lock);
     } else {
         INIT_LIST_HEAD(&work->order_list);
     }
@@ -492,10 +630,10 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
         wake = 1;
     worker->working = 1;

-    spin_unlock_irqrestore(&worker->lock, flags);
-
     if (wake)
         wake_up_process(worker->task);
+    spin_unlock_irqrestore(&worker->lock, flags);
+
 out:
     return 0;
 }
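One subtlety in the btrfs_stop_workers() rewrite above: a worker may now free itself from try_worker_shutdown(), so the stop path pins each worker with a reference before dropping the pool lock to call the sleeping kthread_stop(). Condensed from the hunk above (not new code, just the core of the pattern):

    atomic_inc(&worker->refs);          /* pin: the worker may exit on its own */
    spin_unlock_irq(&workers->lock);    /* kthread_stop() may sleep */
    if (can_stop)
        kthread_stop(worker->task);
    spin_lock_irq(&workers->lock);
    put_worker(worker);                 /* kfree() happens only on the last ref */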
fs/btrfs/async-thread.h

@@ -73,6 +73,15 @@ struct btrfs_workers {
     /* force completions in the order they were queued */
     int ordered;

+    /* more workers required, but in an interrupt handler */
+    int atomic_start_pending;
+
+    /*
+     * are we allowed to sleep while starting workers or are we required
+     * to start them at a later time?
+     */
+    int atomic_worker_start;
+
     /* list with all the work threads.  The workers on the idle thread
      * may be actively servicing jobs, but they haven't yet hit the
      * idle thresh limit above.
@@ -90,6 +99,9 @@ struct btrfs_workers {
     /* lock for finding the next worker thread to queue on */
     spinlock_t lock;

+    /* lock for the ordered lists */
+    spinlock_t order_lock;
+
     /* extra name for this worker, used for current->name */
     char *name;
 };

...
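The two new fields give a pool a way to grow without sleeping. A minimal sketch of how a pool opts in, mirroring the open_ctree() hunks in fs/btrfs/disk-io.c below (the pool name and thread counts here are illustrative only):

    struct btrfs_workers workers;

    btrfs_init_workers(&workers, "endio", 4);
    /* work is queued at interrupt time, so worker creation must be
     * deferred: find_worker() sets atomic_start_pending instead of
     * sleeping, and a running worker later calls
     * check_pending_worker_creates() to do the actual spawn */
    workers.atomic_worker_start = 1;
    btrfs_start_workers(&workers, 1);    /* start one; more spawn on demand */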
fs/btrfs/compression.c

@@ -506,10 +506,10 @@ static noinline int add_ra_bio_pages(struct inode *inode,
          */
         set_page_extent_mapped(page);
         lock_extent(tree, last_offset, end, GFP_NOFS);
-        spin_lock(&em_tree->lock);
+        read_lock(&em_tree->lock);
         em = lookup_extent_mapping(em_tree, last_offset,
                        PAGE_CACHE_SIZE);
-        spin_unlock(&em_tree->lock);
+        read_unlock(&em_tree->lock);

         if (!em || last_offset < em->start ||
             (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
@@ -593,11 +593,11 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
     em_tree = &BTRFS_I(inode)->extent_tree;

     /* we need the actual starting offset of this extent in the file */
-    spin_lock(&em_tree->lock);
+    read_lock(&em_tree->lock);
     em = lookup_extent_mapping(em_tree,
                    page_offset(bio->bi_io_vec->bv_page),
                    PAGE_CACHE_SIZE);
-    spin_unlock(&em_tree->lock);
+    read_unlock(&em_tree->lock);

     compressed_len = em->block_len;
     cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);

...
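The two compression.c hunks above are part of a tree-wide conversion of the extent_map_tree lock from a spinlock to a rwlock (the extent_map.c/h changes themselves are cut off in this capture). A condensed sketch of the split that the disk-io.c and extent-tree.c hunks below repeat:

    /* lookups do not modify the tree and can run concurrently */
    read_lock(&em_tree->lock);
    em = lookup_extent_mapping(em_tree, start, len);
    read_unlock(&em_tree->lock);

    /* insertions mutate the tree and take the exclusive write side */
    write_lock(&em_tree->lock);
    ret = add_extent_mapping(em_tree, em);
    write_unlock(&em_tree->lock);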
fs/btrfs/ctree.h

@@ -2290,7 +2290,7 @@ extern struct file_operations btrfs_file_operations;
 int btrfs_drop_extents(struct btrfs_trans_handle *trans,
                struct btrfs_root *root, struct inode *inode,
                u64 start, u64 end, u64 locked_end,
-               u64 inline_limit, u64 *hint_block);
+               u64 inline_limit, u64 *hint_block, int drop_cache);
 int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
                   struct btrfs_root *root,
                   struct inode *inode, u64 start, u64 end);

...
fs/btrfs/disk-io.c

@@ -123,15 +123,15 @@ static struct extent_map *btree_get_extent(struct inode *inode,
     struct extent_map *em;
     int ret;

-    spin_lock(&em_tree->lock);
+    read_lock(&em_tree->lock);
     em = lookup_extent_mapping(em_tree, start, len);
     if (em) {
         em->bdev =
             BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
-        spin_unlock(&em_tree->lock);
+        read_unlock(&em_tree->lock);
         goto out;
     }
-    spin_unlock(&em_tree->lock);
+    read_unlock(&em_tree->lock);

     em = alloc_extent_map(GFP_NOFS);
     if (!em) {
@@ -144,7 +144,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
     em->block_start = 0;
     em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

-    spin_lock(&em_tree->lock);
+    write_lock(&em_tree->lock);
     ret = add_extent_mapping(em_tree, em);
     if (ret == -EEXIST) {
         u64 failed_start = em->start;
@@ -163,7 +163,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
         free_extent_map(em);
         em = NULL;
     }
-    spin_unlock(&em_tree->lock);
+    write_unlock(&em_tree->lock);

     if (ret)
         em = ERR_PTR(ret);
@@ -1325,9 +1325,9 @@ static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
     offset = page_offset(page);

     em_tree = &BTRFS_I(inode)->extent_tree;
-    spin_lock(&em_tree->lock);
+    read_lock(&em_tree->lock);
     em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
-    spin_unlock(&em_tree->lock);
+    read_unlock(&em_tree->lock);
     if (!em) {
         __unplug_io_fn(bdi, page);
         return;
@@ -1698,7 +1698,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
         err = -EINVAL;
         goto fail_iput;
     }
-
+printk("thread pool is %d\n", fs_info->thread_pool_size);
     /*
      * we need to start all the end_io workers up front because the
      * queue work function gets called at interrupt time, and so it
@@ -1743,20 +1743,22 @@ struct btrfs_root *open_ctree(struct super_block *sb,
     fs_info->endio_workers.idle_thresh = 4;
     fs_info->endio_meta_workers.idle_thresh = 4;

-    fs_info->endio_write_workers.idle_thresh = 64;
-    fs_info->endio_meta_write_workers.idle_thresh = 64;
+    fs_info->endio_write_workers.idle_thresh = 2;
+    fs_info->endio_meta_write_workers.idle_thresh = 2;
+
+    fs_info->endio_workers.atomic_worker_start = 1;
+    fs_info->endio_meta_workers.atomic_worker_start = 1;
+    fs_info->endio_write_workers.atomic_worker_start = 1;
+    fs_info->endio_meta_write_workers.atomic_worker_start = 1;

     btrfs_start_workers(&fs_info->workers, 1);
     btrfs_start_workers(&fs_info->submit_workers, 1);
     btrfs_start_workers(&fs_info->delalloc_workers, 1);
     btrfs_start_workers(&fs_info->fixup_workers, 1);
-    btrfs_start_workers(&fs_info->endio_workers,
-                fs_info->thread_pool_size);
-    btrfs_start_workers(&fs_info->endio_meta_workers,
-                fs_info->thread_pool_size);
-    btrfs_start_workers(&fs_info->endio_meta_write_workers,
-                fs_info->thread_pool_size);
-    btrfs_start_workers(&fs_info->endio_write_workers,
-                fs_info->thread_pool_size);
+    btrfs_start_workers(&fs_info->endio_workers, 1);
+    btrfs_start_workers(&fs_info->endio_meta_workers, 1);
+    btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
+    btrfs_start_workers(&fs_info->endio_write_workers, 1);

     fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
     fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,

...
fs/btrfs/extent-tree.c

@@ -5396,9 +5396,9 @@ static noinline int relocate_data_extent(struct inode *reloc_inode,
     lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
     while (1) {
         int ret;
-        spin_lock(&em_tree->lock);
+        write_lock(&em_tree->lock);
         ret = add_extent_mapping(em_tree, em);
-        spin_unlock(&em_tree->lock);
+        write_unlock(&em_tree->lock);
         if (ret != -EEXIST) {
             free_extent_map(em);
             break;

...
fs/btrfs/extent_io.c

@@ -367,10 +367,10 @@ static int insert_state(struct extent_io_tree *tree,
     }
     if (bits & EXTENT_DIRTY)
         tree->dirty_bytes += end - start + 1;
-    set_state_cb(tree, state, bits);
-    state->state |= bits;
     state->start = start;
     state->end = end;
+    set_state_cb(tree, state, bits);
+    state->state |= bits;
     node = tree_insert(&tree->state, end, &state->rb_node);
     if (node) {
         struct extent_state *found;
@@ -471,10 +471,14 @@ static int clear_state_bit(struct extent_io_tree *tree,
  * bits were already set, or zero if none of the bits were already set.
  */
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-             int bits, int wake, int delete, gfp_t mask)
+             int bits, int wake, int delete,
+             struct extent_state **cached_state,
+             gfp_t mask)
 {
     struct extent_state *state;
+    struct extent_state *cached;
     struct extent_state *prealloc = NULL;
+    struct rb_node *next_node;
     struct rb_node *node;
     u64 last_end;
     int err;
@@ -488,6 +492,17 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
     }

     spin_lock(&tree->lock);
+    if (cached_state) {
+        cached = *cached_state;
+        *cached_state = NULL;
+        if (cached->tree && cached->start == start) {
+            atomic_dec(&cached->refs);
+            state = cached;
+            last_end = state->end;
+            goto found;
+        }
+        free_extent_state(cached);
+    }
     /*
      * this search will find the extents that end after
      * our range starts
@@ -496,6 +511,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
     if (!node)
         goto out;
     state = rb_entry(node, struct extent_state, rb_node);
+hit_next:
     if (state->start > end)
         goto out;
     WARN_ON(state->end < start);
@@ -555,11 +571,21 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
         prealloc = NULL;
         goto out;
     }
+found:
+    if (state->end < end && prealloc && !need_resched())
+        next_node = rb_next(&state->rb_node);
+    else
+        next_node = NULL;

     set |= clear_state_bit(tree, state, bits, wake, delete);
     if (last_end == (u64)-1)
         goto out;
     start = last_end + 1;
+    if (start <= end && next_node) {
+        state = rb_entry(next_node, struct extent_state,
+                 rb_node);
+        if (state->start == start)
+            goto hit_next;
+    }
     goto search_again;

 out:
@@ -653,26 +679,37 @@ static void set_state_bits(struct extent_io_tree *tree,
     state->state |= bits;
 }

+static void cache_state(struct extent_state *state,
+            struct extent_state **cached_ptr)
+{
+    if (cached_ptr && !(*cached_ptr)) {
+        if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
+            *cached_ptr = state;
+            atomic_inc(&state->refs);
+        }
+    }
+}
+
 /*
- * set some bits on a range in the tree.  This may require allocations
- * or sleeping, so the gfp mask is used to indicate what is allowed.
+ * set some bits on a range in the tree.  This may require allocations or
+ * sleeping, so the gfp mask is used to indicate what is allowed.
  *
- * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
- * range already has the desired bits set.  The start of the existing
- * range is returned in failed_start in this case.
+ * If any of the exclusive bits are set, this will fail with -EEXIST if some
+ * part of the range already has the desired bits set.  The start of the
+ * existing range is returned in failed_start in this case.
  *
- * [start, end] is inclusive
- * This takes the tree lock.
+ * [start, end] is inclusive This takes the tree lock.
  */
 static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-              int bits, int exclusive, u64 *failed_start,
+              int bits, int exclusive_bits, u64 *failed_start,
+              struct extent_state **cached_state,
               gfp_t mask)
 {
     struct extent_state *state;
     struct extent_state *prealloc = NULL;
     struct rb_node *node;
     int err = 0;
-    int set;
     u64 last_start;
     u64 last_end;

 again:
@@ -683,6 +720,13 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
     }

     spin_lock(&tree->lock);
+    if (cached_state && *cached_state) {
+        state = *cached_state;
+        if (state->start == start && state->tree) {
+            node = &state->rb_node;
+            goto hit_next;
+        }
+    }
     /*
      * this search will find all the extents that end after
      * our range starts.
@@ -694,8 +738,8 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
         BUG_ON(err == -EEXIST);
         goto out;
     }
-
     state = rb_entry(node, struct extent_state, rb_node);
+hit_next:
     last_start = state->start;
     last_end = state->end;
@@ -706,17 +750,28 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
      * Just lock what we found and keep going
      */
     if (state->start == start && state->end <= end) {
-        set = state->state & bits;
-        if (set && exclusive) {
+        struct rb_node *next_node;
+        if (state->state & exclusive_bits) {
             *failed_start = state->start;
             err = -EEXIST;
             goto out;
         }
+
         set_state_bits(tree, state, bits);
+        cache_state(state, cached_state);
         merge_state(tree, state);
         if (last_end == (u64)-1)
             goto out;
+
         start = last_end + 1;
+        if (start < end && prealloc && !need_resched()) {
+            next_node = rb_next(node);
+            if (next_node) {
+                state = rb_entry(next_node, struct extent_state,
+                         rb_node);
+                if (state->start == start)
+                    goto hit_next;
+            }
+        }
         goto search_again;
     }
@@ -737,8 +792,7 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
      * desired bit on it.
      */
     if (state->start < start) {
-        set = state->state & bits;
-        if (exclusive && set) {
+        if (state->state & exclusive_bits) {
             *failed_start = start;
             err = -EEXIST;
             goto out;
@@ -750,6 +804,7 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
             goto out;
         if (state->end <= end) {
             set_state_bits(tree, state, bits);
+            cache_state(state, cached_state);
             merge_state(tree, state);
             if (last_end == (u64)-1)
                 goto out;
@@ -774,6 +829,7 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
         this_end = last_start - 1;
         err = insert_state(tree, prealloc, start, this_end,
                    bits);
+        cache_state(prealloc, cached_state);
         prealloc = NULL;
         BUG_ON(err == -EEXIST);
         if (err)
@@ -788,8 +844,7 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
      * on the first half
      */
     if (state->start <= end && state->end > end) {
-        set = state->state & bits;
-        if (exclusive && set) {
+        if (state->state & exclusive_bits) {
             *failed_start = start;
             err = -EEXIST;
             goto out;
@@ -798,6 +853,7 @@ static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
         BUG_ON(err == -EEXIST);

         set_state_bits(tree, prealloc, bits);
+        cache_state(prealloc, cached_state);
         merge_state(tree, prealloc);
         prealloc = NULL;
         goto out;
@@ -826,86 +882,64 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
              gfp_t mask)
 {
     return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
-                  mask);
-}
-
-int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
-               gfp_t mask)
-{
-    return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL,
-                  mask);
+                  NULL, mask);
 }

 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
             int bits, gfp_t mask)
 {
     return set_extent_bit(tree, start, end, bits, 0, NULL,
-                  mask);
+                  NULL, mask);
 }

 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
               int bits, gfp_t mask)
 {
-    return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
+    return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
 }

 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
             gfp_t mask)
 {
     return set_extent_bit(tree, start, end,
-                  EXTENT_DELALLOC | EXTENT_DIRTY,
-                  0, NULL, mask);
+                  EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
+                  0, NULL, NULL, mask);
 }

 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                gfp_t mask)
 {
     return clear_extent_bit(tree, start, end,
-                EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
-}
-
-int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
-             gfp_t mask)
-{
-    return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0, mask);
+                EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
+                NULL, mask);
 }

 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
            gfp_t mask)
 {
     return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
-                  mask);
+                  NULL, mask);
 }

 static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                 gfp_t mask)
 {
-    return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
+    return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, NULL, mask);
 }

 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
             gfp_t mask)
 {
     return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
-                  mask);
+                  NULL, mask);
 }

 static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
                  u64 end, gfp_t mask)
 {
-    return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
-}
-
-static int set_extent_writeback(struct extent_io_tree *tree, u64 start,
-             u64 end, gfp_t mask)
-{
-    return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
-                  0, NULL, mask);
-}
-
-static int clear_extent_writeback(struct extent_io_tree *tree, u64 start,
-                  u64 end, gfp_t mask)
-{
-    return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
+    return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
+                NULL, mask);
 }

 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
@@ -917,13 +951,15 @@ int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
  * either insert or lock state struct between start and end use mask to tell
  * us if waiting is desired.
  */
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+             int bits, struct extent_state **cached_state, gfp_t mask)
 {
     int err;
     u64 failed_start;
     while (1) {
-        err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
-                     &failed_start, mask);
+        err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
+                     EXTENT_LOCKED, &failed_start,
+                     cached_state, mask);
         if (err == -EEXIST && (mask & __GFP_WAIT)) {
             wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
             start = failed_start;
@@ -935,27 +971,40 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
     return err;
 }

+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+{
+    return lock_extent_bits(tree, start, end, 0, NULL, mask);
+}
+
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
             gfp_t mask)
 {
     int err;
     u64 failed_start;

-    err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
-                 &failed_start, mask);
+    err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
+                 &failed_start, NULL, mask);
     if (err == -EEXIST) {
         if (failed_start > start)
             clear_extent_bit(tree, start, failed_start - 1,
-                     EXTENT_LOCKED, 1, 0, mask);
+                     EXTENT_LOCKED, 1, 0, NULL, mask);
         return 0;
     }
     return 1;
 }

+int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
+             struct extent_state **cached, gfp_t mask)
+{
+    return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
+                mask);
+}
+
 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end,
           gfp_t mask)
 {
-    return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
+    return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
+                mask);
 }

 /*
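The lock_extent_bits()/unlock_extent_cached() pair added above lets a caller keep a referenced pointer to the extent_state it just locked and pass it back on later calls, skipping the rb-tree search. A condensed usage sketch, following the find_lock_delalloc_range() hunk later in this file:

    struct extent_state *cached_state = NULL;

    lock_extent_bits(tree, start, end, 0, &cached_state, GFP_NOFS);

    /* the cached state lets test_range_bit() skip tree_search() */
    if (!test_range_bit(tree, start, end, EXTENT_DELALLOC, 1,
                        cached_state)) {
        /* hands the reference back and clears *cached_state */
        unlock_extent_cached(tree, start, end, &cached_state, GFP_NOFS);
        return;
    }
    free_extent_state(cached_state);    /* drop our extra reference */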
@@ -974,7 +1023,6 @@ int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
         page_cache_release(page);
         index++;
     }
-    set_extent_dirty(tree, start, end, GFP_NOFS);
     return 0;
 }
@@ -994,7 +1042,6 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
         page_cache_release(page);
         index++;
     }
-    set_extent_writeback(tree, start, end, GFP_NOFS);
     return 0;
 }
@@ -1232,6 +1279,7 @@ static noinline u64 find_lock_delalloc_range(struct inode *inode,
     u64 delalloc_start;
     u64 delalloc_end;
     u64 found;
+    struct extent_state *cached_state = NULL;
     int ret;
     int loops = 0;
@@ -1269,6 +1317,7 @@ static noinline u64 find_lock_delalloc_range(struct inode *inode,
         /* some of the pages are gone, lets avoid looping by
          * shortening the size of the delalloc range we're searching
          */
+        free_extent_state(cached_state);
         if (!loops) {
             unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
             max_bytes = PAGE_CACHE_SIZE - offset;
@@ -1282,18 +1331,21 @@ static noinline u64 find_lock_delalloc_range(struct inode *inode,
     BUG_ON(ret);

     /* step three, lock the state bits for the whole range */
-    lock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
+    lock_extent_bits(tree, delalloc_start, delalloc_end,
+             0, &cached_state, GFP_NOFS);

     /* then test to make sure it is all still delalloc */
     ret = test_range_bit(tree, delalloc_start, delalloc_end,
-                 EXTENT_DELALLOC, 1);
+                 EXTENT_DELALLOC, 1, cached_state);
     if (!ret) {
-        unlock_extent(tree, delalloc_start, delalloc_end, GFP_NOFS);
+        unlock_extent_cached(tree, delalloc_start, delalloc_end,
+                     &cached_state, GFP_NOFS);
         __unlock_for_delalloc(inode, locked_page,
                   delalloc_start, delalloc_end);
         cond_resched();
         goto again;
     }
+    free_extent_state(cached_state);
     *start = delalloc_start;
     *end = delalloc_end;
 out_failed:
@@ -1307,7 +1359,8 @@ int extent_clear_unlock_delalloc(struct inode *inode,
                    int clear_unlock,
                    int clear_delalloc, int clear_dirty,
                    int set_writeback,
-                   int end_writeback)
+                   int end_writeback,
+                   int set_private2)
 {
     int ret;
     struct page *pages[16];
@@ -1325,8 +1378,9 @@ int extent_clear_unlock_delalloc(struct inode *inode,
     if (clear_delalloc)
         clear_bits |= EXTENT_DELALLOC;

-    clear_extent_bit(tree, start, end, clear_bits, 1, 0, GFP_NOFS);
-    if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
+    clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
+    if (!(unlock_pages || clear_dirty || set_writeback || end_writeback ||
+          set_private2))
         return 0;

     while (nr_pages > 0) {
@@ -1334,6 +1388,10 @@ int extent_clear_unlock_delalloc(struct inode *inode,
                      min_t(unsigned long,
                      nr_pages, ARRAY_SIZE(pages)), pages);
         for (i = 0; i < ret; i++) {
+
+            if (set_private2)
+                SetPagePrivate2(pages[i]);
+
             if (pages[i] == locked_page) {
                 page_cache_release(pages[i]);
                 continue;
@@ -1476,13 +1534,16 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
  * range is found set.
  */
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-           int bits, int filled)
+           int bits, int filled, struct extent_state *cached)
 {
     struct extent_state *state = NULL;
     struct rb_node *node;
     int bitset = 0;

     spin_lock(&tree->lock);
-    node = tree_search(tree, start);
+    if (cached && cached->tree && cached->start == start)
+        node = &cached->rb_node;
+    else
+        node = tree_search(tree, start);
     while (node && start <= end) {
         state = rb_entry(node, struct extent_state, rb_node);
@@ -1526,7 +1587,7 @@ static int check_page_uptodate(struct extent_io_tree *tree,
 {
     u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
     u64 end = start + PAGE_CACHE_SIZE - 1;
-    if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
+    if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
         SetPageUptodate(page);
     return 0;
 }
@@ -1540,7 +1601,7 @@ static int check_page_locked(struct extent_io_tree *tree,
 {
     u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
     u64 end = start + PAGE_CACHE_SIZE - 1;
-    if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
+    if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
         unlock_page(page);
     return 0;
 }
@@ -1552,9 +1613,6 @@ static int check_page_locked(struct extent_io_tree *tree,
 static int check_page_writeback(struct extent_io_tree *tree,
                  struct page *page)
 {
-    u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
-    u64 end = start + PAGE_CACHE_SIZE - 1;
-    if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
-        end_page_writeback(page);
+    end_page_writeback(page);
     return 0;
 }
@@ -1613,13 +1671,11 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
         }

         if (!uptodate) {
-            clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
+            clear_extent_uptodate(tree, start, end, GFP_NOFS);
             ClearPageUptodate(page);
             SetPageError(page);
         }

-        clear_extent_writeback(tree, start, end, GFP_ATOMIC);
-
         if (whole_page)
             end_page_writeback(page);
         else
@@ -1983,7 +2039,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
             continue;
         }
         /* the get_extent function already copied into the page */
-        if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
+        if (test_range_bit(tree, cur, cur_end,
+                   EXTENT_UPTODATE, 1, NULL)) {
             check_page_uptodate(tree, page);
             unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
             cur = cur + iosize;
@@ -2078,6 +2135,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
     u64 iosize;
     u64 unlock_start;
     sector_t sector;
+    struct extent_state *cached_state = NULL;
     struct extent_map *em;
     struct block_device *bdev;
     int ret;
@@ -2124,6 +2182,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
     delalloc_end = 0;
     page_started = 0;
     if (!epd->extent_locked) {
+        u64 delalloc_to_write;
         /*
          * make sure the wbc mapping index is at least updated
          * to this page.
@@ -2143,6 +2202,14 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
             tree->ops->fill_delalloc(inode, page, delalloc_start,
                          delalloc_end, &page_started,
                          &nr_written);
+            delalloc_to_write = (delalloc_end -
+                    max_t(u64, page_offset(page),
+                          delalloc_start) + 1) >>
+                    PAGE_CACHE_SHIFT;
+            if (wbc->nr_to_write < delalloc_to_write) {
+                wbc->nr_to_write = min_t(long, 8192,
+                         delalloc_to_write);
+            }
             delalloc_start = delalloc_end + 1;
         }
@@ -2160,15 +2227,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
             goto done_unlocked;
         }
     }
-    lock_extent(tree, start, page_end, GFP_NOFS);
-
-    unlock_start = start;
-
     if (tree->ops && tree->ops->writepage_start_hook) {
         ret = tree->ops->writepage_start_hook(page, start,
                               page_end);
         if (ret == -EAGAIN) {
-            unlock_extent(tree, start, page_end, GFP_NOFS);
             redirty_page_for_writepage(wbc, page);
             update_nr_written(page, wbc, nr_written);
             unlock_page(page);
@@ -2184,12 +2246,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
     update_nr_written(page, wbc, nr_written + 1);

     end = page_end;
-    if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
-        printk(KERN_ERR "btrfs delalloc bits after lock_extent\n");
-
     if (last_byte <= start) {
-        clear_extent_dirty(tree, start, page_end, GFP_NOFS);
-        unlock_extent(tree, start, page_end, GFP_NOFS);
         if (tree->ops && tree->ops->writepage_end_io_hook)
             tree->ops->writepage_end_io_hook(page, start,
                              page_end, NULL, 1);
@@ -2197,13 +2254,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         goto done;
     }

-    set_extent_uptodate(tree, start, page_end, GFP_NOFS);
     blocksize = inode->i_sb->s_blocksize;

     while (cur <= end) {
         if (cur >= last_byte) {
-            clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
-            unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
             if (tree->ops && tree->ops->writepage_end_io_hook)
                 tree->ops->writepage_end_io_hook(page, cur,
                              page_end, NULL, 1);
@@ -2235,12 +2289,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
          */
         if (compressed || block_start == EXTENT_MAP_HOLE ||
             block_start == EXTENT_MAP_INLINE) {
-            clear_extent_dirty(tree, cur,
-                       cur + iosize - 1, GFP_NOFS);
-
-            unlock_extent(tree, unlock_start, cur + iosize - 1,
-                      GFP_NOFS);
-
             /*
              * end_io notification does not happen here for
              * compressed extents
@@ -2265,13 +2313,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         }
         /* leave this out until we have a page_mkwrite call */
         if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
-                   EXTENT_DIRTY, 0)) {
+                   EXTENT_DIRTY, 0, NULL)) {
             cur = cur + iosize;
             pg_offset += iosize;
             continue;
         }
-        clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
+
         if (tree->ops && tree->ops->writepage_io_hook) {
             ret = tree->ops->writepage_io_hook(page, cur,
                         cur + iosize - 1);
@@ -2309,12 +2356,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
             set_page_writeback(page);
         end_page_writeback(page);
     }
-    if (unlock_start <= page_end)
-        unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
     unlock_page(page);

 done_unlocked:

+    /* drop our reference on any cached states */
+    free_extent_state(cached_state);
     return 0;
 }

...
@@ -2339,7 +2386,6 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
...
@@ -2339,7 +2386,6 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
writepage_t
writepage
,
void
*
data
,
writepage_t
writepage
,
void
*
data
,
void
(
*
flush_fn
)(
void
*
))
void
(
*
flush_fn
)(
void
*
))
{
{
struct
backing_dev_info
*
bdi
=
mapping
->
backing_dev_info
;
int
ret
=
0
;
int
ret
=
0
;
int
done
=
0
;
int
done
=
0
;
struct
pagevec
pvec
;
struct
pagevec
pvec
;
...
@@ -2414,10 +2460,6 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
...
@@ -2414,10 +2460,6 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
}
}
if
(
ret
||
wbc
->
nr_to_write
<=
0
)
if
(
ret
||
wbc
->
nr_to_write
<=
0
)
done
=
1
;
done
=
1
;
if
(
wbc
->
nonblocking
&&
bdi_write_congested
(
bdi
))
{
wbc
->
encountered_congestion
=
1
;
done
=
1
;
}
}
}
pagevec_release
(
&
pvec
);
pagevec_release
(
&
pvec
);
cond_resched
();
cond_resched
();
...
@@ -2604,10 +2646,10 @@ int extent_invalidatepage(struct extent_io_tree *tree,
...
@@ -2604,10 +2646,10 @@ int extent_invalidatepage(struct extent_io_tree *tree,
return
0
;
return
0
;
lock_extent
(
tree
,
start
,
end
,
GFP_NOFS
);
lock_extent
(
tree
,
start
,
end
,
GFP_NOFS
);
wait_on_
extent_writeback
(
tree
,
start
,
end
);
wait_on_
page_writeback
(
page
);
clear_extent_bit
(
tree
,
start
,
end
,
clear_extent_bit
(
tree
,
start
,
end
,
EXTENT_LOCKED
|
EXTENT_DIRTY
|
EXTENT_DELALLOC
,
EXTENT_LOCKED
|
EXTENT_DIRTY
|
EXTENT_DELALLOC
,
1
,
1
,
GFP_NOFS
);
1
,
1
,
NULL
,
GFP_NOFS
);
return
0
;
return
0
;
}
}
...
@@ -2687,7 +2729,7 @@ int extent_prepare_write(struct extent_io_tree *tree,
...
@@ -2687,7 +2729,7 @@ int extent_prepare_write(struct extent_io_tree *tree,
!
isnew
&&
!
PageUptodate
(
page
)
&&
!
isnew
&&
!
PageUptodate
(
page
)
&&
(
block_off_end
>
to
||
block_off_start
<
from
)
&&
(
block_off_end
>
to
||
block_off_start
<
from
)
&&
!
test_range_bit
(
tree
,
block_start
,
cur_end
,
!
test_range_bit
(
tree
,
block_start
,
cur_end
,
EXTENT_UPTODATE
,
1
))
{
EXTENT_UPTODATE
,
1
,
NULL
))
{
u64
sector
;
u64
sector
;
u64
extent_offset
=
block_start
-
em
->
start
;
u64
extent_offset
=
block_start
-
em
->
start
;
size_t
iosize
;
size_t
iosize
;
...
@@ -2701,7 +2743,7 @@ int extent_prepare_write(struct extent_io_tree *tree,
...
@@ -2701,7 +2743,7 @@ int extent_prepare_write(struct extent_io_tree *tree,
*/
*/
set_extent_bit
(
tree
,
block_start
,
set_extent_bit
(
tree
,
block_start
,
block_start
+
iosize
-
1
,
block_start
+
iosize
-
1
,
EXTENT_LOCKED
,
0
,
NULL
,
GFP_NOFS
);
EXTENT_LOCKED
,
0
,
NULL
,
NULL
,
GFP_NOFS
);
ret
=
submit_extent_page
(
READ
,
tree
,
page
,
ret
=
submit_extent_page
(
READ
,
tree
,
page
,
sector
,
iosize
,
page_offset
,
em
->
bdev
,
sector
,
iosize
,
page_offset
,
em
->
bdev
,
NULL
,
1
,
NULL
,
1
,
...
@@ -2742,13 +2784,13 @@ int try_release_extent_state(struct extent_map_tree *map,
...
@@ -2742,13 +2784,13 @@ int try_release_extent_state(struct extent_map_tree *map,
int
ret
=
1
;
int
ret
=
1
;
if
(
test_range_bit
(
tree
,
start
,
end
,
if
(
test_range_bit
(
tree
,
start
,
end
,
EXTENT_IOBITS
|
EXTENT_ORDERED
,
0
))
EXTENT_IOBITS
,
0
,
NULL
))
ret
=
0
;
ret
=
0
;
else
{
else
{
if
((
mask
&
GFP_NOFS
)
==
GFP_NOFS
)
if
((
mask
&
GFP_NOFS
)
==
GFP_NOFS
)
mask
=
GFP_NOFS
;
mask
=
GFP_NOFS
;
clear_extent_bit
(
tree
,
start
,
end
,
EXTENT_UPTODATE
,
clear_extent_bit
(
tree
,
start
,
end
,
EXTENT_UPTODATE
,
1
,
1
,
mask
);
1
,
1
,
NULL
,
mask
);
}
}
return
ret
;
return
ret
;
}
}
...
@@ -2771,29 +2813,28 @@ int try_release_extent_mapping(struct extent_map_tree *map,
...
@@ -2771,29 +2813,28 @@ int try_release_extent_mapping(struct extent_map_tree *map,
u64
len
;
u64
len
;
while
(
start
<=
end
)
{
while
(
start
<=
end
)
{
len
=
end
-
start
+
1
;
len
=
end
-
start
+
1
;
spin
_lock
(
&
map
->
lock
);
write
_lock
(
&
map
->
lock
);
em
=
lookup_extent_mapping
(
map
,
start
,
len
);
em
=
lookup_extent_mapping
(
map
,
start
,
len
);
if
(
!
em
||
IS_ERR
(
em
))
{
if
(
!
em
||
IS_ERR
(
em
))
{
spin
_unlock
(
&
map
->
lock
);
write
_unlock
(
&
map
->
lock
);
break
;
break
;
}
}
if
(
test_bit
(
EXTENT_FLAG_PINNED
,
&
em
->
flags
)
||
if
(
test_bit
(
EXTENT_FLAG_PINNED
,
&
em
->
flags
)
||
em
->
start
!=
start
)
{
em
->
start
!=
start
)
{
spin
_unlock
(
&
map
->
lock
);
write
_unlock
(
&
map
->
lock
);
free_extent_map
(
em
);
free_extent_map
(
em
);
break
;
break
;
}
}
if
(
!
test_range_bit
(
tree
,
em
->
start
,
if
(
!
test_range_bit
(
tree
,
em
->
start
,
extent_map_end
(
em
)
-
1
,
extent_map_end
(
em
)
-
1
,
EXTENT_LOCKED
|
EXTENT_WRITEBACK
|
EXTENT_LOCKED
|
EXTENT_WRITEBACK
,
EXTENT_ORDERED
,
0
,
NULL
))
{
0
))
{
remove_extent_mapping
(
map
,
em
);
remove_extent_mapping
(
map
,
em
);
/* once for the rb tree */
/* once for the rb tree */
free_extent_map
(
em
);
free_extent_map
(
em
);
}
}
start
=
extent_map_end
(
em
);
start
=
extent_map_end
(
em
);
spin
_unlock
(
&
map
->
lock
);
write
_unlock
(
&
map
->
lock
);
/* once for us */
/* once for us */
free_extent_map
(
em
);
free_extent_map
(
em
);
...
@@ -3203,7 +3244,7 @@ int extent_range_uptodate(struct extent_io_tree *tree,
...
@@ -3203,7 +3244,7 @@ int extent_range_uptodate(struct extent_io_tree *tree,
int
uptodate
;
int
uptodate
;
unsigned
long
index
;
unsigned
long
index
;
ret
=
test_range_bit
(
tree
,
start
,
end
,
EXTENT_UPTODATE
,
1
);
ret
=
test_range_bit
(
tree
,
start
,
end
,
EXTENT_UPTODATE
,
1
,
NULL
);
if
(
ret
)
if
(
ret
)
return
1
;
return
1
;
while
(
start
<=
end
)
{
while
(
start
<=
end
)
{
...
@@ -3233,7 +3274,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
...
@@ -3233,7 +3274,7 @@ int extent_buffer_uptodate(struct extent_io_tree *tree,
return
1
;
return
1
;
ret
=
test_range_bit
(
tree
,
eb
->
start
,
eb
->
start
+
eb
->
len
-
1
,
ret
=
test_range_bit
(
tree
,
eb
->
start
,
eb
->
start
+
eb
->
len
-
1
,
EXTENT_UPTODATE
,
1
);
EXTENT_UPTODATE
,
1
,
NULL
);
if
(
ret
)
if
(
ret
)
return
ret
;
return
ret
;
...
@@ -3269,7 +3310,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
...
@@ -3269,7 +3310,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
return
0
;
return
0
;
if
(
test_range_bit
(
tree
,
eb
->
start
,
eb
->
start
+
eb
->
len
-
1
,
if
(
test_range_bit
(
tree
,
eb
->
start
,
eb
->
start
+
eb
->
len
-
1
,
EXTENT_UPTODATE
,
1
))
{
EXTENT_UPTODATE
,
1
,
NULL
))
{
return
0
;
return
0
;
}
}
...
...
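The delalloc accounting added to __extent_writepage above converts the flushed byte range into a page count before topping up wbc->nr_to_write. A minimal userspace sketch of that arithmetic, assuming 4 KiB pages; writeback_ctl, delalloc_pages and PAGE_SHIFT are hypothetical stand-ins for the kernel types and are not the kernel code itself:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, standing in for PAGE_CACHE_SHIFT */

/* hypothetical stand-in for struct writeback_control */
struct writeback_ctl {
	long nr_to_write;
};

/* convert an inclusive delalloc byte range into pages, mirroring
 * (delalloc_end - max(page_offset, delalloc_start) + 1) >> shift */
static long delalloc_pages(uint64_t page_offset, uint64_t delalloc_start,
			   uint64_t delalloc_end)
{
	uint64_t first = page_offset > delalloc_start ? page_offset
						      : delalloc_start;
	return (long)((delalloc_end - first + 1) >> PAGE_SHIFT);
}

int main(void)
{
	struct writeback_ctl wbc = { .nr_to_write = 16 };
	long to_write = delalloc_pages(0, 0, (128 << PAGE_SHIFT) - 1);

	/* bump the writeback budget so one writepage call can cover the
	 * whole delalloc run, capped at 8192 pages as in the diff */
	if (wbc.nr_to_write < to_write)
		wbc.nr_to_write = to_write < 8192 ? to_write : 8192;

	printf("delalloc pages %ld, nr_to_write %ld\n", to_write,
	       wbc.nr_to_write);
	return 0;
}

Run against a 128-page delalloc run, the budget grows from 16 to 128, so the flush is not cut off after the first few pages.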
fs/btrfs/extent_io.h
@@ -13,10 +13,8 @@
 #define EXTENT_DEFRAG (1 << 6)
 #define EXTENT_DEFRAG_DONE (1 << 7)
 #define EXTENT_BUFFER_FILLED (1 << 8)
-#define EXTENT_ORDERED (1 << 9)
-#define EXTENT_ORDERED_METADATA (1 << 10)
-#define EXTENT_BOUNDARY (1 << 11)
-#define EXTENT_NODATASUM (1 << 12)
+#define EXTENT_BOUNDARY (1 << 9)
+#define EXTENT_NODATASUM (1 << 10)
 #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)

 /* flags for bio submission */
...
@@ -142,6 +140,8 @@ int try_release_extent_state(struct extent_map_tree *map,
 			     struct extent_io_tree *tree, struct page *page,
 			     gfp_t mask);
 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
+int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+		     int bits, struct extent_state **cached, gfp_t mask);
 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask);
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
 		    gfp_t mask);
...
@@ -155,11 +155,12 @@ u64 count_range_bits(struct extent_io_tree *tree,
 		     u64 max_bytes, unsigned long bits);
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		   int bits, int filled);
+		   int bits, int filled, struct extent_state *cached_state);
 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 		      int bits, gfp_t mask);
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-		     int bits, int wake, int delete, gfp_t mask);
+		     int bits, int wake, int delete,
+		     struct extent_state **cached, gfp_t mask);
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 		    int bits, gfp_t mask);
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
...
@@ -282,5 +283,6 @@ int extent_clear_unlock_delalloc(struct inode *inode,
 				 int clear_unlock,
 				 int clear_delalloc, int clear_dirty,
 				 int set_writeback,
-				 int end_writeback);
+				 int end_writeback,
+				 int set_private2);
 #endif
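Several of these prototypes grow an optional cached extent_state so a caller that already holds the state covering a range can hand it back and skip the rb-tree search. A toy sketch of that calling convention, assuming a flat array in place of the kernel's rb-tree; struct state, tree_search and test_bit_cached are made-up names, not the kernel API:

#include <stdio.h>

/* toy extent state covering [start, end]; the real one lives in an
 * rb-tree keyed by range */
struct state {
	unsigned long long start, end;
	int bits;
};

/* stand-in for the tree walk the cached pointer lets us skip */
static struct state *tree_search(struct state *tree, int n,
				 unsigned long long start)
{
	for (int i = 0; i < n; i++)
		if (tree[i].start <= start && start <= tree[i].end)
			return &tree[i];
	return NULL;
}

/* sketch of the new shape: test_range_bit(..., filled, cached_state),
 * where cached_state may be NULL */
static int test_bit_cached(struct state *tree, int n,
			   unsigned long long start, unsigned long long end,
			   int bits, struct state *cached)
{
	struct state *st;

	/* use the cached state only if it really covers the range */
	if (cached && cached->start <= start && end <= cached->end)
		st = cached;
	else
		st = tree_search(tree, n, start);
	return st && (st->bits & bits);
}

int main(void)
{
	struct state tree[] = { { 0, 4095, 0x1 }, { 4096, 8191, 0x2 } };
	struct state *hit = &tree[1];

	/* first call searches; a caller holding the state passes it back */
	printf("%d\n", test_bit_cached(tree, 2, 4096, 8191, 0x2, NULL));
	printf("%d\n", test_bit_cached(tree, 2, 4096, 8191, 0x2, hit));
	return 0;
}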
fs/btrfs/extent_map.c
@@ -36,7 +36,7 @@ void extent_map_exit(void)
 void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
 {
 	tree->map.rb_node = NULL;
-	spin_lock_init(&tree->lock);
+	rwlock_init(&tree->lock);
 }

 /**
...
@@ -198,6 +198,56 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
 	return 0;
 }

+int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
+{
+	int ret = 0;
+	struct extent_map *merge = NULL;
+	struct rb_node *rb;
+	struct extent_map *em;
+
+	write_lock(&tree->lock);
+	em = lookup_extent_mapping(tree, start, len);
+
+	WARN_ON(em->start != start || !em);
+
+	if (!em)
+		goto out;
+
+	clear_bit(EXTENT_FLAG_PINNED, &em->flags);
+
+	if (em->start != 0) {
+		rb = rb_prev(&em->rb_node);
+		if (rb)
+			merge = rb_entry(rb, struct extent_map, rb_node);
+		if (rb && mergable_maps(merge, em)) {
+			em->start = merge->start;
+			em->len += merge->len;
+			em->block_len += merge->block_len;
+			em->block_start = merge->block_start;
+			merge->in_tree = 0;
+			rb_erase(&merge->rb_node, &tree->map);
+			free_extent_map(merge);
+		}
+	}
+
+	rb = rb_next(&em->rb_node);
+	if (rb)
+		merge = rb_entry(rb, struct extent_map, rb_node);
+	if (rb && mergable_maps(em, merge)) {
+		em->len += merge->len;
+		em->block_len += merge->len;
+		rb_erase(&merge->rb_node, &tree->map);
+		merge->in_tree = 0;
+		free_extent_map(merge);
+	}
+
+	free_extent_map(em);
+out:
+	write_unlock(&tree->lock);
+	return ret;
+}
+
 /**
  * add_extent_mapping - add new extent map to the extent tree
  * @tree: tree to insert new map in
...
@@ -222,7 +272,6 @@ int add_extent_mapping(struct extent_map_tree *tree,
 		ret = -EEXIST;
 		goto out;
 	}
-	assert_spin_locked(&tree->lock);
 	rb = tree_insert(&tree->map, em->start, &em->rb_node);
 	if (rb) {
 		ret = -EEXIST;
...
@@ -285,7 +334,6 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
 	struct rb_node *next = NULL;
 	u64 end = range_end(start, len);

-	assert_spin_locked(&tree->lock);
 	rb_node = __tree_search(&tree->map, start, &prev, &next);
 	if (!rb_node && prev) {
 		em = rb_entry(prev, struct extent_map, rb_node);
...
@@ -331,7 +379,6 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
 	int ret = 0;

 	WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
-	assert_spin_locked(&tree->lock);
 	rb_erase(&em->rb_node, &tree->map);
 	em->in_tree = 0;
 	return ret;
...
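The new unpin_extent_cache() above clears EXTENT_FLAG_PINNED and then folds the mapping into its neighbours, merges that were deliberately skipped while the extent was still in flight. A simplified userspace model of that merge step; struct em, mergable and unpin are hypothetical names, a sorted array stands in for the rb-tree, and bounds checks are elided:

#include <stdio.h>

/* toy extent map: sorted, non-overlapping [start, start+len) ranges */
struct em {
	unsigned long long start, len, block_start;
	int pinned;
	int live;
};

static int mergable(struct em *prev, struct em *next)
{
	/* contiguous in the file and on disk, and neither pinned */
	return prev->live && next->live && !prev->pinned && !next->pinned &&
	       prev->start + prev->len == next->start &&
	       prev->block_start + prev->len == next->block_start;
}

/* sketch of unpin_extent_cache(): clear the pin, then try to absorb the
 * previous and next entries, as the kernel function above does */
static void unpin(struct em *maps, int idx)
{
	maps[idx].pinned = 0;

	if (idx > 0 && mergable(&maps[idx - 1], &maps[idx])) {
		maps[idx].start = maps[idx - 1].start;
		maps[idx].block_start = maps[idx - 1].block_start;
		maps[idx].len += maps[idx - 1].len;
		maps[idx - 1].live = 0;
	}
	if (mergable(&maps[idx], &maps[idx + 1])) {
		maps[idx].len += maps[idx + 1].len;
		maps[idx + 1].live = 0;
	}
}

int main(void)
{
	struct em maps[] = {
		{ 0,    4096, 1000 * 4096ULL, 0, 1 },
		{ 4096, 4096, 1001 * 4096ULL, 1, 1 },	/* pinned while in flight */
		{ 8192, 4096, 1002 * 4096ULL, 0, 1 },
	};

	unpin(maps, 1);
	for (int i = 0; i < 3; i++)
		if (maps[i].live)
			printf("[%llu, +%llu)\n", maps[i].start, maps[i].len);
	return 0;
}

Once the middle extent is unpinned, all three contiguous ranges collapse into one [0, +12288) entry, which is why the ordered-IO path can insert pinned maps without fragmenting the cache permanently.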
fs/btrfs/extent_map.h
@@ -31,7 +31,7 @@ struct extent_map {

 struct extent_map_tree {
 	struct rb_root map;
-	spinlock_t lock;
+	rwlock_t lock;
 };

 static inline u64 extent_map_end(struct extent_map *em)
...
@@ -59,4 +59,5 @@ struct extent_map *alloc_extent_map(gfp_t mask);
 void free_extent_map(struct extent_map *em);
 int __init extent_map_init(void);
 void extent_map_exit(void);
+int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len);
 #endif
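The lock protecting the extent map tree changes from spinlock_t to rwlock_t here, so lookup_extent_mapping() callers throughout the diff can run concurrently while add/remove still serialize on the write side. A pthread sketch of the same pattern; map_tree is a stand-in, not the kernel struct, and this compiles as ordinary userspace C with -lpthread:

#include <pthread.h>
#include <stdio.h>

/* toy analogue of struct extent_map_tree after this commit: the tree
 * body is guarded by a reader/writer lock instead of a spinlock */
struct map_tree {
	pthread_rwlock_t lock;
	int entries;
};

static void *reader(void *arg)
{
	struct map_tree *t = arg;

	/* lookup paths now take the read side, so any number of
	 * lookups can proceed in parallel */
	pthread_rwlock_rdlock(&t->lock);
	printf("lookup sees %d entries\n", t->entries);
	pthread_rwlock_unlock(&t->lock);
	return NULL;
}

int main(void)
{
	struct map_tree t = { PTHREAD_RWLOCK_INITIALIZER, 0 };
	pthread_t r1, r2;

	/* insertion/removal paths take the exclusive write side */
	pthread_rwlock_wrlock(&t.lock);
	t.entries = 1;
	pthread_rwlock_unlock(&t.lock);

	pthread_create(&r1, NULL, reader, &t);
	pthread_create(&r2, NULL, reader, &t);
	pthread_join(r1, NULL);
	pthread_join(r2, NULL);
	return 0;
}

The payoff shows up in the read-mostly paths converted below (btrfs_get_extent, btrfs_num_copies, __btrfs_map_block): lookups no longer contend with each other, only with writers.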
fs/btrfs/file.c
@@ -112,8 +112,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
 	int err = 0;
 	int i;
 	struct inode *inode = fdentry(file)->d_inode;
-	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-	u64 hint_byte;
 	u64 num_bytes;
 	u64 start_pos;
 	u64 end_of_last_block;
...
@@ -125,22 +123,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
 		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

 	end_of_last_block = start_pos + num_bytes - 1;
-
-	lock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-	trans = btrfs_join_transaction(root, 1);
-	if (!trans) {
-		err = -ENOMEM;
-		goto out_unlock;
-	}
-	btrfs_set_trans_block_group(trans, inode);
-	hint_byte = 0;
-
-	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-
-	/* check for reserved extents on each page, we don't want
-	 * to reset the delalloc bit on things that already have
-	 * extents reserved.
-	 */
 	btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block);
 	for (i = 0; i < num_pages; i++) {
 		struct page *p = pages[i];
...
@@ -155,9 +137,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
 		 * at this time.
 		 */
 	}
-	err = btrfs_end_transaction(trans, root);
-out_unlock:
-	unlock_extent(io_tree, start_pos, end_of_last_block, GFP_NOFS);
 	return err;
 }
...
@@ -189,18 +168,18 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 		if (!split2)
 			split2 = alloc_extent_map(GFP_NOFS);

-		spin_lock(&em_tree->lock);
+		write_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, start, len);
 		if (!em) {
-			spin_unlock(&em_tree->lock);
+			write_unlock(&em_tree->lock);
 			break;
 		}
 		flags = em->flags;
 		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
-			spin_unlock(&em_tree->lock);
 			if (em->start <= start &&
 			    (!testend || em->start + em->len >= start + len)) {
 				free_extent_map(em);
+				write_unlock(&em_tree->lock);
 				break;
 			}
 			if (start < em->start) {
...
@@ -210,6 +189,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 				start = em->start + em->len;
 			}
 			free_extent_map(em);
+			write_unlock(&em_tree->lock);
 			continue;
 		}
 		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
...
@@ -260,7 +240,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 			free_extent_map(split);
 			split = NULL;
 		}
-		spin_unlock(&em_tree->lock);
+		write_unlock(&em_tree->lock);

 		/* once for us */
 		free_extent_map(em);
...
@@ -289,7 +269,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
 noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
 		       struct btrfs_root *root, struct inode *inode,
 		       u64 start, u64 end, u64 locked_end,
-		       u64 inline_limit, u64 *hint_byte)
+		       u64 inline_limit, u64 *hint_byte, int drop_cache)
 {
 	u64 extent_end = 0;
 	u64 search_start = start;
...
@@ -314,6 +294,7 @@ noinline int btrfs_drop_extents(struct btrfs_trans_handle *trans,
 	int ret;

 	inline_limit = 0;
+	if (drop_cache)
 		btrfs_drop_extent_cache(inode, start, end - 1, 0);

 	path = btrfs_alloc_path();
...
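btrfs_drop_extent_cache(), now invoked only when the new drop_cache flag of btrfs_drop_extents() is set, trims any cached mapping that straddles the dropped range into up to two survivors (the split/split2 maps in the kernel code). A hedged sketch of just that trimming; drop_range is a made-up helper and inclusive byte ranges replace the kernel's extent_map fields:

#include <stdio.h>

/* toy extent map entry: an inclusive [start, end] byte range */
struct em { unsigned long long start, end; };

/* sketch of what btrfs_drop_extent_cache() does to one cached map that
 * overlaps the dropped range: keep the pieces outside [start, end] */
static int drop_range(struct em map, unsigned long long start,
		      unsigned long long end, struct em out[2])
{
	int n = 0;

	if (map.start < start)			/* piece before the drop */
		out[n++] = (struct em){ map.start, start - 1 };
	if (map.end > end)			/* piece after the drop */
		out[n++] = (struct em){ end + 1, map.end };
	return n;
}

int main(void)
{
	struct em out[2];
	/* drop [4096, 8191] out of a cached map covering [0, 12287] */
	int n = drop_range((struct em){ 0, 12287 }, 4096, 8191, out);

	for (int i = 0; i < n; i++)
		printf("kept [%llu, %llu]\n", out[i].start, out[i].end);
	return 0;
}

The drop_cache flag exists because insert_reserved_file_extent() in inode.c below now wants the pinned map left in the cache until the btree item is fully inserted, at which point unpin_extent_cache() releases and re-merges it.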
fs/btrfs/inode.c
@@ -231,7 +231,8 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
 	}

 	ret = btrfs_drop_extents(trans, root, inode, start,
-				 aligned_end, aligned_end, start, &hint_byte);
+				 aligned_end, aligned_end, start,
+				 &hint_byte, 1);
 	BUG_ON(ret);

 	if (isize > actual_end)
...
@@ -240,7 +241,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
 				   inline_len, compressed_size,
 				   compressed_pages);
 	BUG_ON(ret);
-	btrfs_drop_extent_cache(inode, start, aligned_end, 0);
+	btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
 	return 0;
 }
...
@@ -425,7 +426,7 @@ static noinline int compress_file_range(struct inode *inode,
 			extent_clear_unlock_delalloc(inode,
 						     &BTRFS_I(inode)->io_tree,
 						     start, end, NULL, 1, 0,
-						     0, 1, 1, 1);
+						     0, 1, 1, 1, 0);
 			ret = 0;
 			goto free_pages_out;
 		}
...
@@ -611,9 +612,9 @@ static noinline int submit_compressed_extents(struct inode *inode,
 		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);

 		while (1) {
-			spin_lock(&em_tree->lock);
+			write_lock(&em_tree->lock);
 			ret = add_extent_mapping(em_tree, em);
-			spin_unlock(&em_tree->lock);
+			write_unlock(&em_tree->lock);
 			if (ret != -EEXIST) {
 				free_extent_map(em);
 				break;
...
@@ -640,7 +641,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 				async_extent->start,
 				async_extent->start +
 				async_extent->ram_size - 1,
-				NULL, 1, 1, 0, 1, 1, 0);
+				NULL, 1, 1, 0, 1, 1, 0, 0);

 		ret = btrfs_submit_compressed_write(inode,
 				    async_extent->start,
...
@@ -713,7 +714,7 @@ static noinline int cow_file_range(struct inode *inode,
 		extent_clear_unlock_delalloc(inode,
 					     &BTRFS_I(inode)->io_tree,
 					     start, end, NULL, 1, 1,
-					     1, 1, 1, 1);
+					     1, 1, 1, 1, 0);
 		*nr_written = *nr_written +
 		     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
 		*page_started = 1;
...
@@ -747,9 +748,9 @@ static noinline int cow_file_range(struct inode *inode,
 		set_bit(EXTENT_FLAG_PINNED, &em->flags);

 		while (1) {
-			spin_lock(&em_tree->lock);
+			write_lock(&em_tree->lock);
 			ret = add_extent_mapping(em_tree, em);
-			spin_unlock(&em_tree->lock);
+			write_unlock(&em_tree->lock);
 			if (ret != -EEXIST) {
 				free_extent_map(em);
 				break;
...
@@ -776,11 +777,14 @@ static noinline int cow_file_range(struct inode *inode,
 		/* we're not doing compressed IO, don't unlock the first
 		 * page (which the caller expects to stay locked), don't
 		 * clear any dirty bits and don't set any writeback bits
+		 *
+		 * Do set the Private2 bit so we know this page was properly
+		 * setup for writepage
 		 */
 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
 					     start, start + ram_size - 1,
 					     locked_page, unlock, 1,
-					     1, 0, 0, 0);
+					     1, 0, 0, 0, 1);
 		disk_num_bytes -= cur_alloc_size;
 		num_bytes -= cur_alloc_size;
 		alloc_hint = ins.objectid + ins.offset;
...
@@ -853,7 +857,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
 	int limit = 10 * 1024 * 1042;

 	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
-			 EXTENT_DELALLOC, 1, 0, GFP_NOFS);
+			 EXTENT_DELALLOC, 1, 0, NULL, GFP_NOFS);
 	while (start < end) {
 		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
 		async_cow->inode = inode;
...
@@ -1080,9 +1084,9 @@ static noinline int run_delalloc_nocow(struct inode *inode,
 			em->bdev = root->fs_info->fs_devices->latest_bdev;
 			set_bit(EXTENT_FLAG_PINNED, &em->flags);
 			while (1) {
-				spin_lock(&em_tree->lock);
+				write_lock(&em_tree->lock);
 				ret = add_extent_mapping(em_tree, em);
-				spin_unlock(&em_tree->lock);
+				write_unlock(&em_tree->lock);
 				if (ret != -EEXIST) {
 					free_extent_map(em);
 					break;
...
@@ -1101,7 +1105,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
 					cur_offset, cur_offset + num_bytes - 1,
-					locked_page, 1, 1, 1, 0, 0, 0);
+					locked_page, 1, 1, 1, 0, 0, 0, 1);
 		cur_offset = extent_end;
 		if (cur_offset > end)
 			break;
...
@@ -1374,10 +1378,8 @@ static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);

 	/* already ordered? We're done */
-	if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
-			   EXTENT_ORDERED, 0)) {
+	if (PagePrivate2(page))
 		goto out;
-	}

 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
 	if (ordered) {
...
@@ -1413,11 +1415,9 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
 	struct inode *inode = page->mapping->host;
 	struct btrfs_writepage_fixup *fixup;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	int ret;

-	ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
-			     EXTENT_ORDERED, 0);
-	if (ret)
+	/* this page is properly in the ordered list */
+	if (TestClearPagePrivate2(page))
 		return 0;

 	if (PageChecked(page))
...
@@ -1455,9 +1455,19 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
 	BUG_ON(!path);

 	path->leave_spinning = 1;
+
+	/*
+	 * we may be replacing one extent in the tree with another.
+	 * The new extent is pinned in the extent map, and we don't want
+	 * to drop it from the cache until it is completely in the btree.
+	 *
+	 * So, tell btrfs_drop_extents to leave this extent in the cache.
+	 * the caller is expected to unpin it and allow it to be merged
+	 * with the others.
+	 */
 	ret = btrfs_drop_extents(trans, root, inode, file_pos,
 				 file_pos + num_bytes, locked_end,
-				 file_pos, &hint);
+				 file_pos, &hint, 0);
 	BUG_ON(ret);

 	ins.objectid = inode->i_ino;
...
@@ -1485,7 +1495,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
 	btrfs_mark_buffer_dirty(leaf);

 	inode_add_bytes(inode, num_bytes);
-	btrfs_drop_extent_cache(inode, file_pos, file_pos + num_bytes - 1, 0);

 	ins.objectid = disk_bytenr;
 	ins.offset = disk_num_bytes;
...
@@ -1596,6 +1605,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 						ordered_extent->len,
 						compressed, 0, 0,
 						BTRFS_FILE_EXTENT_REG);
+		unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
+				   ordered_extent->file_offset,
+				   ordered_extent->len);
 		BUG_ON(ret);
 	}
 	unlock_extent(io_tree, ordered_extent->file_offset,
...
@@ -1623,6 +1635,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
 				struct extent_state *state, int uptodate)
 {
+	ClearPagePrivate2(page);
 	return btrfs_finish_ordered_io(page->mapping->host, start, end);
 }
...
@@ -1669,13 +1682,13 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
 		failrec->last_mirror = 0;
 		failrec->bio_flags = 0;

-		spin_lock(&em_tree->lock);
+		read_lock(&em_tree->lock);
 		em = lookup_extent_mapping(em_tree, start, failrec->len);
 		if (em->start > start || em->start + em->len < start) {
 			free_extent_map(em);
 			em = NULL;
 		}
-		spin_unlock(&em_tree->lock);
+		read_unlock(&em_tree->lock);

 		if (!em || IS_ERR(em)) {
 			kfree(failrec);
...
@@ -1794,7 +1807,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 		return 0;

 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
-	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1)) {
+	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
 		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
 				  GFP_NOFS);
 		return 0;
...
@@ -2935,7 +2948,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t size)
 					cur_offset,
 					cur_offset + hole_size,
 					block_end,
-					cur_offset, &hint_byte);
+					cur_offset, &hint_byte, 1);
 			if (err)
 				break;
 			err = btrfs_insert_file_extent(trans, root,
...
@@ -4064,11 +4077,11 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 	int compressed;

again:
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, start, len);
 	if (em)
 		em->bdev = root->fs_info->fs_devices->latest_bdev;
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);

 	if (em) {
 		if (em->start > start || em->start + em->len <= start)
...
@@ -4215,6 +4228,11 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 				map = kmap(page);
 				read_extent_buffer(leaf, map + pg_offset, ptr,
 						   copy_size);
+				if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
+					memset(map + pg_offset + copy_size, 0,
+					       PAGE_CACHE_SIZE - pg_offset -
+					       copy_size);
+				}
 				kunmap(page);
 			}
 			flush_dcache_page(page);
...
@@ -4259,7 +4277,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 	}

 	err = 0;
-	spin_lock(&em_tree->lock);
+	write_lock(&em_tree->lock);
 	ret = add_extent_mapping(em_tree, em);
 	/* it is possible that someone inserted the extent into the tree
 	 * while we had the lock dropped. It is also possible that
...
@@ -4299,7 +4317,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 			err = 0;
 		}
 	}
-	spin_unlock(&em_tree->lock);
+	write_unlock(&em_tree->lock);
out:
 	if (path)
 		btrfs_free_path(path);
...
@@ -4398,13 +4416,21 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 	u64 page_start = page_offset(page);
 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;

+	/*
+	 * we have the page locked, so new writeback can't start,
+	 * and the dirty bit won't be cleared while we are here.
+	 *
+	 * Wait for IO on this page so that we can safely clear
+	 * the PagePrivate2 bit and do ordered accounting
+	 */
 	wait_on_page_writeback(page);
+
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
 	if (offset) {
 		btrfs_releasepage(page, GFP_NOFS);
 		return;
 	}
 	lock_extent(tree, page_start, page_end, GFP_NOFS);
 	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
 					   page_offset(page));
...
@@ -4415,16 +4441,21 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 		 */
 		clear_extent_bit(tree, page_start, page_end,
 				 EXTENT_DIRTY | EXTENT_DELALLOC |
-				 EXTENT_LOCKED, 1, 0, GFP_NOFS);
-		btrfs_finish_ordered_io(page->mapping->host,
-					page_start, page_end);
+				 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
+		/*
+		 * whoever cleared the private bit is responsible
+		 * for the finish_ordered_io
+		 */
+		if (TestClearPagePrivate2(page)) {
+			btrfs_finish_ordered_io(page->mapping->host,
+						page_start, page_end);
+		}
 		btrfs_put_ordered_extent(ordered);
 		lock_extent(tree, page_start, page_end, GFP_NOFS);
 	}
 	clear_extent_bit(tree, page_start, page_end,
-		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
-		 EXTENT_ORDERED,
-		 1, 1, GFP_NOFS);
+		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
+		 1, 1, NULL, GFP_NOFS);
 	__btrfs_releasepage(page, GFP_NOFS);

 	ClearPageChecked(page);
...
@@ -4521,11 +4552,14 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 	ClearPageChecked(page);
 	set_page_dirty(page);
+	SetPageUptodate(page);

 	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
 	unlock_extent(io_tree, page_start, page_end, GFP_NOFS);

out_unlock:
+	if (!ret)
+		return VM_FAULT_LOCKED;
 	unlock_page(page);
out:
 	return ret;
...
@@ -5058,6 +5092,8 @@ static int prealloc_file_range(struct btrfs_trans_handle *trans,
 						  0, 0, 0,
 						  BTRFS_FILE_EXTENT_PREALLOC);
 		BUG_ON(ret);
+		btrfs_drop_extent_cache(inode, cur_offset,
+					cur_offset + ins.offset - 1, 0);
 		num_bytes -= ins.offset;
 		cur_offset += ins.offset;
 		alloc_hint = ins.objectid + ins.offset;
...
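The Private2 page bit replaces the old EXTENT_ORDERED range bit in these hooks: whichever path test-and-clears it owns the ordered completion, so the writeback end_io hook and invalidatepage cannot both run btrfs_finish_ordered_io() for the same page. A userspace model of that exactly-once handoff using a C11 atomic flag; struct page and maybe_finish here are toys, not the kernel's types:

#include <stdatomic.h>
#include <stdio.h>

/* toy page: bit 0 plays the role of PagePrivate2, set when the page is
 * properly set up for ordered writepage */
struct page { atomic_uint flags; };

static void finish_ordered_io(const char *who)
{
	printf("%s runs ordered completion\n", who);
}

/* both racing paths call this; the atomic test-and-clear guarantees
 * exactly one of them performs the completion */
static void maybe_finish(struct page *p, const char *who)
{
	if (atomic_fetch_and(&p->flags, ~1u) & 1u)
		finish_ordered_io(who);
	else
		printf("%s: already handled\n", who);
}

int main(void)
{
	struct page p = { 1 };	/* Private2 set at writepage time */

	maybe_finish(&p, "end_io hook");
	maybe_finish(&p, "invalidatepage");
	return 0;
}

Compared with the removed scheme, the state lives on the page itself instead of in the extent_io tree, so no tree lock or range search is needed on the completion path.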
fs/btrfs/ioctl.c
@@ -596,9 +596,8 @@ static int btrfs_defrag_file(struct file *file)
 		clear_page_dirty_for_io(page);

 		btrfs_set_extent_delalloc(inode, page_start, page_end);
-		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
 		set_page_dirty(page);
+		unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
 		unlock_page(page);
 		page_cache_release(page);
 		balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
...
@@ -976,7 +975,7 @@ static long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 	/* punch hole in destination first */
 	btrfs_drop_extents(trans, root, inode, off, off + len,
-			   off + len, 0, &hint_byte);
+			   off + len, 0, &hint_byte, 1);

 	/* clone data */
 	key.objectid = src->i_ino;
...
fs/btrfs/ordered-data.c
@@ -159,8 +159,6 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
  *
  * len is the length of the extent
  *
- * This also sets the EXTENT_ORDERED bit on the range in the inode.
- *
  * The tree is given a single reference on the ordered extent that was
  * inserted.
  */
...
@@ -181,6 +179,7 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 	entry->start = start;
 	entry->len = len;
 	entry->disk_len = disk_len;
+	entry->bytes_left = len;
 	entry->inode = inode;
 	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
 		set_bit(type, &entry->flags);
...
@@ -195,9 +194,6 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 			   &entry->rb_node);
 	BUG_ON(node);

-	set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
-			   entry_end(entry) - 1, GFP_NOFS);
-
 	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
 	list_add_tail(&entry->root_extent_list,
 		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
...
@@ -241,13 +237,10 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
 	struct btrfs_ordered_extent *entry;
-	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	int ret;

 	tree = &BTRFS_I(inode)->ordered_tree;
 	mutex_lock(&tree->mutex);
-	clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
-			     GFP_NOFS);
 	node = tree_search(tree, file_offset);
 	if (!node) {
 		ret = 1;
...
@@ -260,11 +253,16 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
 		goto out;
 	}

-	ret = test_range_bit(io_tree, entry->file_offset,
-			     entry->file_offset + entry->len - 1,
-			     EXTENT_ORDERED, 0);
-	if (ret == 0)
+	if (io_size > entry->bytes_left) {
+		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
+		       (unsigned long long)entry->bytes_left,
+		       (unsigned long long)io_size);
+	}
+	entry->bytes_left -= io_size;
+	if (entry->bytes_left == 0)
 		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
+	else
+		ret = 1;
out:
 	mutex_unlock(&tree->mutex);
 	return ret == 0;
...
@@ -476,6 +474,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 	u64 orig_end;
 	u64 wait_end;
 	struct btrfs_ordered_extent *ordered;
+	int found;

 	if (start + len < start) {
 		orig_end = INT_LIMIT(loff_t);
...
@@ -502,6 +501,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 			   orig_end >> PAGE_CACHE_SHIFT);

 	end = orig_end;
+	found = 0;
 	while (1) {
 		ordered = btrfs_lookup_first_ordered_extent(inode, end);
 		if (!ordered)
...
@@ -514,6 +514,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 			btrfs_put_ordered_extent(ordered);
 			break;
 		}
+		found++;
 		btrfs_start_ordered_extent(inode, ordered, 1);
 		end = ordered->file_offset;
 		btrfs_put_ordered_extent(ordered);
...
@@ -521,8 +522,8 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 			break;
 		end--;
 	}
-	if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
-			   EXTENT_ORDERED | EXTENT_DELALLOC, 0)) {
+	if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
+			   EXTENT_DELALLOC, 0, NULL)) {
 		schedule_timeout(1);
 		goto again;
 	}
...
@@ -613,7 +614,7 @@ int btrfs_ordered_update_i_size(struct inode *inode,
 	 */
 	if (test_range_bit(io_tree, disk_i_size,
 			   ordered->file_offset + ordered->len - 1,
-			   EXTENT_DELALLOC, 0)) {
+			   EXTENT_DELALLOC, 0, NULL)) {
 		goto out;
 	}
 	/*
...
@@ -664,7 +665,7 @@ int btrfs_ordered_update_i_size(struct inode *inode,
 	 */
 	if (i_size_test > entry_end(ordered) &&
 	    !test_range_bit(io_tree, entry_end(ordered), i_size_test - 1,
-			   EXTENT_DELALLOC, 0)) {
+			   EXTENT_DELALLOC, 0, NULL)) {
 		new_i_size = min_t(u64, i_size_test, i_size_read(inode));
 	}
 	BTRFS_I(inode)->disk_i_size = new_i_size;
...
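With EXTENT_ORDERED gone, btrfs_dec_test_ordered_pending() above tracks completion with the new bytes_left counter instead of testing a range bit under the tree lock. A self-contained sketch of that accounting; dec_test_pending mirrors the logic shown in the diff but is a made-up userspace function, not the kernel one:

#include <stdio.h>

/* toy ordered extent: this commit replaces the per-range EXTENT_ORDERED
 * bit with a simple byte counter on the ordered extent itself */
struct ordered_extent {
	unsigned long long len;
	unsigned long long bytes_left;
	int io_done;
};

/* each completed write subtracts its size; the ordered extent is done
 * exactly when the counter reaches zero */
static int dec_test_pending(struct ordered_extent *oe,
			    unsigned long long io_size)
{
	if (io_size > oe->bytes_left)
		printf("bad ordered accounting left %llu size %llu\n",
		       oe->bytes_left, io_size);
	oe->bytes_left -= io_size;
	if (oe->bytes_left == 0)
		oe->io_done = 1;
	return oe->io_done;
}

int main(void)
{
	struct ordered_extent oe = { 8192, 8192, 0 };

	printf("first 4k done? %d\n", dec_test_pending(&oe, 4096));
	printf("second 4k done? %d\n", dec_test_pending(&oe, 4096));
	return 0;
}

The counter makes completion O(1) per IO and removes the set/clear traffic on the extent_io tree that the old range bit required for every ordered extent.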
fs/btrfs/ordered-data.h
@@ -85,6 +85,9 @@ struct btrfs_ordered_extent {
 	/* extent length on disk */
 	u64 disk_len;

+	/* number of bytes that still need writing */
+	u64 bytes_left;
+
 	/* flags (described above) */
 	unsigned long flags;
...
fs/btrfs/relocation.c
@@ -2180,7 +2180,7 @@ static int tree_block_processed(u64 bytenr, u32 blocksize,
 				struct reloc_control *rc)
 {
 	if (test_range_bit(&rc->processed_blocks, bytenr,
-			   bytenr + blocksize - 1, EXTENT_DIRTY, 1))
+			   bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
 		return 1;
 	return 0;
 }
...
@@ -2646,9 +2646,9 @@ int relocate_data_extent(struct inode *inode, struct btrfs_key *extent_key)
 	lock_extent(&BTRFS_I(inode)->io_tree, start, end, GFP_NOFS);
 	while (1) {
 		int ret;
-		spin_lock(&em_tree->lock);
+		write_lock(&em_tree->lock);
 		ret = add_extent_mapping(em_tree, em);
-		spin_unlock(&em_tree->lock);
+		write_unlock(&em_tree->lock);
 		if (ret != -EEXIST) {
 			free_extent_map(em);
 			break;
...
fs/btrfs/tree-log.c
@@ -534,7 +534,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 	saved_nbytes = inode_get_bytes(inode);
 	/* drop any overlapping extents */
 	ret = btrfs_drop_extents(trans, root, inode,
-			 start, extent_end, extent_end, start, &alloc_hint);
+			 start, extent_end, extent_end, start, &alloc_hint, 1);
 	BUG_ON(ret);

 	if (found_type == BTRFS_FILE_EXTENT_REG ||
...
fs/btrfs/volumes.c
View file @
83ebade3
...
@@ -276,7 +276,7 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
...
@@ -276,7 +276,7 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
* is now congested. Back off and let other work structs
* is now congested. Back off and let other work structs
* run instead
* run instead
*/
*/
if
(
pending
&&
bdi_write_congested
(
bdi
)
&&
batch_run
>
32
&&
if
(
pending
&&
bdi_write_congested
(
bdi
)
&&
batch_run
>
8
&&
fs_info
->
fs_devices
->
open_devices
>
1
)
{
fs_info
->
fs_devices
->
open_devices
>
1
)
{
struct
io_context
*
ioc
;
struct
io_context
*
ioc
;
...
@@ -1749,9 +1749,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
...
@@ -1749,9 +1749,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
* step two, delete the device extents and the
* step two, delete the device extents and the
* chunk tree entries
* chunk tree entries
*/
*/
spin
_lock
(
&
em_tree
->
lock
);
read
_lock
(
&
em_tree
->
lock
);
em
=
lookup_extent_mapping
(
em_tree
,
chunk_offset
,
1
);
em
=
lookup_extent_mapping
(
em_tree
,
chunk_offset
,
1
);
spin
_unlock
(
&
em_tree
->
lock
);
read
_unlock
(
&
em_tree
->
lock
);
BUG_ON
(
em
->
start
>
chunk_offset
||
BUG_ON
(
em
->
start
>
chunk_offset
||
em
->
start
+
em
->
len
<
chunk_offset
);
em
->
start
+
em
->
len
<
chunk_offset
);
...
@@ -1780,9 +1780,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
...
@@ -1780,9 +1780,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
ret
=
btrfs_remove_block_group
(
trans
,
extent_root
,
chunk_offset
);
ret
=
btrfs_remove_block_group
(
trans
,
extent_root
,
chunk_offset
);
BUG_ON
(
ret
);
BUG_ON
(
ret
);
spin
_lock
(
&
em_tree
->
lock
);
write
_lock
(
&
em_tree
->
lock
);
remove_extent_mapping
(
em_tree
,
em
);
remove_extent_mapping
(
em_tree
,
em
);
spin
_unlock
(
&
em_tree
->
lock
);
write
_unlock
(
&
em_tree
->
lock
);
kfree
(
map
);
kfree
(
map
);
em
->
bdev
=
NULL
;
em
->
bdev
=
NULL
;
...
@@ -2294,9 +2294,9 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
...
@@ -2294,9 +2294,9 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
em
->
block_len
=
em
->
len
;
em
->
block_len
=
em
->
len
;
em_tree
=
&
extent_root
->
fs_info
->
mapping_tree
.
map_tree
;
em_tree
=
&
extent_root
->
fs_info
->
mapping_tree
.
map_tree
;
spin
_lock
(
&
em_tree
->
lock
);
write
_lock
(
&
em_tree
->
lock
);
ret
=
add_extent_mapping
(
em_tree
,
em
);
ret
=
add_extent_mapping
(
em_tree
,
em
);
spin
_unlock
(
&
em_tree
->
lock
);
write
_unlock
(
&
em_tree
->
lock
);
BUG_ON
(
ret
);
BUG_ON
(
ret
);
free_extent_map
(
em
);
free_extent_map
(
em
);
...
@@ -2491,9 +2491,9 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
...
@@ -2491,9 +2491,9 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
int
readonly
=
0
;
int
readonly
=
0
;
int
i
;
int
i
;
spin
_lock
(
&
map_tree
->
map_tree
.
lock
);
read
_lock
(
&
map_tree
->
map_tree
.
lock
);
em
=
lookup_extent_mapping
(
&
map_tree
->
map_tree
,
chunk_offset
,
1
);
em
=
lookup_extent_mapping
(
&
map_tree
->
map_tree
,
chunk_offset
,
1
);
spin
_unlock
(
&
map_tree
->
map_tree
.
lock
);
read
_unlock
(
&
map_tree
->
map_tree
.
lock
);
if
(
!
em
)
if
(
!
em
)
return
1
;
return
1
;
...
@@ -2518,11 +2518,11 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
 	struct extent_map *em;
 
 	while (1) {
-		spin_lock(&tree->map_tree.lock);
+		write_lock(&tree->map_tree.lock);
 		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
 		if (em)
 			remove_extent_mapping(&tree->map_tree, em);
-		spin_unlock(&tree->map_tree.lock);
+		write_unlock(&tree->map_tree.lock);
 		if (!em)
 			break;
 		kfree(em->bdev);
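Unlike the read-side lookups, this tear-down loop takes the write lock for the lookup and the unlink together, since the looked-up entry is immediately removed; the frees happen only after the unlock. The same drain shape, reusing the hypothetical tree_lock from the sketch above (tree_pop() is a made-up helper that unlinks and returns any remaining entry, or NULL when empty):

#include <linux/slab.h>		/* kfree */

static void drain_tree(void)
{
	struct item *it;

	while (1) {
		write_lock(&tree_lock);	/* lookup + unlink as one unit */
		it = tree_pop();
		write_unlock(&tree_lock);
		if (!it)
			break;
		kfree(it);		/* free outside the lock */
	}
}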
...
@@ -2540,9 +2540,9 @@ int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
 	struct extent_map_tree *em_tree = &map_tree->map_tree;
 	int ret;
 
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, logical, len);
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);
 	BUG_ON(!em);
 
 	BUG_ON(em->start > logical || em->start + em->len < logical);
...
@@ -2604,9 +2604,9 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
 		atomic_set(&multi->error, 0);
 	}
 
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, logical, *length);
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);
 
 	if (!em && unplug_page)
 		return 0;
...
@@ -2763,9 +2763,9 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 	u64 stripe_nr;
 	int i, j, nr = 0;
 
-	spin_lock(&em_tree->lock);
+	read_lock(&em_tree->lock);
 	em = lookup_extent_mapping(em_tree, chunk_start, 1);
-	spin_unlock(&em_tree->lock);
+	read_unlock(&em_tree->lock);
 
 	BUG_ON(!em || em->start != chunk_start);
 	map = (struct map_lookup *)em->bdev;
...
@@ -3053,9 +3053,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
 	logical = key->offset;
 	length = btrfs_chunk_length(leaf, chunk);
 
-	spin_lock(&map_tree->map_tree.lock);
+	read_lock(&map_tree->map_tree.lock);
 	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
-	spin_unlock(&map_tree->map_tree.lock);
+	read_unlock(&map_tree->map_tree.lock);
 
 	/* already mapped? */
 	if (em && em->start <= logical && em->start + em->len > logical) {
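The "already mapped?" test above is a half-open interval check: the existing mapping [em->start, em->start + em->len) must contain logical. As a standalone predicate (name and signature hypothetical):

#include <linux/types.h>	/* bool, u64 */

/* True when logical falls inside the half-open range
 * [start, start + len). */
static bool range_covers(u64 start, u64 len, u64 logical)
{
	return start <= logical && start + len > logical;
}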
...
@@ -3114,9 +3114,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
 		map->stripes[i].dev->in_fs_metadata = 1;
 	}
 
-	spin_lock(&map_tree->map_tree.lock);
+	write_lock(&map_tree->map_tree.lock);
 	ret = add_extent_mapping(&map_tree->map_tree, em);
-	spin_unlock(&map_tree->map_tree.lock);
+	write_unlock(&map_tree->map_tree.lock);
 	BUG_ON(ret);
 	free_extent_map(em);
 
...