Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
Kirill Smelkov
linux
Commits
7f2f12d9
Commit
7f2f12d9
authored
Feb 19, 2010
by
Trond Myklebust
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
NFS: Simplify nfs_wb_page()
Signed-off-by:
Trond Myklebust
<
Trond.Myklebust@netapp.com
>
parent
acdc53b2
Changes
2
Show whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
23 additions
and
98 deletions
+23
-98
fs/nfs/write.c
fs/nfs/write.c
+23
-97
include/linux/nfs_fs.h
include/linux/nfs_fs.h
+0
-1
No files found.
fs/nfs/write.c
View file @
7f2f12d9
...
...
@@ -502,44 +502,6 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
}
#endif
/*
 * Wait for a request to complete.
 *
 * Interruptible by fatal signals only.
 *
 * Caller must hold inode->i_lock (it is dropped and reacquired around each
 * wait below).  Scans the requests tagged NFS_PAGE_TAG_LOCKED in the index
 * range [idx_start, idx_start + npages - 1] (npages == 0 means "to the end
 * of the file") and waits for each in turn.
 *
 * Returns the number of requests waited on, or a negative error from
 * nfs_wait_on_request().
 */
static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req;
	pgoff_t idx_end, next;
	unsigned int res = 0;
	int error;

	if (npages == 0)
		idx_end = ~0;
	else
		idx_end = idx_start + npages - 1;

	next = idx_start;
	/* Look up one locked request at a time, starting from 'next'. */
	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
		if (req->wb_index > idx_end)
			break;

		next = req->wb_index + 1;
		BUG_ON(!NFS_WBACK_BUSY(req));

		/*
		 * Pin the request so it cannot be freed while we drop
		 * i_lock to sleep in nfs_wait_on_request().
		 */
		kref_get(&req->wb_kref);
		spin_unlock(&inode->i_lock);
		error = nfs_wait_on_request(req);
		nfs_release_request(req);
		spin_lock(&inode->i_lock);
		if (error < 0)
			return error;
		res++;
	}
	return res;
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static
int
nfs_need_commit
(
struct
nfs_inode
*
nfsi
)
...
...
@@ -1432,7 +1394,7 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
return
ret
;
}
#else
/*
 * Stub used when neither CONFIG_NFS_V3 nor CONFIG_NFS_V4 is configured
 * (see the #if above): there is never anything to commit, so report
 * success immediately.
 */
static int nfs_commit_inode(struct inode *inode, int how)
{
	return 0;
}
...
...
@@ -1448,46 +1410,6 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
return
nfs_commit_unstable_pages
(
inode
,
wbc
);
}
/*
 * Flush and wait for all outstanding NFS requests on @mapping that fall
 * within the range described by @wbc, committing unstable writes with
 * flags @how.
 *
 * Returns 0 on success, or a negative error from waiting or committing.
 */
long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
{
	struct inode *inode = mapping->host;
	pgoff_t idx_start, idx_end;
	unsigned int npages = 0;
	LIST_HEAD(head);
	long pages, ret;

	/* FIXME */
	if (wbc->range_cyclic)
		idx_start = 0;	/* npages stays 0: scan the whole file */
	else {
		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (idx_end > idx_start) {
			pgoff_t l_npages = 1 + idx_end - idx_start;
			npages = l_npages;
			/*
			 * If the page count does not fit in 'unsigned int',
			 * fall back to npages == 0 ("whole file").
			 */
			if (sizeof(npages) != sizeof(l_npages) &&
			    (pgoff_t)npages != l_npages)
				npages = 0;
		}
	}
	spin_lock(&inode->i_lock);
	do {
		/* First wait for any in-flight locked requests in range. */
		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
		if (ret != 0)
			continue;
		/* Gather commit-pending requests in range; none => done. */
		pages = nfs_scan_commit(inode, &head, idx_start, npages);
		if (pages == 0)
			break;
		/* Also pick up any remaining commit-pending requests. */
		pages += nfs_scan_commit(inode, &head, 0, 0);
		/* i_lock must be dropped across the actual commit. */
		spin_unlock(&inode->i_lock);
		ret = nfs_commit_list(inode, &head, how);
		spin_lock(&inode->i_lock);
	} while (ret >= 0);
	spin_unlock(&inode->i_lock);
	return ret;
}
/*
* flush the inode to disk.
*/
...
...
@@ -1531,45 +1453,49 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
return
ret
;
}
/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	loff_t range_start = page_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	/* Synchronous writeback confined to this one page's byte range. */
	struct writeback_control wbc = {
		.bdi = page->mapping->backing_dev_info,
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};
	struct nfs_page *req;
	int need_commit;
	int ret;

	/* Loop until the page no longer carries an NFS request. */
	while (PagePrivate(page)) {
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
		}
		req = nfs_find_and_lock_request(page);
		if (!req)
			break;
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			goto out_error;
		}
		/*
		 * PG_CLEAN means the data was written but still awaits a
		 * commit to stable storage; test before unlocking the
		 * request, commit after.
		 */
		need_commit = test_bit(PG_CLEAN, &req->wb_flags);
		nfs_clear_page_tag_locked(req);
		if (need_commit) {
			ret = nfs_commit_inode(inode, FLUSH_SYNC);
			if (ret < 0)
				goto out_error;
		}
	}
	return 0;
out_error:
	/* Writeback failed part-way: re-mark the inode's pages dirty. */
	__mark_inode_dirty(inode, I_DIRTY_PAGES);
	return ret;
}
/*
 * Write back all requests on one page - we do this before reading it.
 *
 * Thin wrapper: delegates to nfs_wb_page_priority() with FLUSH_STABLE
 * as the 'how' argument.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
}
#ifdef CONFIG_MIGRATION
int
nfs_migrate_page
(
struct
address_space
*
mapping
,
struct
page
*
newpage
,
struct
page
*
page
)
...
...
include/linux/nfs_fs.h
View file @
7f2f12d9
...
...
@@ -475,7 +475,6 @@ extern int nfs_writeback_done(struct rpc_task *, struct nfs_write_data *);
* Try to write back everything synchronously (but check the
* return value!)
*/
extern
long
nfs_sync_mapping_wait
(
struct
address_space
*
,
struct
writeback_control
*
,
int
);
extern
int
nfs_wb_all
(
struct
inode
*
inode
);
extern
int
nfs_wb_page
(
struct
inode
*
inode
,
struct
page
*
page
);
extern
int
nfs_wb_page_cancel
(
struct
inode
*
inode
,
struct
page
*
page
);
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment