nexedi / linux · Commits · c54e4da3

Commit c54e4da3 authored Apr 22, 2004 by Christoph Hellwig
[XFS] clarify pagebuf page lookup logic
SGI Modid: xfs-linux:xfs-kern:168168a
parent 1463079f
Showing 2 changed files with 114 additions and 140 deletions

fs/xfs/linux/xfs_buf.c	+114 −139
fs/xfs/linux/xfs_buf.h	+0 −1
fs/xfs/linux/xfs_buf.c (view file @ c54e4da3)
@@ -356,165 +356,130 @@ pagebuf_free(
 }
 
 /*
- *	_pagebuf_lookup_pages
- *
- *	_pagebuf_lookup_pages finds all pages which match the buffer
- *	in question and the range of file offsets supplied,
- *	and builds the page list for the buffer, if the
- *	page list is not already formed or if not all of the pages are
- *	already in the list. Invalid pages (pages which have not yet been
- *	read in from disk) are assigned for any pages which are not found.
+ *	Finds all pages for buffer in question and builds it's page list.
  */
 STATIC int
 _pagebuf_lookup_pages(
-	xfs_buf_t		*pb,
-	struct address_space	*aspace,
-	page_buf_flags_t	flags)
+	xfs_buf_t		*bp,
+	uint			flags)
 {
-	loff_t			next_buffer_offset;
-	unsigned long		page_count, pi, index;
-	struct page		*page;
-	int			gfp_mask = pb_to_gfp(flags);
-	int			all_mapped, good_pages, nbytes, rval, retries;
-	unsigned int		blocksize, sectorshift;
-	size_t			size, offset;
+	struct address_space	*mapping = bp->pb_target->pbr_mapping;
+	size_t			blocksize = bp->pb_target->pbr_bsize;
+	size_t			size = bp->pb_count_desired, nbytes;
+	size_t			offset = bp->pb_offset;
+	int			gfp_mask = pb_to_gfp(flags);
+	unsigned long		page_count, i;
+	pgoff_t			first;
+	loff_t			end;
+	int			error;
 
-	next_buffer_offset = pb->pb_file_offset + pb->pb_buffer_length;
-	good_pages = page_count = (page_buf_btoc(next_buffer_offset) -
-			page_buf_btoct(pb->pb_file_offset));
-
-	if (pb->pb_flags & _PBF_ALL_PAGES_MAPPED) {
-		/* Bring pages forward in cache */
-		for (pi = 0; pi < page_count; pi++) {
-			mark_page_accessed(pb->pb_pages[pi]);
-		}
-		if ((flags & PBF_MAPPED) && !(pb->pb_flags & PBF_MAPPED)) {
-			all_mapped = 1;
-			rval = 0;
-			goto mapit;
-		}
-		return 0;
-	}
-
-	/* Ensure pb_pages field has been initialised */
-	rval = _pagebuf_get_pages(pb, page_count, flags);
-	if (rval)
-		return rval;
-
-	all_mapped = 1;
-	blocksize = pb->pb_target->pbr_bsize;
-	sectorshift = pb->pb_target->pbr_sshift;
-	size = pb->pb_count_desired;
-	offset = pb->pb_offset;
-
-	/* Enter the pages in the page list */
-	index = (pb->pb_file_offset - pb->pb_offset) >> PAGE_CACHE_SHIFT;
-	for (pi = 0; pi < page_count; pi++, index++) {
-		if (pb->pb_pages[pi] == 0) {
-			retries = 0;
-		      retry:
-			page = find_or_create_page(aspace, index, gfp_mask);
-			if (!page) {
-				if (flags & PBF_READ_AHEAD)
-					return -ENOMEM;
-				/*
-				 * This could deadlock.  But until all the
-				 * XFS lowlevel code is revamped to handle
-				 * buffer allocation failures we can't do
-				 * much.
-				 */
-				if (!(++retries % 100)) {
-					printk(KERN_ERR "possibly deadlocking in %s\n",
-							__FUNCTION__);
-				}
-				XFS_STATS_INC(pb_page_retries);
-				pagebuf_daemon_wakeup();
-				current->state = TASK_UNINTERRUPTIBLE;
-				schedule_timeout(10);
-				goto retry;
-			}
-			XFS_STATS_INC(pb_page_found);
-			mark_page_accessed(page);
-			pb->pb_pages[pi] = page;
-		} else {
-			page = pb->pb_pages[pi];
-			lock_page(page);
-		}
-
-		nbytes = PAGE_CACHE_SIZE - offset;
-		if (nbytes > size)
-			nbytes = size;
-		size -= nbytes;
-
-		if (!PageUptodate(page)) {
-			if (blocksize == PAGE_CACHE_SIZE) {
-				if (flags & PBF_READ)
-					pb->pb_locked = 1;
-				good_pages--;
-			} else if (!PagePrivate(page)) {
-				unsigned long	i, range;
-
-				/*
-				 * In this case page->private holds a bitmap
-				 * of uptodate sectors within the page
-				 */
-				ASSERT(blocksize < PAGE_CACHE_SIZE);
-				range = (offset + nbytes) >> sectorshift;
-				for (i = offset >> sectorshift; i < range; i++)
-					if (!test_bit(i, &page->private))
-						break;
-				if (i != range)
-					good_pages--;
-			} else {
-				good_pages--;
-			}
-		}
-		offset = 0;
-	}
-
-	if (!pb->pb_locked) {
-		for (pi = 0; pi < page_count; pi++) {
-			if (pb->pb_pages[pi])
-				unlock_page(pb->pb_pages[pi]);
-		}
-	}
-
-	pb->pb_flags |= _PBF_PAGECACHE;
-mapit:
-	pb->pb_flags |= _PBF_MEM_ALLOCATED;
-	if (all_mapped) {
-		pb->pb_flags |= _PBF_ALL_PAGES_MAPPED;
-
-		/* A single page buffer is always mappable */
-		if (page_count == 1) {
-			pb->pb_addr = (caddr_t)
-				page_address(pb->pb_pages[0]) + pb->pb_offset;
-			pb->pb_flags |= PBF_MAPPED;
-		} else if (flags & PBF_MAPPED) {
-			if (as_list_len > 64)
-				purge_addresses();
-			pb->pb_addr = vmap(pb->pb_pages, page_count,
-					VM_MAP, PAGE_KERNEL);
-			if (pb->pb_addr == NULL)
-				return -ENOMEM;
-			pb->pb_addr += pb->pb_offset;
-			pb->pb_flags |= PBF_MAPPED | _PBF_ADDR_ALLOCATED;
-		}
-	}
-
-	/* If some pages were found with data in them
-	 * we are not in PBF_NONE state.
-	 */
-	if (good_pages != 0) {
-		pb->pb_flags &= ~(PBF_NONE);
-		if (good_pages != page_count) {
-			pb->pb_flags |= PBF_PARTIAL;
-		}
-	}
-
-	PB_TRACE(pb, "lookup_pages", (long)good_pages);
-
-	return rval;
-}
+	first = (bp->pb_file_offset - bp->pb_offset) >> PAGE_CACHE_SHIFT;
+	end = bp->pb_file_offset + bp->pb_buffer_length;
+	page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);
+
+	error = _pagebuf_get_pages(bp, page_count, flags);
+	if (unlikely(error))
+		return error;
+
+	for (i = 0; i < bp->pb_page_count; i++) {
+		struct page	*page;
+		uint		retries = 0;
+
+	      retry:
+		page = find_or_create_page(mapping, first + i, gfp_mask);
+		if (unlikely(page == NULL)) {
+			if (flags & PBF_READ_AHEAD)
+				return -ENOMEM;
+
+			/*
+			 * This could deadlock.
+			 *
+			 * But until all the XFS lowlevel code is revamped to
+			 * handle buffer allocation failures we can't do much.
+			 */
+			if (!(++retries % 100)) {
+				printk(KERN_ERR "possibly deadlocking in %s\n",
+						__FUNCTION__);
+			}
+
+			XFS_STATS_INC(pb_page_retries);
+			pagebuf_daemon_wakeup();
+			current->state = TASK_UNINTERRUPTIBLE;
+			schedule_timeout(10);
+			goto retry;
+		}
+
+		XFS_STATS_INC(pb_page_found);
+
+		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
+		size -= nbytes;
+
+		if (!PageUptodate(page)) {
+			page_count--;
+			if (blocksize == PAGE_CACHE_SIZE) {
+				if (flags & PBF_READ)
+					bp->pb_locked = 1;
+			} else if (!PagePrivate(page)) {
+				uint	sectorshift = bp->pb_target->pbr_sshift;
+				ulong	range, i;
+
+				/*
+				 * In this case page->private holds a bitmap
+				 * of uptodate sectors within the page
+				 */
+				range = (offset + nbytes) >> sectorshift;
+				for (i = offset >> sectorshift; i < range; i++)
+					if (!test_bit(i, &page->private))
+						break;
+				if (i == range)
+					page_count++;
+			}
+		}
+
+		bp->pb_pages[i] = page;
+		offset = 0;
+	}
+
+	if (!bp->pb_locked) {
+		for (i = 0; i < bp->pb_page_count; i++)
+			unlock_page(bp->pb_pages[i]);
+	}
+
+	bp->pb_flags &= ~PBF_NONE;
+	bp->pb_flags |= (_PBF_PAGECACHE | _PBF_MEM_ALLOCATED);
+
+	/* if some pages aren't uptodate mark that in the buffer */
+	if (page_count != bp->pb_page_count)
+		bp->pb_flags |= PBF_PARTIAL;
+
+	PB_TRACE(bp, "lookup_pages", (long)page_count);
+	return error;
+}
+
+/*
+ *	Map buffer into kernel address-space if nessecary.
+ */
+STATIC int
+_pagebuf_map_pages(
+	xfs_buf_t		*bp,
+	uint			flags)
+{
+	/* A single page buffer is always mappable */
+	if (bp->pb_page_count == 1) {
+		bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
+		bp->pb_flags |= PBF_MAPPED;
+	} else if (flags & PBF_MAPPED) {
+		if (as_list_len > 64)
+			purge_addresses();
+		bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
+				VM_MAP, PAGE_KERNEL);
+		if (unlikely(bp->pb_addr == NULL))
+			return -ENOMEM;
+		bp->pb_addr += bp->pb_offset;
+		bp->pb_flags |= PBF_MAPPED | _PBF_ADDR_ALLOCATED;
+	}
+
+	return 0;
+}
 
 /*
@@ -621,7 +586,6 @@ _pagebuf_find( /* find buffer for block */
 
 	if (pb->pb_flags & PBF_STALE)
 		pb->pb_flags &= PBF_MAPPED | \
-				_PBF_ALL_PAGES_MAPPED | \
 				_PBF_ADDR_ALLOCATED | \
 				_PBF_MEM_ALLOCATED | \
 				_PBF_MEM_SLAB;
@@ -669,29 +633,40 @@ pagebuf_get( /* allocate a buffer */
 	page_buf_flags_t	flags)	/* PBF_TRYLOCK */
 {
 	xfs_buf_t		*pb, *new_pb;
-	int			error;
+	int			error = 0, i;
 
 	new_pb = pagebuf_allocate(flags);
 	if (unlikely(!new_pb))
-		return (NULL);
+		return NULL;
 
 	pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
-	if (pb != new_pb) {
+	if (pb == new_pb) {
+		error = _pagebuf_lookup_pages(pb, flags);
+		if (unlikely(error)) {
+			printk(KERN_WARNING
+			       "pagebuf_get: failed to lookup pages\n");
+			goto no_buffer;
+		}
+	} else {
 		pagebuf_deallocate(new_pb);
-		if (unlikely(!pb))
-			return (NULL);
+		if (unlikely(pb == NULL))
+			return NULL;
 	}
 
-	XFS_STATS_INC(pb_get);
+	for (i = 0; i < pb->pb_page_count; i++)
+		mark_page_accessed(pb->pb_pages[i]);
 
-	/* fill in any missing pages */
-	error = _pagebuf_lookup_pages(pb, pb->pb_target->pbr_mapping, flags);
-	if (unlikely(error)) {
-		printk(KERN_WARNING
-			"pagebuf_get: warning, failed to lookup pages\n");
-		goto no_buffer;
+	if (!(pb->pb_flags & PBF_MAPPED)) {
+		error = _pagebuf_map_pages(pb, flags);
+		if (unlikely(error)) {
+			printk(KERN_WARNING
+			       "pagebuf_get: failed to map pages\n");
+			goto no_buffer;
+		}
 	}
 
+	XFS_STATS_INC(pb_get);
+
 	/*
 	 * Always fill in the block number now, the mapped cases can do
 	 * their own overlay of this later.
fs/xfs/linux/xfs_buf.h (view file @ c54e4da3)
@@ -84,7 +84,6 @@ typedef enum page_buf_flags_e { /* pb_flags values */
 
 	/* flags used only internally */
 	_PBF_PAGECACHE = (1 << 16),		/* backed by pagecache		*/
-	_PBF_ALL_PAGES_MAPPED = (1 << 18),	/* all pages in range mapped	*/
 	_PBF_ADDR_ALLOCATED = (1 << 19),	/* pb_addr space was allocated	*/
 	_PBF_MEM_ALLOCATED = (1 << 20),		/* underlying pages are allocated */
 	_PBF_MEM_SLAB = (1 << 21),		/* underlying pages are slab allocated */
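With _PBF_ALL_PAGES_MAPPED gone, a buffer's validity now comes only from the per-page accounting in _pagebuf_lookup_pages: when a page is not fully up to date, page->private is read as a bitmap of up-to-date sectors, and the page only counts toward page_count when every sector the buffer touches has its bit set. A minimal stand-alone sketch of that bitmap test, assuming a hypothetical sector_bits word standing in for page->private and 512-byte sectors (sectorshift = 9); the helper and example values are illustrative, not part of the commit:

```c
#include <stdio.h>

/* Illustrative only: mirrors the test_bit() loop in _pagebuf_lookup_pages.
 * sector_bits stands in for page->private, sectorshift for pbr_sshift. */
static int range_uptodate(unsigned long sector_bits, unsigned int sectorshift,
			  unsigned long offset, unsigned long nbytes)
{
	unsigned long i, range = (offset + nbytes) >> sectorshift;

	for (i = offset >> sectorshift; i < range; i++)
		if (!(sector_bits & (1UL << i)))
			return 0;	/* a covered sector is not up to date */
	return 1;			/* every covered sector is up to date */
}

int main(void)
{
	/* sectors 0..3 of the page marked up to date, sector 4 not */
	unsigned long bits = 0x0f;

	/* bytes 512..2047 only touch sectors 1..3: all valid, prints 1 */
	printf("%d\n", range_uptodate(bits, 9, 512, 1536));
	/* bytes 1024..2559 reach sector 4, which is stale: prints 0 */
	printf("%d\n", range_uptodate(bits, 9, 1024, 1536));
	return 0;
}
```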