nexedi / linux

Commit c54e4da3 authored Apr 22, 2004 by Christoph Hellwig
[XFS] clarify pagebuf page lookup logic
SGI Modid: xfs-linux:xfs-kern:168168a
parent 1463079f
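The change splits the old, do-everything _pagebuf_lookup_pages() into two steps: the lookup step finds or creates the backing pages for a newly allocated buffer, and a new helper, _pagebuf_map_pages(), maps the buffer into kernel address space separately, with pagebuf_get() now driving both. The sketch below is a minimal, hedged illustration of that control flow only; it is ordinary userspace C, not part of the commit, and struct buffer, lookup_pages() and map_pages() are invented stand-ins for the kernel's xfs_buf_t, _pagebuf_lookup_pages() and _pagebuf_map_pages().

#include <stdio.h>
#include <stdlib.h>

#define NPAGES		4
#define BUF_PAGE_SIZE	4096

struct buffer {
	void	*pages[NPAGES];		/* stand-in for pb_pages[] */
	int	page_count;		/* stand-in for pb_page_count */
	void	*addr;			/* stand-in for pb_addr */
	int	mapped;			/* stand-in for the PBF_MAPPED flag */
};

/* Step 1: find/allocate the backing pages (cf. _pagebuf_lookup_pages). */
static int lookup_pages(struct buffer *bp)
{
	int i;

	for (i = 0; i < NPAGES; i++) {
		bp->pages[i] = malloc(BUF_PAGE_SIZE);
		if (!bp->pages[i])
			return -1;	/* sketch only: earlier pages not freed */
	}
	bp->page_count = NPAGES;
	return 0;
}

/* Step 2: map the buffer only when it is not mapped yet (cf. _pagebuf_map_pages). */
static int map_pages(struct buffer *bp)
{
	if (bp->page_count == 1) {
		/* a single page buffer is always mappable */
		bp->addr = bp->pages[0];
		bp->mapped = 1;
	}
	/* the multi-page case would need a vmap()-like contiguous mapping */
	return 0;
}

int main(void)
{
	struct buffer b = { {0}, 0, NULL, 0 };

	if (lookup_pages(&b))		/* always look the pages up first */
		return 1;
	if (!b.mapped && map_pages(&b))	/* map separately, on demand */
		return 1;
	printf("buffer ready: %d pages\n", b.page_count);
	return 0;
}

The point of the split is visible in main(): a caller that never needs a kernel-virtual mapping pays only for the page lookup, while the vmap()-style mapping work is deferred to the second, optional step.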
Showing 2 changed files with 114 additions and 140 deletions:
  fs/xfs/linux/xfs_buf.c   +114  -139
  fs/xfs/linux/xfs_buf.h   +0    -1

fs/xfs/linux/xfs_buf.c
@@ -356,165 +356,130 @@ pagebuf_free(
 }
 
 /*
- *	_pagebuf_lookup_pages
- *
- *	_pagebuf_lookup_pages finds all pages which match the buffer
- *	in question and the range of file offsets supplied,
- *	and builds the page list for the buffer, if the
- *	page list is not already formed or if not all of the pages are
- *	already in the list. Invalid pages (pages which have not yet been
- *	read in from disk) are assigned for any pages which are not found.
+ *	Finds all pages for buffer in question and builds it's page list.
  */
 STATIC int
 _pagebuf_lookup_pages(
-	xfs_buf_t		*pb,
-	struct address_space	*aspace,
-	page_buf_flags_t	flags)
+	xfs_buf_t		*bp,
+	uint			flags)
 {
-	loff_t			next_buffer_offset;
-	unsigned long		page_count, pi, index;
-	struct page		*page;
+	struct address_space	*mapping = bp->pb_target->pbr_mapping;
+	size_t			blocksize = bp->pb_target->pbr_bsize;
+	size_t			size = bp->pb_count_desired, nbytes;
+	size_t			offset = bp->pb_offset;
 	int			gfp_mask = pb_to_gfp(flags);
-	int			all_mapped, good_pages, nbytes, rval, retries;
-	unsigned int		blocksize, sectorshift;
-	size_t			size, offset;
+	unsigned long		page_count, i;
+	pgoff_t			first;
+	loff_t			end;
+	int			error;
 
-	next_buffer_offset = pb->pb_file_offset + pb->pb_buffer_length;
-	good_pages = page_count = (page_buf_btoc(next_buffer_offset) -
-				   page_buf_btoct(pb->pb_file_offset));
+	first = (bp->pb_file_offset - bp->pb_offset) >> PAGE_CACHE_SHIFT;
+	end = bp->pb_file_offset + bp->pb_buffer_length;
+	page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);
 
-	if (pb->pb_flags & _PBF_ALL_PAGES_MAPPED) {
-		/* Bring pages forward in cache */
-		for (pi = 0; pi < page_count; pi++) {
-			mark_page_accessed(pb->pb_pages[pi]);
-		}
-		if ((flags & PBF_MAPPED) && !(pb->pb_flags & PBF_MAPPED)) {
-			all_mapped = 1;
-			rval = 0;
-			goto mapit;
-		}
-		return 0;
-	}
-
-	/* Ensure pb_pages field has been initialised */
-	rval = _pagebuf_get_pages(pb, page_count, flags);
-	if (rval)
-		return rval;
-
-	all_mapped = 1;
-	blocksize = pb->pb_target->pbr_bsize;
-	sectorshift = pb->pb_target->pbr_sshift;
-	size = pb->pb_count_desired;
-	offset = pb->pb_offset;
-
-	/* Enter the pages in the page list */
-	index = (pb->pb_file_offset - pb->pb_offset) >> PAGE_CACHE_SHIFT;
-	for (pi = 0; pi < page_count; pi++, index++) {
-		if (pb->pb_pages[pi] == 0) {
-			retries = 0;
-		retry:
-			page = find_or_create_page(aspace, index, gfp_mask);
-			if (!page) {
-				if (flags & PBF_READ_AHEAD)
-					return -ENOMEM;
-				/*
-				 * This could deadlock. But until all the
-				 * XFS lowlevel code is revamped to handle
-				 * buffer allocation failures we can't do
-				 * much.
-				 */
-				if (!(++retries % 100)) {
-					printk(KERN_ERR
-						"possibly deadlocking in %s\n",
-						__FUNCTION__);
-				}
-				XFS_STATS_INC(pb_page_retries);
-				pagebuf_daemon_wakeup();
-				current->state = TASK_UNINTERRUPTIBLE;
-				schedule_timeout(10);
-				goto retry;
-			}
-			XFS_STATS_INC(pb_page_found);
-			mark_page_accessed(page);
-			pb->pb_pages[pi] = page;
-		} else {
-			page = pb->pb_pages[pi];
-			lock_page(page);
-		}
+	error = _pagebuf_get_pages(bp, page_count, flags);
+	if (unlikely(error))
+		return error;
 
-		nbytes = PAGE_CACHE_SIZE - offset;
-		if (nbytes > size)
-			nbytes = size;
+	for (i = 0; i < bp->pb_page_count; i++) {
+		struct page	*page;
+		uint		retries = 0;
+
+	retry:
+		page = find_or_create_page(mapping, first + i, gfp_mask);
+		if (unlikely(page == NULL)) {
+			if (flags & PBF_READ_AHEAD)
+				return -ENOMEM;
+
+			/*
+			 * This could deadlock.
+			 *
+			 * But until all the XFS lowlevel code is revamped to
+			 * handle buffer allocation failures we can't do much.
+			 */
+			if (!(++retries % 100)) {
+				printk(KERN_ERR
+					"possibly deadlocking in %s\n",
+					__FUNCTION__);
+			}
+			XFS_STATS_INC(pb_page_retries);
+			pagebuf_daemon_wakeup();
+			current->state = TASK_UNINTERRUPTIBLE;
+			schedule_timeout(10);
+			goto retry;
+		}
+
+		XFS_STATS_INC(pb_page_found);
+
+		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
 		size -= nbytes;
-		offset = 0;
 
 		if (!PageUptodate(page)) {
+			page_count--;
 			if (blocksize == PAGE_CACHE_SIZE) {
 				if (flags & PBF_READ)
-					pb->pb_locked = 1;
-				good_pages--;
+					bp->pb_locked = 1;
 			} else if (!PagePrivate(page)) {
-				unsigned long	i, range;
+				uint	sectorshift = bp->pb_target->pbr_sshift;
+				ulong	range, i;
 
 				/*
 				 * In this case page->private holds a bitmap
 				 * of uptodate sectors within the page
 				 */
-				ASSERT(blocksize < PAGE_CACHE_SIZE);
 				range = (offset + nbytes) >> sectorshift;
 				for (i = offset >> sectorshift; i < range; i++)
					if (!test_bit(i, &page->private))
						break;
-				if (i != range)
-					good_pages--;
-			} else {
-				good_pages--;
+				if (i == range)
+					page_count++;
 			}
 		}
+
+		bp->pb_pages[i] = page;
+		offset = 0;
 	}
 
-	if (!pb->pb_locked) {
-		for (pi = 0; pi < page_count; pi++) {
-			if (pb->pb_pages[pi])
-				unlock_page(pb->pb_pages[pi]);
-		}
+	if (!bp->pb_locked) {
+		for (i = 0; i < bp->pb_page_count; i++)
+			unlock_page(bp->pb_pages[i]);
 	}
 
-	pb->pb_flags |= _PBF_PAGECACHE;
-mapit:
-	pb->pb_flags |= _PBF_MEM_ALLOCATED;
-	if (all_mapped) {
-		pb->pb_flags |= _PBF_ALL_PAGES_MAPPED;
-
-		/* A single page buffer is always mappable */
-		if (page_count == 1) {
-			pb->pb_addr = (caddr_t)
-				page_address(pb->pb_pages[0]) + pb->pb_offset;
-			pb->pb_flags |= PBF_MAPPED;
-		} else if (flags & PBF_MAPPED) {
-			if (as_list_len > 64)
-				purge_addresses();
-			pb->pb_addr = vmap(pb->pb_pages, page_count,
-					VM_MAP, PAGE_KERNEL);
-			if (pb->pb_addr == NULL)
-				return -ENOMEM;
-			pb->pb_addr += pb->pb_offset;
-			pb->pb_flags |= PBF_MAPPED | _PBF_ADDR_ALLOCATED;
-		}
-
-		/* If some pages were found with data in them
-		 * we are not in PBF_NONE state.
-		 */
-		if (good_pages != 0) {
-			pb->pb_flags &= ~(PBF_NONE);
-			if (good_pages != page_count) {
-				pb->pb_flags |= PBF_PARTIAL;
-			}
-		}
-	}
+	bp->pb_flags &= ~PBF_NONE;
+	bp->pb_flags |= (_PBF_PAGECACHE | _PBF_MEM_ALLOCATED);
 
-	PB_TRACE(pb, "lookup_pages", (long)good_pages);
+	/* if some pages aren't uptodate mark that in the buffer */
+	if (page_count != bp->pb_page_count)
+		bp->pb_flags |= PBF_PARTIAL;
 
-	return rval;
+	PB_TRACE(bp, "lookup_pages", (long)page_count);
+	return error;
+}
+
+/*
+ *	Map buffer into kernel address-space if nessecary.
+ */
+STATIC int
+_pagebuf_map_pages(
+	xfs_buf_t		*bp,
+	uint			flags)
+{
+	/* A single page buffer is always mappable */
+	if (bp->pb_page_count == 1) {
+		bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
+		bp->pb_flags |= PBF_MAPPED;
+	} else if (flags & PBF_MAPPED) {
+		if (as_list_len > 64)
+			purge_addresses();
+		bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
+				VM_MAP, PAGE_KERNEL);
+		if (unlikely(bp->pb_addr == NULL))
+			return -ENOMEM;
+		bp->pb_addr += bp->pb_offset;
+		bp->pb_flags |= PBF_MAPPED | _PBF_ADDR_ALLOCATED;
+	}
+
+	return 0;
 }
 
 /*

@@ -621,7 +586,6 @@ _pagebuf_find(				/* find buffer for block	*/
 	if (pb->pb_flags & PBF_STALE)
 		pb->pb_flags &= PBF_MAPPED | \
-				_PBF_ALL_PAGES_MAPPED | \
 				_PBF_ADDR_ALLOCATED | \
 				_PBF_MEM_ALLOCATED | \
 				_PBF_MEM_SLAB;

@@ -669,28 +633,39 @@ pagebuf_get(				/* allocate a buffer */
 	page_buf_flags_t	flags)	/* PBF_TRYLOCK */
 {
 	xfs_buf_t		*pb, *new_pb;
-	int			error;
+	int			error = 0, i;
 
 	new_pb = pagebuf_allocate(flags);
 	if (unlikely(!new_pb))
-		return (NULL);
+		return NULL;
 
 	pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
-	if (pb != new_pb) {
+	if (pb == new_pb) {
+		error = _pagebuf_lookup_pages(pb, flags);
+		if (unlikely(error)) {
+			printk(KERN_WARNING
+			       "pagebuf_get: failed to lookup pages\n");
+			goto no_buffer;
+		}
+	} else {
 		pagebuf_deallocate(new_pb);
-		if (unlikely(!pb))
-			return (NULL);
+		if (unlikely(pb == NULL))
+			return NULL;
 	}
 
-	/* fill in any missing pages */
-	if (!(pb->pb_flags & PBF_MAPPED)) {
-		error = _pagebuf_lookup_pages(pb, pb->pb_target->pbr_mapping,
-				flags);
-		if (unlikely(error)) {
-			printk(KERN_WARNING
-			       "pagebuf_get: warning, failed to lookup pages\n");
-			goto no_buffer;
-		}
-	}
-
 	XFS_STATS_INC(pb_get);
 
+	for (i = 0; i < pb->pb_page_count; i++)
+		mark_page_accessed(pb->pb_pages[i]);
+
+	error = _pagebuf_map_pages(pb, flags);
+	if (unlikely(error)) {
+		printk(KERN_WARNING
+			"pagebuf_get: failed to map pages\n");
+		goto no_buffer;
+	}
+
 	/*
 	 * Always fill in the block number now, the mapped cases can do
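One detail in the rewritten loop above deserves a closer look: when the filesystem block size is smaller than the page size, page->private is used as a bitmap of uptodate sectors, and the buffer only counts a page as usable when every sector bit covered by the requested range is set (the `if (i == range) page_count++;` case). The following standalone sketch shows just that bit-range check; it is illustrative only, range_uptodate() is an invented name, and test_bit() here is a plain C stand-in rather than the kernel helper.

#include <stdio.h>

/* plain stand-in for the kernel's test_bit() */
static int test_bit(unsigned int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

/*
 * Return 1 if every "sector" covered by [offset, offset + nbytes) is marked
 * uptodate in the bitmap, mirroring the loop in the new
 * _pagebuf_lookup_pages(): i only reaches `range` if no bit was clear.
 */
static int range_uptodate(unsigned long bitmap, unsigned int offset,
			  unsigned int nbytes, unsigned int sectorshift)
{
	unsigned int i, range = (offset + nbytes) >> sectorshift;

	for (i = offset >> sectorshift; i < range; i++)
		if (!test_bit(i, &bitmap))
			break;
	return i == range;
}

int main(void)
{
	/* 4096-byte page, 512-byte sectors -> 8 sectors, bits 0..7 */
	unsigned long uptodate = 0x0f;	/* only the first four sectors valid */

	printf("%d\n", range_uptodate(uptodate, 0, 2048, 9));	/* prints 1 */
	printf("%d\n", range_uptodate(uptodate, 0, 4096, 9));	/* prints 0 */
	return 0;
}

With 512-byte sectors (sectorshift 9) and only the first four sector bits set, a 2048-byte range passes the check while the full 4096-byte page does not, which is exactly when the rewritten code leaves the page counted as not uptodate.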
fs/xfs/linux/xfs_buf.h
@@ -84,7 +84,6 @@ typedef enum page_buf_flags_e {		/* pb_flags values */
 	/* flags used only internally */
 	_PBF_PAGECACHE = (1 << 16),		/* backed by pagecache		*/
-	_PBF_ALL_PAGES_MAPPED = (1 << 18),	/* all pages in range mapped	*/
 	_PBF_ADDR_ALLOCATED = (1 << 19),	/* pb_addr space was allocated	*/
 	_PBF_MEM_ALLOCATED = (1 << 20),		/* underlying pages are allocated */
 	_PBF_MEM_SLAB = (1 << 21),		/* underlying pages are slab allocated */
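The header hunk above removes _PBF_ALL_PAGES_MAPPED outright, and the _pagebuf_find() hunk earlier drops it from the mask applied when a stale buffer is reused, so a stale buffer's flags are now reduced to pure allocation and mapping state. Below is a small, hedged illustration of that masking idiom; the PBF_MAPPED bit position is made up for the example, while the _PBF_* positions are the ones from xfs_buf.h above.

#include <stdio.h>

/* a few of the pb_flags bits; PBF_MAPPED's position is invented here,
 * the _PBF_* values come from the header hunk above */
enum {
	PBF_MAPPED		= (1 << 5),	/* illustrative only */
	_PBF_PAGECACHE		= (1 << 16),	/* backed by pagecache */
	_PBF_ADDR_ALLOCATED	= (1 << 19),	/* pb_addr space was allocated */
	_PBF_MEM_ALLOCATED	= (1 << 20),	/* underlying pages are allocated */
	_PBF_MEM_SLAB		= (1 << 21),	/* underlying pages are slab allocated */
};

int main(void)
{
	unsigned int pb_flags = PBF_MAPPED | _PBF_PAGECACHE | _PBF_MEM_ALLOCATED;

	/* reusing a stale buffer: keep only allocation/mapping state;
	 * everything else (including the old _PBF_ALL_PAGES_MAPPED bit)
	 * is cleared */
	pb_flags &= PBF_MAPPED | _PBF_ADDR_ALLOCATED |
		    _PBF_MEM_ALLOCATED | _PBF_MEM_SLAB;

	printf("flags after mask: %#x\n", pb_flags);
	return 0;
}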