Commit c54e4da3 authored by Christoph Hellwig

[XFS] clarify pagebuf page lookup logic

SGI Modid: xfs-linux:xfs-kern:168168a
parent 1463079f
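
For orientation only (not part of the commit): the sketch below models, in plain user-space C, the reshaped flow visible in the diff that follows — page lookup and virtual mapping become two separate helpers (_pagebuf_lookup_pages() and the new _pagebuf_map_pages()), and pagebuf_get() only invokes the mapping step when the buffer is not already mapped. Every name and type in the sketch (demo_buf, demo_lookup_pages, demo_map_pages, DEMO_MAPPED) is a simplified stand-in, not the XFS pagebuf API.

#include <stdio.h>
#include <stdlib.h>

#define DEMO_MAPPED	(1 << 0)	/* stand-in for PBF_MAPPED */

struct demo_buf {
	void		**pages;	/* stand-in for pb_pages[] */
	unsigned int	page_count;	/* stand-in for pb_page_count */
	void		*addr;		/* stand-in for pb_addr */
	unsigned int	flags;
};

/* step 1: make sure every backing page exists (cf. _pagebuf_lookup_pages) */
static int demo_lookup_pages(struct demo_buf *bp, unsigned int page_count)
{
	unsigned int i;

	bp->pages = calloc(page_count, sizeof(*bp->pages));
	if (!bp->pages)
		return -1;
	for (i = 0; i < page_count; i++) {
		bp->pages[i] = calloc(1, 4096);	/* stand-in for find_or_create_page() */
		if (!bp->pages[i])
			return -1;
	}
	bp->page_count = page_count;
	return 0;
}

/* step 2: establish an address for the buffer only if needed (cf. _pagebuf_map_pages) */
static int demo_map_pages(struct demo_buf *bp, unsigned int flags)
{
	if (bp->page_count == 1) {
		/* a single page is always trivially "mappable" */
		bp->addr = bp->pages[0];
		bp->flags |= DEMO_MAPPED;
	} else if (flags & DEMO_MAPPED) {
		/* stand-in for vmap() of the whole page array */
		bp->addr = bp->pages[0];
		bp->flags |= DEMO_MAPPED;
	}
	return 0;
}

int main(void)
{
	struct demo_buf bp = { 0 };

	if (demo_lookup_pages(&bp, 4) || demo_map_pages(&bp, DEMO_MAPPED))
		return 1;
	printf("demo buffer: %u page(s), mapped=%d\n",
	       bp.page_count, !!(bp.flags & DEMO_MAPPED));
	return 0;
}

In the diff itself, pagebuf_get() calls the mapping helper only when PBF_MAPPED is not yet set on the buffer, so repeated lookups of an already-mapped buffer skip that work.
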
@@ -356,165 +356,130 @@ pagebuf_free(
}
/*
* _pagebuf_lookup_pages
*
* _pagebuf_lookup_pages finds all pages which match the buffer
* in question and the range of file offsets supplied,
* and builds the page list for the buffer, if the
* page list is not already formed or if not all of the pages are
* already in the list. Invalid pages (pages which have not yet been
* read in from disk) are assigned for any pages which are not found.
* Finds all pages for buffer in question and builds its page list.
*/
STATIC int
_pagebuf_lookup_pages(
xfs_buf_t *pb,
struct address_space *aspace,
page_buf_flags_t flags)
xfs_buf_t *bp,
uint flags)
{
loff_t next_buffer_offset;
unsigned long page_count, pi, index;
struct page *page;
struct address_space *mapping = bp->pb_target->pbr_mapping;
size_t blocksize = bp->pb_target->pbr_bsize;
size_t size = bp->pb_count_desired, nbytes;
size_t offset = bp->pb_offset;
int gfp_mask = pb_to_gfp(flags);
int all_mapped, good_pages, nbytes, rval, retries;
unsigned int blocksize, sectorshift;
size_t size, offset;
next_buffer_offset = pb->pb_file_offset + pb->pb_buffer_length;
good_pages = page_count = (page_buf_btoc(next_buffer_offset) -
page_buf_btoct(pb->pb_file_offset));
if (pb->pb_flags & _PBF_ALL_PAGES_MAPPED) {
/* Bring pages forward in cache */
for (pi = 0; pi < page_count; pi++) {
mark_page_accessed(pb->pb_pages[pi]);
}
if ((flags & PBF_MAPPED) && !(pb->pb_flags & PBF_MAPPED)) {
all_mapped = 1;
rval = 0;
goto mapit;
}
return 0;
}
unsigned long page_count, i;
pgoff_t first;
loff_t end;
int error;
/* Ensure pb_pages field has been initialised */
rval = _pagebuf_get_pages(pb, page_count, flags);
if (rval)
return rval;
first = (bp->pb_file_offset - bp->pb_offset) >> PAGE_CACHE_SHIFT;
end = bp->pb_file_offset + bp->pb_buffer_length;
page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);
all_mapped = 1;
blocksize = pb->pb_target->pbr_bsize;
sectorshift = pb->pb_target->pbr_sshift;
size = pb->pb_count_desired;
offset = pb->pb_offset;
/* Enter the pages in the page list */
index = (pb->pb_file_offset - pb->pb_offset) >> PAGE_CACHE_SHIFT;
for (pi = 0; pi < page_count; pi++, index++) {
if (pb->pb_pages[pi] == 0) {
retries = 0;
retry:
page = find_or_create_page(aspace, index, gfp_mask);
if (!page) {
if (flags & PBF_READ_AHEAD)
return -ENOMEM;
/*
* This could deadlock. But until all the
* XFS lowlevel code is revamped to handle
* buffer allocation failures we can't do
* much.
*/
if (!(++retries % 100)) {
printk(KERN_ERR
"possibly deadlocking in %s\n",
__FUNCTION__);
}
XFS_STATS_INC(pb_page_retries);
pagebuf_daemon_wakeup();
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(10);
goto retry;
error = _pagebuf_get_pages(bp, page_count, flags);
if (unlikely(error))
return error;
for (i = 0; i < bp->pb_page_count; i++) {
struct page *page;
uint retries = 0;
retry:
page = find_or_create_page(mapping, first + i, gfp_mask);
if (unlikely(page == NULL)) {
if (flags & PBF_READ_AHEAD)
return -ENOMEM;
/*
* This could deadlock.
*
* But until all the XFS lowlevel code is revamped to
* handle buffer allocation failures we can't do much.
*/
if (!(++retries % 100)) {
printk(KERN_ERR "possibly deadlocking in %s\n",
__FUNCTION__);
}
XFS_STATS_INC(pb_page_found);
mark_page_accessed(page);
pb->pb_pages[pi] = page;
} else {
page = pb->pb_pages[pi];
lock_page(page);
XFS_STATS_INC(pb_page_retries);
pagebuf_daemon_wakeup();
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(10);
goto retry;
}
nbytes = PAGE_CACHE_SIZE - offset;
if (nbytes > size)
nbytes = size;
XFS_STATS_INC(pb_page_found);
nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
size -= nbytes;
offset = 0;
if (!PageUptodate(page)) {
page_count--;
if (blocksize == PAGE_CACHE_SIZE) {
if (flags & PBF_READ)
pb->pb_locked = 1;
good_pages--;
bp->pb_locked = 1;
} else if (!PagePrivate(page)) {
unsigned long i, range;
uint sectorshift = bp->pb_target->pbr_sshift;
ulong range, i;
/*
* In this case page->private holds a bitmap
* of uptodate sectors within the page
*/
ASSERT(blocksize < PAGE_CACHE_SIZE);
range = (offset + nbytes) >> sectorshift;
for (i = offset >> sectorshift; i < range; i++)
if (!test_bit(i, &page->private))
break;
if (i != range)
good_pages--;
} else {
good_pages--;
if (i == range)
page_count++;
}
}
offset = 0;
}
if (!pb->pb_locked) {
for (pi = 0; pi < page_count; pi++) {
if (pb->pb_pages[pi])
unlock_page(pb->pb_pages[pi]);
}
bp->pb_pages[i] = page;
}
pb->pb_flags |= _PBF_PAGECACHE;
mapit:
pb->pb_flags |= _PBF_MEM_ALLOCATED;
if (all_mapped) {
pb->pb_flags |= _PBF_ALL_PAGES_MAPPED;
/* A single page buffer is always mappable */
if (page_count == 1) {
pb->pb_addr = (caddr_t)
page_address(pb->pb_pages[0]) + pb->pb_offset;
pb->pb_flags |= PBF_MAPPED;
} else if (flags & PBF_MAPPED) {
if (as_list_len > 64)
purge_addresses();
pb->pb_addr = vmap(pb->pb_pages, page_count,
VM_MAP, PAGE_KERNEL);
if (pb->pb_addr == NULL)
return -ENOMEM;
pb->pb_addr += pb->pb_offset;
pb->pb_flags |= PBF_MAPPED | _PBF_ADDR_ALLOCATED;
}
}
/* If some pages were found with data in them
* we are not in PBF_NONE state.
*/
if (good_pages != 0) {
pb->pb_flags &= ~(PBF_NONE);
if (good_pages != page_count) {
pb->pb_flags |= PBF_PARTIAL;
}
if (!bp->pb_locked) {
for (i = 0; i < bp->pb_page_count; i++)
unlock_page(bp->pb_pages[i]);
}
PB_TRACE(pb, "lookup_pages", (long)good_pages);
bp->pb_flags &= ~PBF_NONE;
bp->pb_flags |= (_PBF_PAGECACHE|_PBF_MEM_ALLOCATED);
return rval;
/* if some pages aren't uptodate mark that in the buffer */
if (page_count != bp->pb_page_count)
bp->pb_flags |= PBF_PARTIAL;
PB_TRACE(bp, "lookup_pages", (long)page_count);
return error;
}
/*
* Map buffer into kernel address-space if necessary.
*/
STATIC int
_pagebuf_map_pages(
xfs_buf_t *bp,
uint flags)
{
/* A single page buffer is always mappable */
if (bp->pb_page_count == 1) {
bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
bp->pb_flags |= PBF_MAPPED;
} else if (flags & PBF_MAPPED) {
if (as_list_len > 64)
purge_addresses();
bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
VM_MAP, PAGE_KERNEL);
if (unlikely(bp->pb_addr == NULL))
return -ENOMEM;
bp->pb_addr += bp->pb_offset;
bp->pb_flags |= PBF_MAPPED | _PBF_ADDR_ALLOCATED;
}
return 0;
}
/*
@@ -621,7 +586,6 @@ _pagebuf_find( /* find buffer for block */
if (pb->pb_flags & PBF_STALE)
pb->pb_flags &= PBF_MAPPED | \
_PBF_ALL_PAGES_MAPPED | \
_PBF_ADDR_ALLOCATED | \
_PBF_MEM_ALLOCATED | \
_PBF_MEM_SLAB;
@@ -669,29 +633,40 @@ pagebuf_get( /* allocate a buffer */
page_buf_flags_t flags) /* PBF_TRYLOCK */
{
xfs_buf_t *pb, *new_pb;
int error;
int error = 0, i;
new_pb = pagebuf_allocate(flags);
if (unlikely(!new_pb))
return (NULL);
return NULL;
pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
if (pb != new_pb) {
if (pb == new_pb) {
error = _pagebuf_lookup_pages(pb, flags);
if (unlikely(error)) {
printk(KERN_WARNING
"pagebuf_get: failed to lookup pages\n");
goto no_buffer;
}
} else {
pagebuf_deallocate(new_pb);
if (unlikely(!pb))
return (NULL);
if (unlikely(pb == NULL))
return NULL;
}
XFS_STATS_INC(pb_get);
for (i = 0; i < pb->pb_page_count; i++)
mark_page_accessed(pb->pb_pages[i]);
/* fill in any missing pages */
error = _pagebuf_lookup_pages(pb, pb->pb_target->pbr_mapping, flags);
if (unlikely(error)) {
printk(KERN_WARNING
"pagebuf_get: warning, failed to lookup pages\n");
goto no_buffer;
if (!(pb->pb_flags & PBF_MAPPED)) {
error = _pagebuf_map_pages(pb, flags);
if (unlikely(error)) {
printk(KERN_WARNING
"pagebuf_get: failed to map pages\n");
goto no_buffer;
}
}
XFS_STATS_INC(pb_get);
/*
* Always fill in the block number now, the mapped cases can do
* their own overlay of this later.
@@ -84,7 +84,6 @@ typedef enum page_buf_flags_e { /* pb_flags values */
/* flags used only internally */
_PBF_PAGECACHE = (1 << 16), /* backed by pagecache */
_PBF_ALL_PAGES_MAPPED = (1 << 18), /* all pages in range mapped */
_PBF_ADDR_ALLOCATED = (1 << 19), /* pb_addr space was allocated */
_PBF_MEM_ALLOCATED = (1 << 20), /* underlying pages are allocated */
_PBF_MEM_SLAB = (1 << 21), /* underlying pages are slab allocated */