Commit e7d236a6 authored by Dave Chinner, committed by Dave Chinner

xfs: move page freeing into _xfs_buf_free_pages()

Move the page freeing into _xfs_buf_free_pages() rather than open
coding it just before we call _xfs_buf_free_pages(). Also, rename the
function to xfs_buf_free_pages() as the leading underscore has no
useful meaning.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
parent 02c51173
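
For readers outside fs/xfs, here is a minimal userspace sketch of the pattern this commit applies: the open-coded page-freeing loop is consolidated into one helper that tolerates a partially populated page array, so both the normal free path and the allocation error path can call it. All names below (fake_buf, fake_buf_free_pages(), FAKE_XBF_PAGES, ...) are hypothetical stand-ins rather than the kernel API, and the sketch omits the vmapped-buffer and reclaim-accounting handling the real helper does.

/*
 * Minimal userspace sketch of the refactoring pattern in this commit.
 * One helper frees whatever pages exist (skipping NULL slots) and
 * clears the "has pages" state, so the free path and the allocation
 * error path share the same cleanup code.
 */
#include <stdio.h>
#include <stdlib.h>

#define FAKE_XBF_PAGES	(1u << 0)	/* stand-in for _XBF_PAGES */

struct fake_page {
	int	id;
};

struct fake_buf {
	unsigned int		flags;
	unsigned int		page_count;
	struct fake_page	**pages;
};

/* Consolidated helper: safe to call on a partially populated array. */
static void fake_buf_free_pages(struct fake_buf *bp)
{
	unsigned int	i;

	for (i = 0; i < bp->page_count; i++) {
		if (bp->pages[i])	/* slot may never have been filled */
			free(bp->pages[i]);
	}
	free(bp->pages);
	bp->pages = NULL;
	bp->page_count = 0;
	bp->flags &= ~FAKE_XBF_PAGES;
}

/* Allocation path: a failure mid-loop is unwound by one helper call. */
static int fake_buf_alloc_pages(struct fake_buf *bp, unsigned int count)
{
	unsigned int	i;

	bp->pages = calloc(count, sizeof(*bp->pages));
	if (!bp->pages)
		return -1;
	bp->page_count = count;
	bp->flags |= FAKE_XBF_PAGES;

	for (i = 0; i < count; i++) {
		bp->pages[i] = malloc(sizeof(**bp->pages));
		if (!bp->pages[i]) {
			fake_buf_free_pages(bp);	/* replaces a goto label */
			return -1;
		}
		bp->pages[i]->id = (int)i;
	}
	return 0;
}

/* Free path: the same helper replaces the open-coded loop. */
static void fake_buf_free(struct fake_buf *bp)
{
	if (bp->flags & FAKE_XBF_PAGES)
		fake_buf_free_pages(bp);
}

int main(void)
{
	struct fake_buf	bp = { 0 };

	if (fake_buf_alloc_pages(&bp, 4) == 0)
		printf("allocated %u fake pages\n", bp.page_count);
	fake_buf_free(&bp);
	return 0;
}

The sketch only shows the shape of the shared cleanup helper, not XFS behaviour; the actual change follows in the diff below.
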
@@ -272,25 +272,30 @@ _xfs_buf_alloc(
 	return 0;
 }
 
-/*
- * Frees b_pages if it was allocated.
- */
-STATIC void
-_xfs_buf_free_pages(
+static void
+xfs_buf_free_pages(
 	struct xfs_buf	*bp)
 {
+	uint		i;
+
+	ASSERT(bp->b_flags & _XBF_PAGES);
+
+	if (xfs_buf_is_vmapped(bp))
+		vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
+
+	for (i = 0; i < bp->b_page_count; i++) {
+		if (bp->b_pages[i])
+			__free_page(bp->b_pages[i]);
+	}
+	if (current->reclaim_state)
+		current->reclaim_state->reclaimed_slab += bp->b_page_count;
+
 	if (bp->b_pages != bp->b_page_array)
 		kmem_free(bp->b_pages);
 	bp->b_pages = NULL;
+	bp->b_flags &= ~_XBF_PAGES;
 }
 
-/*
- * Releases the specified buffer.
- *
- * The modification state of any associated pages is left unchanged.
- * The buffer must not be on any hash - use xfs_buf_rele instead for
- * hashed and refcounted buffers
- */
 static void
 xfs_buf_free(
 	struct xfs_buf	*bp)
@@ -299,24 +304,11 @@ xfs_buf_free(
 	ASSERT(list_empty(&bp->b_lru));
 
-	if (bp->b_flags & _XBF_PAGES) {
-		uint		i;
-
-		if (xfs_buf_is_vmapped(bp))
-			vm_unmap_ram(bp->b_addr - bp->b_offset,
-					bp->b_page_count);
-
-		for (i = 0; i < bp->b_page_count; i++) {
-			struct page	*page = bp->b_pages[i];
-			__free_page(page);
-		}
-		if (current->reclaim_state)
-			current->reclaim_state->reclaimed_slab +=
-							bp->b_page_count;
-	} else if (bp->b_flags & _XBF_KMEM)
+	if (bp->b_flags & _XBF_PAGES)
+		xfs_buf_free_pages(bp);
+	else if (bp->b_flags & _XBF_KMEM)
 		kmem_free(bp->b_addr);
 
-	_xfs_buf_free_pages(bp);
 	xfs_buf_free_maps(bp);
 	kmem_cache_free(xfs_buf_zone, bp);
 }
@@ -361,7 +353,6 @@ xfs_buf_alloc_pages(
 {
 	gfp_t		gfp_mask = xb_to_gfp(flags);
 	long		filled = 0;
-	int		error;
 
 	/* Make sure that we have a page list */
 	bp->b_page_count = page_count;
@@ -398,20 +389,14 @@ xfs_buf_alloc_pages(
 			continue;
 
 		if (flags & XBF_READ_AHEAD) {
-			error = -ENOMEM;
-			goto out_free_pages;
+			xfs_buf_free_pages(bp);
+			return -ENOMEM;
 		}
 
 		XFS_STATS_INC(bp->b_mount, xb_page_retries);
 		congestion_wait(BLK_RW_ASYNC, HZ / 50);
 	}
 	return 0;
-
-out_free_pages:
-	while (--filled >= 0)
-		__free_page(bp->b_pages[filled]);
-	bp->b_flags &= ~_XBF_PAGES;
-	return error;
 }
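A note on the last two hunks: because xfs_buf_free_pages() checks each bp->b_pages[i] for NULL before calling __free_page(), it can be invoked on the partially populated array left behind when the bulk allocation loop fails, which is what lets xfs_buf_alloc_pages() drop the out_free_pages unwind label and the local error variable. The helper also clears _XBF_PAGES, so a later xfs_buf_free() on the same buffer will not try to free the pages a second time.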