Commit 02c51173 authored by Dave Chinner, committed by Dave Chinner

xfs: merge _xfs_buf_get_pages()

Only called from one place now, so merge it into
xfs_buf_alloc_pages(). Because page array allocation is dependent on
bp->b_pages being null, ensure that whenever the page array is
freed we always set bp->b_pages to null.

Also convert the page array to use kmalloc() rather than
kmem_alloc() so we can use the gfp flags we've already calculated
for the allocation context instead of hard coding KM_NOFS semantics.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
parent c9fa5630
...@@ -272,31 +272,6 @@ _xfs_buf_alloc( ...@@ -272,31 +272,6 @@ _xfs_buf_alloc(
return 0; return 0;
} }
/*
 * Allocate a page array capable of holding a specified number
 * of pages, and point the page buf at it.  If the buffer already
 * has a page list attached, it is left untouched.
 */
STATIC int
_xfs_buf_get_pages(
	struct xfs_buf	*bp,
	int		page_count)
{
	/* Nothing to do if a page list is already present. */
	if (bp->b_pages != NULL)
		return 0;

	bp->b_page_count = page_count;
	if (page_count > XB_PAGES) {
		/*
		 * KM_NOFS is hard coded here; allocation must not
		 * recurse into the filesystem.
		 */
		bp->b_pages = kmem_alloc(sizeof(struct page *) * page_count,
					 KM_NOFS);
		if (bp->b_pages == NULL)
			return -ENOMEM;
	} else {
		/* Small buffers use the array embedded in the buffer. */
		bp->b_pages = bp->b_page_array;
	}
	memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	return 0;
}
/* /*
* Frees b_pages if it was allocated. * Frees b_pages if it was allocated.
*/ */
...@@ -304,10 +279,9 @@ STATIC void ...@@ -304,10 +279,9 @@ STATIC void
_xfs_buf_free_pages( _xfs_buf_free_pages(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
if (bp->b_pages != bp->b_page_array) { if (bp->b_pages != bp->b_page_array)
kmem_free(bp->b_pages); kmem_free(bp->b_pages);
bp->b_pages = NULL; bp->b_pages = NULL;
}
} }
/* /*
...@@ -389,16 +363,22 @@ xfs_buf_alloc_pages( ...@@ -389,16 +363,22 @@ xfs_buf_alloc_pages(
long filled = 0; long filled = 0;
int error; int error;
/* Make sure that we have a page list */
bp->b_page_count = page_count;
if (bp->b_page_count <= XB_PAGES) {
bp->b_pages = bp->b_page_array;
} else {
bp->b_pages = kzalloc(sizeof(struct page *) * bp->b_page_count,
gfp_mask);
if (!bp->b_pages)
return -ENOMEM;
}
bp->b_flags |= _XBF_PAGES;
/* Assure zeroed buffer for non-read cases. */ /* Assure zeroed buffer for non-read cases. */
if (!(flags & XBF_READ)) if (!(flags & XBF_READ))
gfp_mask |= __GFP_ZERO; gfp_mask |= __GFP_ZERO;
error = _xfs_buf_get_pages(bp, page_count);
if (unlikely(error))
return error;
bp->b_flags |= _XBF_PAGES;
/* /*
* Bulk filling of pages can take multiple calls. Not filling the entire * Bulk filling of pages can take multiple calls. Not filling the entire
* array is not an allocation failure, so don't back off if we get at * array is not an allocation failure, so don't back off if we get at
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment