Commit 822e10da authored by Nathan Scott, committed by Christoph Hellwig

[XFS] cleanup pagebuf flag usage and simplify pagebuf_free.

SGI Modid: xfs-linux:xfs-kern:169276a
parent 63c642fd
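
In outline: the old code tracked allocation state with four internal flags (_PBF_PAGECACHE, _PBF_ADDR_ALLOCATED, _PBF_MEM_ALLOCATED, _PBF_MEM_SLAB); after this commit a buffer carries exactly one backing-store flag and pagebuf_free dispatches on it. A standalone userspace sketch of that dispatch (flag values are taken from the xfs_buf.h hunk below; the model function and its messages are illustrative, not kernel code):

#include <stdio.h>

#define _PBF_PAGE_CACHE	(1 << 17)	/* backed by pagecache */
#define _PBF_KMEM_ALLOC	(1 << 18)	/* backed by kmem_alloc() */

/* Stands in for the reworked pagebuf_free() dispatch. */
static void model_pagebuf_free(unsigned int pb_flags)
{
	if (pb_flags & _PBF_PAGE_CACHE)
		puts("page cache: page_cache_release() each page, free pb_pages");
	else if (pb_flags & _PBF_KMEM_ALLOC)
		puts("kmem: kmem_free() the flat buffer, free pb_pages");
	/* else pagebuf allocated nothing; only the header is deallocated */
}

int main(void)
{
	model_pagebuf_free(_PBF_PAGE_CACHE);	/* pagebuf_get() style buffer */
	model_pagebuf_free(_PBF_KMEM_ALLOC);	/* pagebuf_get_no_daddr() style buffer */
	return 0;
}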
@@ -165,8 +165,6 @@ _bhash(
  * Mapping of multi-page buffers into contiguous virtual space
  */
-STATIC void *pagebuf_mapout_locked(xfs_buf_t *);
-
 typedef struct a_list {
 	void		*vm_addr;
 	struct a_list	*next;
@@ -288,67 +286,53 @@ _pagebuf_get_pages(
 }
 
 /*
- *	Walk a pagebuf releasing all the pages contained within it.
+ *	Frees pb_pages if it was malloced.
  */
-STATIC inline void
-_pagebuf_freepages(
-	xfs_buf_t		*pb)
+STATIC void
+_pagebuf_free_pages(
	xfs_buf_t	*bp)
 {
-	int			buf_index;
-
-	for (buf_index = 0; buf_index < pb->pb_page_count; buf_index++) {
-		struct page	*page = pb->pb_pages[buf_index];
-
-		if (page) {
-			pb->pb_pages[buf_index] = NULL;
-			page_cache_release(page);
-		}
+	if (bp->pb_pages != bp->pb_page_array) {
+		kmem_free(bp->pb_pages,
+			  bp->pb_page_count * sizeof(struct page *));
 	}
 }
 
 /*
- *	pagebuf_free
+ *	Releases the specified buffer.
  *
- *	pagebuf_free releases the specified buffer.  The modification
- *	state of any associated pages is left unchanged.
+ *	The modification state of any associated pages is left unchanged.
+ *	The buffer must not be on any hash - use pagebuf_rele instead for
+ *	hashed and refcounted buffers.
  */
 void
 pagebuf_free(
-	xfs_buf_t		*pb)
+	xfs_buf_t		*bp)
 {
-	PB_TRACE(pb, "free", 0);
-
-	ASSERT(list_empty(&pb->pb_hash_list));
-
-	/* release any virtual mapping */ ;
-	if (pb->pb_flags & _PBF_ADDR_ALLOCATED) {
-		void *vaddr = pagebuf_mapout_locked(pb);
-		if (vaddr) {
-			free_address(vaddr);
-		}
-	}
+	PB_TRACE(bp, "free", 0);
 
-	if (pb->pb_flags & _PBF_MEM_ALLOCATED) {
-		if (pb->pb_pages) {
-			if (pb->pb_flags & _PBF_MEM_SLAB) {
-				/*
-				 * XXX: bp->pb_count_desired might be incorrect
-				 * (see pagebuf_associate_memory for details),
-				 * but fortunately the Linux version of
-				 * kmem_free ignores the len argument..
-				 */
-				kmem_free(pb->pb_addr, pb->pb_count_desired);
-			} else {
-				_pagebuf_freepages(pb);
-			}
-			if (pb->pb_pages != pb->pb_page_array)
-				kfree(pb->pb_pages);
-			pb->pb_pages = NULL;
-		}
-		pb->pb_flags &= ~(_PBF_MEM_ALLOCATED|_PBF_MEM_SLAB);
+	ASSERT(list_empty(&bp->pb_hash_list));
+
+	if (bp->pb_flags & _PBF_PAGE_CACHE) {
+		uint		i;
+
+		if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1))
+			free_address(bp->pb_addr - bp->pb_offset);
+
+		for (i = 0; i < bp->pb_page_count; i++)
+			page_cache_release(bp->pb_pages[i]);
+		_pagebuf_free_pages(bp);
+	} else if (bp->pb_flags & _PBF_KMEM_ALLOC) {
+		/*
+		 * XXX(hch): bp->pb_count_desired might be incorrect (see
+		 * pagebuf_associate_memory for details), but fortunately
+		 * the Linux version of kmem_free ignores the len argument..
+		 */
+		kmem_free(bp->pb_addr, bp->pb_count_desired);
+		_pagebuf_free_pages(bp);
 	}
 
-	pagebuf_deallocate(pb);
+	pagebuf_deallocate(bp);
 }
 
 /*
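
The helper above frees the pb_pages pointer array only when it was malloced, i.e. when it no longer points at the buffer's inline pb_page_array. A self-contained userspace model of that inline-array idiom (the struct layout and PB_PAGES value are assumptions for illustration; only the pointer comparison mirrors _pagebuf_free_pages):

#include <stdlib.h>

#define PB_PAGES	2	/* inline slots; the real constant is not shown in this diff */

struct fake_buf {
	void	**pb_pages;			/* array currently in use */
	void	*pb_page_array[PB_PAGES];	/* inline storage for small buffers */
	int	pb_page_count;
};

static int setup_pages(struct fake_buf *bp, int count)
{
	bp->pb_page_count = count;
	if (count <= PB_PAGES) {
		bp->pb_pages = bp->pb_page_array;	/* no allocation needed */
	} else {
		bp->pb_pages = malloc(count * sizeof(void *));
		if (bp->pb_pages == NULL)
			return -1;
	}
	return 0;
}

/* Mirrors _pagebuf_free_pages(): free the array only if it was malloced. */
static void free_pages_array(struct fake_buf *bp)
{
	if (bp->pb_pages != bp->pb_page_array)
		free(bp->pb_pages);
}

int main(void)
{
	struct fake_buf small, big;

	if (setup_pages(&small, 1) == 0)
		free_pages_array(&small);	/* inline array: free() is skipped */
	if (setup_pages(&big, 8) == 0)
		free_pages_array(&big);		/* heap array: free() runs */
	return 0;
}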
@@ -443,7 +427,7 @@ _pagebuf_lookup_pages(
 			unlock_page(bp->pb_pages[i]);
 	}
 
-	bp->pb_flags |= (_PBF_PAGECACHE|_PBF_MEM_ALLOCATED);
+	bp->pb_flags |= _PBF_PAGE_CACHE;
 
 	if (page_count) {
 		/* if we have any uptodate pages, mark that in the buffer */
@@ -478,7 +462,7 @@ _pagebuf_map_pages(
 		if (unlikely(bp->pb_addr == NULL))
 			return -ENOMEM;
 		bp->pb_addr += bp->pb_offset;
-		bp->pb_flags |= PBF_MAPPED | _PBF_ADDR_ALLOCATED;
+		bp->pb_flags |= PBF_MAPPED;
 	}
 
 	return 0;
@@ -584,10 +568,7 @@ _pagebuf_find(	/* find buffer for block */
 		}
 
 		if (pb->pb_flags & PBF_STALE)
-			pb->pb_flags &= PBF_MAPPED | \
-					_PBF_ADDR_ALLOCATED | \
-					_PBF_MEM_ALLOCATED | \
-					_PBF_MEM_SLAB;
+			pb->pb_flags &= PBF_MAPPED;
 		PB_TRACE(pb, "got_lock", 0);
 		XFS_STATS_INC(pb_get_locked);
 		return (pb);
@@ -789,9 +770,9 @@ pagebuf_associate_memory(
 			page_count++;
 
 	/* Free any previous set of page pointers */
-	if (pb->pb_pages && (pb->pb_pages != pb->pb_page_array)) {
-		kfree(pb->pb_pages);
-	}
+	if (pb->pb_pages)
+		_pagebuf_free_pages(pb);
+
 	pb->pb_pages = NULL;
 	pb->pb_addr = mem;
@@ -856,7 +837,7 @@ pagebuf_get_no_daddr(
 	error = pagebuf_associate_memory(bp, data, len);
 	if (error)
 		goto fail_free_mem;
-	bp->pb_flags |= (_PBF_MEM_ALLOCATED | _PBF_MEM_SLAB);
+	bp->pb_flags |= _PBF_KMEM_ALLOC;
 
 	pagebuf_unlock(bp);
@@ -1189,9 +1170,9 @@ pagebuf_iostart(	/* start I/O on a buffer */
 	}
 
 	pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \
-			PBF_READ_AHEAD | PBF_RUN_QUEUES);
+			PBF_READ_AHEAD | _PBF_RUN_QUEUES);
 	pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
-			PBF_READ_AHEAD | PBF_RUN_QUEUES);
+			PBF_READ_AHEAD | _PBF_RUN_QUEUES);
 
 	BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);
@@ -1378,8 +1359,8 @@ _pagebuf_ioapply(
 		pagebuf_ioerror(pb, EIO);
 	}
 
-	if (pb->pb_flags & PBF_RUN_QUEUES) {
-		pb->pb_flags &= ~PBF_RUN_QUEUES;
+	if (pb->pb_flags & _PBF_RUN_QUEUES) {
+		pb->pb_flags &= ~_PBF_RUN_QUEUES;
 		if (atomic_read(&pb->pb_io_remaining) > 1)
 			blk_run_address_space(pb->pb_target->pbr_mapping);
 	}
@@ -1436,25 +1417,6 @@ pagebuf_iowait(
 	return pb->pb_error;
 }
 
-STATIC void *
-pagebuf_mapout_locked(
-	xfs_buf_t		*pb)
-{
-	void			*old_addr = NULL;
-
-	if (pb->pb_flags & PBF_MAPPED) {
-		if (pb->pb_flags & _PBF_ADDR_ALLOCATED)
-			old_addr = pb->pb_addr - pb->pb_offset;
-		pb->pb_addr = NULL;
-		pb->pb_flags &= ~(PBF_MAPPED | _PBF_ADDR_ALLOCATED);
-	}
-
-	return old_addr;	/* Caller must free the address space,
-				 * we are under a spin lock, probably
-				 * not safe to do vfree here
-				 */
-}
-
 caddr_t
 pagebuf_offset(
 	xfs_buf_t		*pb,
@@ -73,26 +73,22 @@ typedef enum page_buf_flags_e {	/* pb_flags values */
 	PBF_ASYNC = (1 << 4),	/* initiator will not wait for completion */
 	PBF_NONE = (1 << 5),	/* buffer not read at all */
 	PBF_DELWRI = (1 << 6),	/* buffer has dirty pages */
-	PBF_STALE = (1 << 10),	/* buffer has been staled, do not find it */
-	PBF_FS_MANAGED = (1 << 11), /* filesystem controls freeing memory */
-	PBF_FS_DATAIOD = (1 << 12), /* schedule IO completion on fs datad */
+	PBF_STALE = (1 << 7),	/* buffer has been staled, do not find it */
+	PBF_FS_MANAGED = (1 << 8),  /* filesystem controls freeing memory */
+	PBF_FS_DATAIOD = (1 << 9),  /* schedule IO completion on fs datad */
+	PBF_FORCEIO = (1 << 10),    /* ignore any cache state */
+	PBF_FLUSH = (1 << 11),	    /* flush disk write cache */
+	PBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
 
 	/* flags used only as arguments to access routines */
-	PBF_LOCK = (1 << 13),	/* lock requested */
-	PBF_TRYLOCK = (1 << 14), /* lock requested, but do not wait */
-	PBF_DONT_BLOCK = (1 << 15), /* do not block in current thread */
+	PBF_LOCK = (1 << 14),	    /* lock requested */
+	PBF_TRYLOCK = (1 << 15),    /* lock requested, but do not wait */
+	PBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */
 
 	/* flags used only internally */
-	_PBF_PAGECACHE = (1 << 16), /* backed by pagecache */
-	_PBF_ADDR_ALLOCATED = (1 << 19), /* pb_addr space was allocated */
-	_PBF_MEM_ALLOCATED = (1 << 20), /* underlying pages are allocated */
-	_PBF_MEM_SLAB = (1 << 21), /* underlying pages are slab allocated */
-	PBF_FORCEIO = (1 << 22), /* ignore any cache state */
-	PBF_FLUSH = (1 << 23),	/* flush disk write cache */
-	PBF_READ_AHEAD = (1 << 24), /* asynchronous read-ahead */
-	PBF_RUN_QUEUES = (1 << 25), /* run block device task queue */
+	_PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
+	_PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */
+	_PBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
 } page_buf_flags_t;
 
 #define PBF_UPDATE	(PBF_READ | PBF_WRITE)
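
With the renumbering, every flag now fits in bits 0 through 19, and the argument-only and internal groups must stay disjoint. A throwaway userspace check of that property, using only values from the hunk above:

#include <assert.h>

#define PBF_LOCK	(1 << 14)
#define PBF_TRYLOCK	(1 << 15)
#define PBF_DONT_BLOCK	(1 << 16)
#define _PBF_PAGE_CACHE	(1 << 17)
#define _PBF_KMEM_ALLOC	(1 << 18)
#define _PBF_RUN_QUEUES	(1 << 19)

int main(void)
{
	unsigned int args = PBF_LOCK | PBF_TRYLOCK | PBF_DONT_BLOCK;
	unsigned int internal = _PBF_PAGE_CACHE | _PBF_KMEM_ALLOC | _PBF_RUN_QUEUES;

	assert((args & internal) == 0);	/* the two groups share no bits */
	return 0;
}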
@@ -508,7 +504,7 @@ static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
 	bp->pb_fspriv3 = mp;
 	bp->pb_strat = xfs_bdstrat_cb;
 	xfs_buf_undelay(bp);
-	return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | PBF_RUN_QUEUES);
+	return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | _PBF_RUN_QUEUES);
 }
 
 static inline void xfs_buf_relse(xfs_buf_t *bp)
@@ -545,7 +541,7 @@ static inline int XFS_bwrite(xfs_buf_t *pb)
 	int		error = 0;
 
 	if (!iowait)
-		pb->pb_flags |= PBF_RUN_QUEUES;
+		pb->pb_flags |= _PBF_RUN_QUEUES;
 
 	xfs_buf_undelay(pb);
 	pagebuf_iostrategy(pb);
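
_PBF_RUN_QUEUES works as a set-once/consume-once handshake: XFS_bwrite (above) and xfs_bawrite set it when the caller will not wait, and _pagebuf_ioapply clears it after kicking the block device queues. A minimal userspace model of the handshake (only the flag value comes from this diff; the rest is illustrative):

#include <stdio.h>

#define _PBF_RUN_QUEUES	(1 << 19)	/* run block device task queue */

/* Stands in for _pagebuf_ioapply(): consume the flag, kick the queues once. */
static void model_ioapply(unsigned int *pb_flags)
{
	if (*pb_flags & _PBF_RUN_QUEUES) {
		*pb_flags &= ~_PBF_RUN_QUEUES;
		puts("blk_run_address_space(...)");	/* queues kicked */
	}
}

int main(void)
{
	unsigned int flags = 0;

	flags |= _PBF_RUN_QUEUES;	/* as XFS_bwrite() does when !iowait */
	model_ioapply(&flags);		/* runs the queues */
	model_ioapply(&flags);		/* no-op: flag already consumed */
	return 0;
}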