Commit ce8e922c authored by Nathan Scott

[XFS] Complete the pagebuf -> xfs_buf naming convention transition, finally.

SGI-PV: 947038
SGI-Modid: xfs-linux-melb:xfs-kern:24866a
Signed-off-by: Nathan Scott <nathans@sgi.com>
parent 68bdb6ea
@@ -273,7 +273,7 @@ xfs_map_at_offset(
lock_buffer(bh); lock_buffer(bh);
bh->b_blocknr = bn; bh->b_blocknr = bn;
bh->b_bdev = iomapp->iomap_target->pbr_bdev; bh->b_bdev = iomapp->iomap_target->bt_bdev;
set_buffer_mapped(bh); set_buffer_mapped(bh);
clear_buffer_delay(bh); clear_buffer_delay(bh);
} }
@@ -982,7 +982,7 @@ __linvfs_get_block(
} }
/* If this is a realtime file, data might be on a new device */ /* If this is a realtime file, data might be on a new device */
bh_result->b_bdev = iomap.iomap_target->pbr_bdev; bh_result->b_bdev = iomap.iomap_target->bt_bdev;
/* If we previously allocated a block out beyond eof and /* If we previously allocated a block out beyond eof and
* we are now coming back to use it then we will need to * we are now coming back to use it then we will need to
@@ -1097,7 +1097,7 @@ linvfs_direct_IO(
iocb->private = xfs_alloc_ioend(inode); iocb->private = xfs_alloc_ioend(inode);
ret = blockdev_direct_IO_own_locking(rw, iocb, inode, ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
iomap.iomap_target->pbr_bdev, iomap.iomap_target->bt_bdev,
iov, offset, nr_segs, iov, offset, nr_segs,
linvfs_get_blocks_direct, linvfs_get_blocks_direct,
linvfs_end_io_direct); linvfs_end_io_direct);
......
@@ -31,64 +31,64 @@
#include <linux/kthread.h> #include <linux/kthread.h>
#include "xfs_linux.h" #include "xfs_linux.h"
STATIC kmem_cache_t *pagebuf_zone; STATIC kmem_zone_t *xfs_buf_zone;
STATIC kmem_shaker_t pagebuf_shake; STATIC kmem_shaker_t xfs_buf_shake;
STATIC int xfsbufd(void *); STATIC int xfsbufd(void *);
STATIC int xfsbufd_wakeup(int, gfp_t); STATIC int xfsbufd_wakeup(int, gfp_t);
STATIC void pagebuf_delwri_queue(xfs_buf_t *, int); STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
STATIC struct workqueue_struct *xfslogd_workqueue; STATIC struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue; struct workqueue_struct *xfsdatad_workqueue;
#ifdef PAGEBUF_TRACE #ifdef XFS_BUF_TRACE
void void
pagebuf_trace( xfs_buf_trace(
xfs_buf_t *pb, xfs_buf_t *bp,
char *id, char *id,
void *data, void *data,
void *ra) void *ra)
{ {
ktrace_enter(pagebuf_trace_buf, ktrace_enter(xfs_buf_trace_buf,
pb, id, bp, id,
(void *)(unsigned long)pb->pb_flags, (void *)(unsigned long)bp->b_flags,
(void *)(unsigned long)pb->pb_hold.counter, (void *)(unsigned long)bp->b_hold.counter,
(void *)(unsigned long)pb->pb_sema.count.counter, (void *)(unsigned long)bp->b_sema.count.counter,
(void *)current, (void *)current,
data, ra, data, ra,
(void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff), (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
(void *)(unsigned long)(pb->pb_file_offset & 0xffffffff), (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
(void *)(unsigned long)pb->pb_buffer_length, (void *)(unsigned long)bp->b_buffer_length,
NULL, NULL, NULL, NULL, NULL); NULL, NULL, NULL, NULL, NULL);
} }
ktrace_t *pagebuf_trace_buf; ktrace_t *xfs_buf_trace_buf;
#define PAGEBUF_TRACE_SIZE 4096 #define XFS_BUF_TRACE_SIZE 4096
#define PB_TRACE(pb, id, data) \ #define XB_TRACE(bp, id, data) \
pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0)) xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
#else #else
#define PB_TRACE(pb, id, data) do { } while (0) #define XB_TRACE(bp, id, data) do { } while (0)
#endif #endif
#ifdef PAGEBUF_LOCK_TRACKING #ifdef XFS_BUF_LOCK_TRACKING
# define PB_SET_OWNER(pb) ((pb)->pb_last_holder = current->pid) # define XB_SET_OWNER(bp) ((bp)->b_last_holder = current->pid)
# define PB_CLEAR_OWNER(pb) ((pb)->pb_last_holder = -1) # define XB_CLEAR_OWNER(bp) ((bp)->b_last_holder = -1)
# define PB_GET_OWNER(pb) ((pb)->pb_last_holder) # define XB_GET_OWNER(bp) ((bp)->b_last_holder)
#else #else
# define PB_SET_OWNER(pb) do { } while (0) # define XB_SET_OWNER(bp) do { } while (0)
# define PB_CLEAR_OWNER(pb) do { } while (0) # define XB_CLEAR_OWNER(bp) do { } while (0)
# define PB_GET_OWNER(pb) do { } while (0) # define XB_GET_OWNER(bp) do { } while (0)
#endif #endif
#define pb_to_gfp(flags) \ #define xb_to_gfp(flags) \
((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \ ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN) ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
#define pb_to_km(flags) \ #define xb_to_km(flags) \
(((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP) (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
#define pagebuf_allocate(flags) \ #define xfs_buf_allocate(flags) \
kmem_zone_alloc(pagebuf_zone, pb_to_km(flags)) kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define pagebuf_deallocate(pb) \ #define xfs_buf_deallocate(bp) \
kmem_zone_free(pagebuf_zone, (pb)); kmem_zone_free(xfs_buf_zone, (bp));
/* /*
* Page Region interfaces. * Page Region interfaces.
@@ -216,44 +216,44 @@ purge_addresses(void)
} }
/* /*
* Internal pagebuf object manipulation * Internal xfs_buf_t object manipulation
*/ */
STATIC void STATIC void
_pagebuf_initialize( _xfs_buf_initialize(
xfs_buf_t *pb, xfs_buf_t *bp,
xfs_buftarg_t *target, xfs_buftarg_t *target,
loff_t range_base, loff_t range_base,
size_t range_length, size_t range_length,
page_buf_flags_t flags) xfs_buf_flags_t flags)
{ {
/* /*
* We don't want certain flags to appear in pb->pb_flags. * We don't want certain flags to appear in b_flags.
*/ */
flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD); flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
memset(pb, 0, sizeof(xfs_buf_t)); memset(bp, 0, sizeof(xfs_buf_t));
atomic_set(&pb->pb_hold, 1); atomic_set(&bp->b_hold, 1);
init_MUTEX_LOCKED(&pb->pb_iodonesema); init_MUTEX_LOCKED(&bp->b_iodonesema);
INIT_LIST_HEAD(&pb->pb_list); INIT_LIST_HEAD(&bp->b_list);
INIT_LIST_HEAD(&pb->pb_hash_list); INIT_LIST_HEAD(&bp->b_hash_list);
init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */ init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
PB_SET_OWNER(pb); XB_SET_OWNER(bp);
pb->pb_target = target; bp->b_target = target;
pb->pb_file_offset = range_base; bp->b_file_offset = range_base;
/* /*
* Set buffer_length and count_desired to the same value initially. * Set buffer_length and count_desired to the same value initially.
* I/O routines should use count_desired, which will be the same in * I/O routines should use count_desired, which will be the same in
* most cases but may be reset (e.g. XFS recovery). * most cases but may be reset (e.g. XFS recovery).
*/ */
pb->pb_buffer_length = pb->pb_count_desired = range_length; bp->b_buffer_length = bp->b_count_desired = range_length;
pb->pb_flags = flags; bp->b_flags = flags;
pb->pb_bn = XFS_BUF_DADDR_NULL; bp->b_bn = XFS_BUF_DADDR_NULL;
atomic_set(&pb->pb_pin_count, 0); atomic_set(&bp->b_pin_count, 0);
init_waitqueue_head(&pb->pb_waiters); init_waitqueue_head(&bp->b_waiters);
XFS_STATS_INC(pb_create); XFS_STATS_INC(xb_create);
PB_TRACE(pb, "initialize", target); XB_TRACE(bp, "initialize", target);
} }
/* /*
@@ -261,38 +261,38 @@ _pagebuf_initialize(
* of pages, and point the page buf at it. * of pages, and point the page buf at it.
*/ */
STATIC int STATIC int
_pagebuf_get_pages( _xfs_buf_get_pages(
xfs_buf_t *pb, xfs_buf_t *bp,
int page_count, int page_count,
page_buf_flags_t flags) xfs_buf_flags_t flags)
{ {
/* Make sure that we have a page list */ /* Make sure that we have a page list */
if (pb->pb_pages == NULL) { if (bp->b_pages == NULL) {
pb->pb_offset = page_buf_poff(pb->pb_file_offset); bp->b_offset = xfs_buf_poff(bp->b_file_offset);
pb->pb_page_count = page_count; bp->b_page_count = page_count;
if (page_count <= PB_PAGES) { if (page_count <= XB_PAGES) {
pb->pb_pages = pb->pb_page_array; bp->b_pages = bp->b_page_array;
} else { } else {
pb->pb_pages = kmem_alloc(sizeof(struct page *) * bp->b_pages = kmem_alloc(sizeof(struct page *) *
page_count, pb_to_km(flags)); page_count, xb_to_km(flags));
if (pb->pb_pages == NULL) if (bp->b_pages == NULL)
return -ENOMEM; return -ENOMEM;
} }
memset(pb->pb_pages, 0, sizeof(struct page *) * page_count); memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
} }
return 0; return 0;
} }
/* /*
* Frees pb_pages if it was malloced. * Frees b_pages if it was allocated.
*/ */
STATIC void STATIC void
_pagebuf_free_pages( _xfs_buf_free_pages(
xfs_buf_t *bp) xfs_buf_t *bp)
{ {
if (bp->pb_pages != bp->pb_page_array) { if (bp->b_pages != bp->b_page_array) {
kmem_free(bp->pb_pages, kmem_free(bp->b_pages,
bp->pb_page_count * sizeof(struct page *)); bp->b_page_count * sizeof(struct page *));
} }
} }
@@ -300,79 +300,79 @@ _pagebuf_free_pages(
* Releases the specified buffer. * Releases the specified buffer.
* *
* The modification state of any associated pages is left unchanged. * The modification state of any associated pages is left unchanged.
* The buffer most not be on any hash - use pagebuf_rele instead for * The buffer most not be on any hash - use xfs_buf_rele instead for
* hashed and refcounted buffers * hashed and refcounted buffers
*/ */
void void
pagebuf_free( xfs_buf_free(
xfs_buf_t *bp) xfs_buf_t *bp)
{ {
PB_TRACE(bp, "free", 0); XB_TRACE(bp, "free", 0);
ASSERT(list_empty(&bp->pb_hash_list)); ASSERT(list_empty(&bp->b_hash_list));
if (bp->pb_flags & _PBF_PAGE_CACHE) { if (bp->b_flags & _XBF_PAGE_CACHE) {
uint i; uint i;
if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1)) if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
free_address(bp->pb_addr - bp->pb_offset); free_address(bp->b_addr - bp->b_offset);
for (i = 0; i < bp->pb_page_count; i++) for (i = 0; i < bp->b_page_count; i++)
page_cache_release(bp->pb_pages[i]); page_cache_release(bp->b_pages[i]);
_pagebuf_free_pages(bp); _xfs_buf_free_pages(bp);
} else if (bp->pb_flags & _PBF_KMEM_ALLOC) { } else if (bp->b_flags & _XBF_KMEM_ALLOC) {
/* /*
* XXX(hch): bp->pb_count_desired might be incorrect (see * XXX(hch): bp->b_count_desired might be incorrect (see
* pagebuf_associate_memory for details), but fortunately * xfs_buf_associate_memory for details), but fortunately
* the Linux version of kmem_free ignores the len argument.. * the Linux version of kmem_free ignores the len argument..
*/ */
kmem_free(bp->pb_addr, bp->pb_count_desired); kmem_free(bp->b_addr, bp->b_count_desired);
_pagebuf_free_pages(bp); _xfs_buf_free_pages(bp);
} }
pagebuf_deallocate(bp); xfs_buf_deallocate(bp);
} }
/* /*
* Finds all pages for buffer in question and builds it's page list. * Finds all pages for buffer in question and builds it's page list.
*/ */
STATIC int STATIC int
_pagebuf_lookup_pages( _xfs_buf_lookup_pages(
xfs_buf_t *bp, xfs_buf_t *bp,
uint flags) uint flags)
{ {
struct address_space *mapping = bp->pb_target->pbr_mapping; struct address_space *mapping = bp->b_target->bt_mapping;
size_t blocksize = bp->pb_target->pbr_bsize; size_t blocksize = bp->b_target->bt_bsize;
size_t size = bp->pb_count_desired; size_t size = bp->b_count_desired;
size_t nbytes, offset; size_t nbytes, offset;
gfp_t gfp_mask = pb_to_gfp(flags); gfp_t gfp_mask = xb_to_gfp(flags);
unsigned short page_count, i; unsigned short page_count, i;
pgoff_t first; pgoff_t first;
loff_t end; loff_t end;
int error; int error;
end = bp->pb_file_offset + bp->pb_buffer_length; end = bp->b_file_offset + bp->b_buffer_length;
page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset); page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
error = _pagebuf_get_pages(bp, page_count, flags); error = _xfs_buf_get_pages(bp, page_count, flags);
if (unlikely(error)) if (unlikely(error))
return error; return error;
bp->pb_flags |= _PBF_PAGE_CACHE; bp->b_flags |= _XBF_PAGE_CACHE;
offset = bp->pb_offset; offset = bp->b_offset;
first = bp->pb_file_offset >> PAGE_CACHE_SHIFT; first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
for (i = 0; i < bp->pb_page_count; i++) { for (i = 0; i < bp->b_page_count; i++) {
struct page *page; struct page *page;
uint retries = 0; uint retries = 0;
retry: retry:
page = find_or_create_page(mapping, first + i, gfp_mask); page = find_or_create_page(mapping, first + i, gfp_mask);
if (unlikely(page == NULL)) { if (unlikely(page == NULL)) {
if (flags & PBF_READ_AHEAD) { if (flags & XBF_READ_AHEAD) {
bp->pb_page_count = i; bp->b_page_count = i;
for (i = 0; i < bp->pb_page_count; i++) for (i = 0; i < bp->b_page_count; i++)
unlock_page(bp->pb_pages[i]); unlock_page(bp->b_pages[i]);
return -ENOMEM; return -ENOMEM;
} }
@@ -388,13 +388,13 @@ _pagebuf_lookup_pages(
"deadlock in %s (mode:0x%x)\n", "deadlock in %s (mode:0x%x)\n",
__FUNCTION__, gfp_mask); __FUNCTION__, gfp_mask);
XFS_STATS_INC(pb_page_retries); XFS_STATS_INC(xb_page_retries);
xfsbufd_wakeup(0, gfp_mask); xfsbufd_wakeup(0, gfp_mask);
blk_congestion_wait(WRITE, HZ/50); blk_congestion_wait(WRITE, HZ/50);
goto retry; goto retry;
} }
XFS_STATS_INC(pb_page_found); XFS_STATS_INC(xb_page_found);
nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset); nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
size -= nbytes; size -= nbytes;
@@ -402,27 +402,27 @@ _pagebuf_lookup_pages(
if (!PageUptodate(page)) { if (!PageUptodate(page)) {
page_count--; page_count--;
if (blocksize >= PAGE_CACHE_SIZE) { if (blocksize >= PAGE_CACHE_SIZE) {
if (flags & PBF_READ) if (flags & XBF_READ)
bp->pb_locked = 1; bp->b_locked = 1;
} else if (!PagePrivate(page)) { } else if (!PagePrivate(page)) {
if (test_page_region(page, offset, nbytes)) if (test_page_region(page, offset, nbytes))
page_count++; page_count++;
} }
} }
bp->pb_pages[i] = page; bp->b_pages[i] = page;
offset = 0; offset = 0;
} }
if (!bp->pb_locked) { if (!bp->b_locked) {
for (i = 0; i < bp->pb_page_count; i++) for (i = 0; i < bp->b_page_count; i++)
unlock_page(bp->pb_pages[i]); unlock_page(bp->b_pages[i]);
} }
if (page_count == bp->pb_page_count) if (page_count == bp->b_page_count)
bp->pb_flags |= PBF_DONE; bp->b_flags |= XBF_DONE;
PB_TRACE(bp, "lookup_pages", (long)page_count); XB_TRACE(bp, "lookup_pages", (long)page_count);
return error; return error;
} }
@@ -430,23 +430,23 @@ _pagebuf_lookup_pages(
* Map buffer into kernel address-space if nessecary. * Map buffer into kernel address-space if nessecary.
*/ */
STATIC int STATIC int
_pagebuf_map_pages( _xfs_buf_map_pages(
xfs_buf_t *bp, xfs_buf_t *bp,
uint flags) uint flags)
{ {
/* A single page buffer is always mappable */ /* A single page buffer is always mappable */
if (bp->pb_page_count == 1) { if (bp->b_page_count == 1) {
bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset; bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
bp->pb_flags |= PBF_MAPPED; bp->b_flags |= XBF_MAPPED;
} else if (flags & PBF_MAPPED) { } else if (flags & XBF_MAPPED) {
if (as_list_len > 64) if (as_list_len > 64)
purge_addresses(); purge_addresses();
bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count, bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
VM_MAP, PAGE_KERNEL); VM_MAP, PAGE_KERNEL);
if (unlikely(bp->pb_addr == NULL)) if (unlikely(bp->b_addr == NULL))
return -ENOMEM; return -ENOMEM;
bp->pb_addr += bp->pb_offset; bp->b_addr += bp->b_offset;
bp->pb_flags |= PBF_MAPPED; bp->b_flags |= XBF_MAPPED;
} }
return 0; return 0;
@@ -457,9 +457,7 @@ _pagebuf_map_pages(
*/ */
/* /*
* _pagebuf_find * Look up, and creates if absent, a lockable buffer for
*
* Looks up, and creates if absent, a lockable buffer for
* a given range of an inode. The buffer is returned * a given range of an inode. The buffer is returned
* locked. If other overlapping buffers exist, they are * locked. If other overlapping buffers exist, they are
* released before the new buffer is created and locked, * released before the new buffer is created and locked,
@@ -467,55 +465,55 @@ _pagebuf_map_pages(
* are unlocked. No I/O is implied by this call. * are unlocked. No I/O is implied by this call.
*/ */
xfs_buf_t * xfs_buf_t *
_pagebuf_find( _xfs_buf_find(
xfs_buftarg_t *btp, /* block device target */ xfs_buftarg_t *btp, /* block device target */
loff_t ioff, /* starting offset of range */ loff_t ioff, /* starting offset of range */
size_t isize, /* length of range */ size_t isize, /* length of range */
page_buf_flags_t flags, /* PBF_TRYLOCK */ xfs_buf_flags_t flags,
xfs_buf_t *new_pb)/* newly allocated buffer */ xfs_buf_t *new_bp)
{ {
loff_t range_base; loff_t range_base;
size_t range_length; size_t range_length;
xfs_bufhash_t *hash; xfs_bufhash_t *hash;
xfs_buf_t *pb, *n; xfs_buf_t *bp, *n;
range_base = (ioff << BBSHIFT); range_base = (ioff << BBSHIFT);
range_length = (isize << BBSHIFT); range_length = (isize << BBSHIFT);
/* Check for IOs smaller than the sector size / not sector aligned */ /* Check for IOs smaller than the sector size / not sector aligned */
ASSERT(!(range_length < (1 << btp->pbr_sshift))); ASSERT(!(range_length < (1 << btp->bt_sshift)));
ASSERT(!(range_base & (loff_t)btp->pbr_smask)); ASSERT(!(range_base & (loff_t)btp->bt_smask));
hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)]; hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
spin_lock(&hash->bh_lock); spin_lock(&hash->bh_lock);
list_for_each_entry_safe(pb, n, &hash->bh_list, pb_hash_list) { list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
ASSERT(btp == pb->pb_target); ASSERT(btp == bp->b_target);
if (pb->pb_file_offset == range_base && if (bp->b_file_offset == range_base &&
pb->pb_buffer_length == range_length) { bp->b_buffer_length == range_length) {
/* /*
* If we look at something bring it to the * If we look at something, bring it to the
* front of the list for next time. * front of the list for next time.
*/ */
atomic_inc(&pb->pb_hold); atomic_inc(&bp->b_hold);
list_move(&pb->pb_hash_list, &hash->bh_list); list_move(&bp->b_hash_list, &hash->bh_list);
goto found; goto found;
} }
} }
/* No match found */ /* No match found */
if (new_pb) { if (new_bp) {
_pagebuf_initialize(new_pb, btp, range_base, _xfs_buf_initialize(new_bp, btp, range_base,
range_length, flags); range_length, flags);
new_pb->pb_hash = hash; new_bp->b_hash = hash;
list_add(&new_pb->pb_hash_list, &hash->bh_list); list_add(&new_bp->b_hash_list, &hash->bh_list);
} else { } else {
XFS_STATS_INC(pb_miss_locked); XFS_STATS_INC(xb_miss_locked);
} }
spin_unlock(&hash->bh_lock); spin_unlock(&hash->bh_lock);
return new_pb; return new_bp;
found: found:
spin_unlock(&hash->bh_lock); spin_unlock(&hash->bh_lock);
@@ -524,74 +522,72 @@ _pagebuf_find(
* if this does not work then we need to drop the * if this does not work then we need to drop the
* spinlock and do a hard attempt on the semaphore. * spinlock and do a hard attempt on the semaphore.
*/ */
if (down_trylock(&pb->pb_sema)) { if (down_trylock(&bp->b_sema)) {
if (!(flags & PBF_TRYLOCK)) { if (!(flags & XBF_TRYLOCK)) {
/* wait for buffer ownership */ /* wait for buffer ownership */
PB_TRACE(pb, "get_lock", 0); XB_TRACE(bp, "get_lock", 0);
pagebuf_lock(pb); xfs_buf_lock(bp);
XFS_STATS_INC(pb_get_locked_waited); XFS_STATS_INC(xb_get_locked_waited);
} else { } else {
/* We asked for a trylock and failed, no need /* We asked for a trylock and failed, no need
* to look at file offset and length here, we * to look at file offset and length here, we
* know that this pagebuf at least overlaps our * know that this buffer at least overlaps our
* pagebuf and is locked, therefore our buffer * buffer and is locked, therefore our buffer
* either does not exist, or is this buffer * either does not exist, or is this buffer.
*/ */
xfs_buf_rele(bp);
pagebuf_rele(pb); XFS_STATS_INC(xb_busy_locked);
XFS_STATS_INC(pb_busy_locked); return NULL;
return (NULL);
} }
} else { } else {
/* trylock worked */ /* trylock worked */
PB_SET_OWNER(pb); XB_SET_OWNER(bp);
} }
if (pb->pb_flags & PBF_STALE) { if (bp->b_flags & XBF_STALE) {
ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0); ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
pb->pb_flags &= PBF_MAPPED; bp->b_flags &= XBF_MAPPED;
} }
PB_TRACE(pb, "got_lock", 0); XB_TRACE(bp, "got_lock", 0);
XFS_STATS_INC(pb_get_locked); XFS_STATS_INC(xb_get_locked);
return (pb); return bp;
} }
/* /*
* xfs_buf_get_flags assembles a buffer covering the specified range. * Assembles a buffer covering the specified range.
*
* Storage in memory for all portions of the buffer will be allocated, * Storage in memory for all portions of the buffer will be allocated,
* although backing storage may not be. * although backing storage may not be.
*/ */
xfs_buf_t * xfs_buf_t *
xfs_buf_get_flags( /* allocate a buffer */ xfs_buf_get_flags(
xfs_buftarg_t *target,/* target for buffer */ xfs_buftarg_t *target,/* target for buffer */
loff_t ioff, /* starting offset of range */ loff_t ioff, /* starting offset of range */
size_t isize, /* length of range */ size_t isize, /* length of range */
page_buf_flags_t flags) /* PBF_TRYLOCK */ xfs_buf_flags_t flags)
{ {
xfs_buf_t *pb, *new_pb; xfs_buf_t *bp, *new_bp;
int error = 0, i; int error = 0, i;
new_pb = pagebuf_allocate(flags); new_bp = xfs_buf_allocate(flags);
if (unlikely(!new_pb)) if (unlikely(!new_bp))
return NULL; return NULL;
pb = _pagebuf_find(target, ioff, isize, flags, new_pb); bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
if (pb == new_pb) { if (bp == new_bp) {
error = _pagebuf_lookup_pages(pb, flags); error = _xfs_buf_lookup_pages(bp, flags);
if (error) if (error)
goto no_buffer; goto no_buffer;
} else { } else {
pagebuf_deallocate(new_pb); xfs_buf_deallocate(new_bp);
if (unlikely(pb == NULL)) if (unlikely(bp == NULL))
return NULL; return NULL;
} }
for (i = 0; i < pb->pb_page_count; i++) for (i = 0; i < bp->b_page_count; i++)
mark_page_accessed(pb->pb_pages[i]); mark_page_accessed(bp->b_pages[i]);
if (!(pb->pb_flags & PBF_MAPPED)) { if (!(bp->b_flags & XBF_MAPPED)) {
error = _pagebuf_map_pages(pb, flags); error = _xfs_buf_map_pages(bp, flags);
if (unlikely(error)) { if (unlikely(error)) {
printk(KERN_WARNING "%s: failed to map pages\n", printk(KERN_WARNING "%s: failed to map pages\n",
__FUNCTION__); __FUNCTION__);
@@ -599,22 +595,22 @@ xfs_buf_get_flags( /* allocate a buffer */
} }
} }
XFS_STATS_INC(pb_get); XFS_STATS_INC(xb_get);
/* /*
* Always fill in the block number now, the mapped cases can do * Always fill in the block number now, the mapped cases can do
* their own overlay of this later. * their own overlay of this later.
*/ */
pb->pb_bn = ioff; bp->b_bn = ioff;
pb->pb_count_desired = pb->pb_buffer_length; bp->b_count_desired = bp->b_buffer_length;
PB_TRACE(pb, "get", (unsigned long)flags); XB_TRACE(bp, "get", (unsigned long)flags);
return pb; return bp;
no_buffer: no_buffer:
if (flags & (PBF_LOCK | PBF_TRYLOCK)) if (flags & (XBF_LOCK | XBF_TRYLOCK))
pagebuf_unlock(pb); xfs_buf_unlock(bp);
pagebuf_rele(pb); xfs_buf_rele(bp);
return NULL; return NULL;
} }
@@ -623,38 +619,38 @@ xfs_buf_read_flags(
xfs_buftarg_t *target, xfs_buftarg_t *target,
loff_t ioff, loff_t ioff,
size_t isize, size_t isize,
page_buf_flags_t flags) xfs_buf_flags_t flags)
{ {
xfs_buf_t *pb; xfs_buf_t *bp;
flags |= PBF_READ; flags |= XBF_READ;
pb = xfs_buf_get_flags(target, ioff, isize, flags); bp = xfs_buf_get_flags(target, ioff, isize, flags);
if (pb) { if (bp) {
if (!XFS_BUF_ISDONE(pb)) { if (!XFS_BUF_ISDONE(bp)) {
PB_TRACE(pb, "read", (unsigned long)flags); XB_TRACE(bp, "read", (unsigned long)flags);
XFS_STATS_INC(pb_get_read); XFS_STATS_INC(xb_get_read);
pagebuf_iostart(pb, flags); xfs_buf_iostart(bp, flags);
} else if (flags & PBF_ASYNC) { } else if (flags & XBF_ASYNC) {
PB_TRACE(pb, "read_async", (unsigned long)flags); XB_TRACE(bp, "read_async", (unsigned long)flags);
/* /*
* Read ahead call which is already satisfied, * Read ahead call which is already satisfied,
* drop the buffer * drop the buffer
*/ */
goto no_buffer; goto no_buffer;
} else { } else {
PB_TRACE(pb, "read_done", (unsigned long)flags); XB_TRACE(bp, "read_done", (unsigned long)flags);
/* We do not want read in the flags */ /* We do not want read in the flags */
pb->pb_flags &= ~PBF_READ; bp->b_flags &= ~XBF_READ;
} }
} }
return pb; return bp;
no_buffer: no_buffer:
if (flags & (PBF_LOCK | PBF_TRYLOCK)) if (flags & (XBF_LOCK | XBF_TRYLOCK))
pagebuf_unlock(pb); xfs_buf_unlock(bp);
pagebuf_rele(pb); xfs_buf_rele(bp);
return NULL; return NULL;
} }
@@ -663,33 +659,33 @@ xfs_buf_read_flags(
* safe manner. * safe manner.
*/ */
void void
pagebuf_readahead( xfs_buf_readahead(
xfs_buftarg_t *target, xfs_buftarg_t *target,
loff_t ioff, loff_t ioff,
size_t isize, size_t isize,
page_buf_flags_t flags) xfs_buf_flags_t flags)
{ {
struct backing_dev_info *bdi; struct backing_dev_info *bdi;
bdi = target->pbr_mapping->backing_dev_info; bdi = target->bt_mapping->backing_dev_info;
if (bdi_read_congested(bdi)) if (bdi_read_congested(bdi))
return; return;
flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD); flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
xfs_buf_read_flags(target, ioff, isize, flags); xfs_buf_read_flags(target, ioff, isize, flags);
} }
xfs_buf_t * xfs_buf_t *
pagebuf_get_empty( xfs_buf_get_empty(
size_t len, size_t len,
xfs_buftarg_t *target) xfs_buftarg_t *target)
{ {
xfs_buf_t *pb; xfs_buf_t *bp;
pb = pagebuf_allocate(0); bp = xfs_buf_allocate(0);
if (pb) if (bp)
_pagebuf_initialize(pb, target, 0, len, 0); _xfs_buf_initialize(bp, target, 0, len, 0);
return pb; return bp;
} }
static inline struct page * static inline struct page *
@@ -705,8 +701,8 @@ mem_to_page(
} }
int int
pagebuf_associate_memory( xfs_buf_associate_memory(
xfs_buf_t *pb, xfs_buf_t *bp,
void *mem, void *mem,
size_t len) size_t len)
{ {
@@ -723,40 +719,40 @@ pagebuf_associate_memory(
page_count++; page_count++;
/* Free any previous set of page pointers */ /* Free any previous set of page pointers */
if (pb->pb_pages) if (bp->b_pages)
_pagebuf_free_pages(pb); _xfs_buf_free_pages(bp);
pb->pb_pages = NULL; bp->b_pages = NULL;
pb->pb_addr = mem; bp->b_addr = mem;
rval = _pagebuf_get_pages(pb, page_count, 0); rval = _xfs_buf_get_pages(bp, page_count, 0);
if (rval) if (rval)
return rval; return rval;
pb->pb_offset = offset; bp->b_offset = offset;
ptr = (size_t) mem & PAGE_CACHE_MASK; ptr = (size_t) mem & PAGE_CACHE_MASK;
end = PAGE_CACHE_ALIGN((size_t) mem + len); end = PAGE_CACHE_ALIGN((size_t) mem + len);
end_cur = end; end_cur = end;
/* set up first page */ /* set up first page */
pb->pb_pages[0] = mem_to_page(mem); bp->b_pages[0] = mem_to_page(mem);
ptr += PAGE_CACHE_SIZE; ptr += PAGE_CACHE_SIZE;
pb->pb_page_count = ++i; bp->b_page_count = ++i;
while (ptr < end) { while (ptr < end) {
pb->pb_pages[i] = mem_to_page((void *)ptr); bp->b_pages[i] = mem_to_page((void *)ptr);
pb->pb_page_count = ++i; bp->b_page_count = ++i;
ptr += PAGE_CACHE_SIZE; ptr += PAGE_CACHE_SIZE;
} }
pb->pb_locked = 0; bp->b_locked = 0;
pb->pb_count_desired = pb->pb_buffer_length = len; bp->b_count_desired = bp->b_buffer_length = len;
pb->pb_flags |= PBF_MAPPED; bp->b_flags |= XBF_MAPPED;
return 0; return 0;
} }
xfs_buf_t * xfs_buf_t *
pagebuf_get_no_daddr( xfs_buf_get_noaddr(
size_t len, size_t len,
xfs_buftarg_t *target) xfs_buftarg_t *target)
{ {
@@ -765,10 +761,10 @@ pagebuf_get_no_daddr(
void *data; void *data;
int error; int error;
bp = pagebuf_allocate(0); bp = xfs_buf_allocate(0);
if (unlikely(bp == NULL)) if (unlikely(bp == NULL))
goto fail; goto fail;
_pagebuf_initialize(bp, target, 0, len, 0); _xfs_buf_initialize(bp, target, 0, len, 0);
try_again: try_again:
data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL); data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
@@ -777,78 +773,73 @@ pagebuf_get_no_daddr(
/* check whether alignment matches.. */ /* check whether alignment matches.. */
if ((__psunsigned_t)data != if ((__psunsigned_t)data !=
((__psunsigned_t)data & ~target->pbr_smask)) { ((__psunsigned_t)data & ~target->bt_smask)) {
/* .. else double the size and try again */ /* .. else double the size and try again */
kmem_free(data, malloc_len); kmem_free(data, malloc_len);
malloc_len <<= 1; malloc_len <<= 1;
goto try_again; goto try_again;
} }
error = pagebuf_associate_memory(bp, data, len); error = xfs_buf_associate_memory(bp, data, len);
if (error) if (error)
goto fail_free_mem; goto fail_free_mem;
bp->pb_flags |= _PBF_KMEM_ALLOC; bp->b_flags |= _XBF_KMEM_ALLOC;
pagebuf_unlock(bp); xfs_buf_unlock(bp);
PB_TRACE(bp, "no_daddr", data); XB_TRACE(bp, "no_daddr", data);
return bp; return bp;
fail_free_mem: fail_free_mem:
kmem_free(data, malloc_len); kmem_free(data, malloc_len);
fail_free_buf: fail_free_buf:
pagebuf_free(bp); xfs_buf_free(bp);
fail: fail:
return NULL; return NULL;
} }
/* /*
* pagebuf_hold
*
* Increment reference count on buffer, to hold the buffer concurrently * Increment reference count on buffer, to hold the buffer concurrently
* with another thread which may release (free) the buffer asynchronously. * with another thread which may release (free) the buffer asynchronously.
*
* Must hold the buffer already to call this function. * Must hold the buffer already to call this function.
*/ */
void void
pagebuf_hold( xfs_buf_hold(
xfs_buf_t *pb) xfs_buf_t *bp)
{ {
atomic_inc(&pb->pb_hold); atomic_inc(&bp->b_hold);
PB_TRACE(pb, "hold", 0); XB_TRACE(bp, "hold", 0);
} }
/* /*
* pagebuf_rele * Releases a hold on the specified buffer. If the
* * the hold count is 1, calls xfs_buf_free.
* pagebuf_rele releases a hold on the specified buffer. If the
* the hold count is 1, pagebuf_rele calls pagebuf_free.
*/ */
void void
pagebuf_rele( xfs_buf_rele(
xfs_buf_t *pb) xfs_buf_t *bp)
{ {
xfs_bufhash_t *hash = pb->pb_hash; xfs_bufhash_t *hash = bp->b_hash;
PB_TRACE(pb, "rele", pb->pb_relse); XB_TRACE(bp, "rele", bp->b_relse);
if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) { if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
if (pb->pb_relse) { if (bp->b_relse) {
atomic_inc(&pb->pb_hold); atomic_inc(&bp->b_hold);
spin_unlock(&hash->bh_lock); spin_unlock(&hash->bh_lock);
(*(pb->pb_relse)) (pb); (*(bp->b_relse)) (bp);
} else if (pb->pb_flags & PBF_FS_MANAGED) { } else if (bp->b_flags & XBF_FS_MANAGED) {
spin_unlock(&hash->bh_lock); spin_unlock(&hash->bh_lock);
} else { } else {
ASSERT(!(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q))); ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
list_del_init(&pb->pb_hash_list); list_del_init(&bp->b_hash_list);
spin_unlock(&hash->bh_lock); spin_unlock(&hash->bh_lock);
pagebuf_free(pb); xfs_buf_free(bp);
} }
} else { } else {
/* /*
* Catch reference count leaks * Catch reference count leaks
*/ */
ASSERT(atomic_read(&pb->pb_hold) >= 0); ASSERT(atomic_read(&bp->b_hold) >= 0);
} }
} }
@@ -864,168 +855,122 @@ pagebuf_rele(
*/ */
/* /*
* pagebuf_cond_lock * Locks a buffer object, if it is not already locked.
* * Note that this in no way locks the underlying pages, so it is only
* pagebuf_cond_lock locks a buffer object, if it is not already locked. * useful for synchronizing concurrent use of buffer objects, not for
* Note that this in no way * synchronizing independent access to the underlying pages.
* locks the underlying pages, so it is only useful for synchronizing
* concurrent use of page buffer objects, not for synchronizing independent
* access to the underlying pages.
*/ */
int int
pagebuf_cond_lock( /* lock buffer, if not locked */ xfs_buf_cond_lock(
/* returns -EBUSY if locked) */ xfs_buf_t *bp)
xfs_buf_t *pb)
{ {
int locked; int locked;
locked = down_trylock(&pb->pb_sema) == 0; locked = down_trylock(&bp->b_sema) == 0;
if (locked) { if (locked) {
PB_SET_OWNER(pb); XB_SET_OWNER(bp);
} }
PB_TRACE(pb, "cond_lock", (long)locked); XB_TRACE(bp, "cond_lock", (long)locked);
return(locked ? 0 : -EBUSY); return locked ? 0 : -EBUSY;
} }
#if defined(DEBUG) || defined(XFS_BLI_TRACE) #if defined(DEBUG) || defined(XFS_BLI_TRACE)
/*
* pagebuf_lock_value
*
* Return lock value for a pagebuf
*/
int int
pagebuf_lock_value( xfs_buf_lock_value(
xfs_buf_t *pb) xfs_buf_t *bp)
{ {
return(atomic_read(&pb->pb_sema.count)); return atomic_read(&bp->b_sema.count);
} }
#endif #endif
/* /*
* pagebuf_lock * Locks a buffer object.
* * Note that this in no way locks the underlying pages, so it is only
* pagebuf_lock locks a buffer object. Note that this in no way * useful for synchronizing concurrent use of buffer objects, not for
* locks the underlying pages, so it is only useful for synchronizing * synchronizing independent access to the underlying pages.
* concurrent use of page buffer objects, not for synchronizing independent
* access to the underlying pages.
*/ */
int void
pagebuf_lock( xfs_buf_lock(
xfs_buf_t *pb) xfs_buf_t *bp)
{ {
PB_TRACE(pb, "lock", 0); XB_TRACE(bp, "lock", 0);
if (atomic_read(&pb->pb_io_remaining)) if (atomic_read(&bp->b_io_remaining))
blk_run_address_space(pb->pb_target->pbr_mapping); blk_run_address_space(bp->b_target->bt_mapping);
down(&pb->pb_sema); down(&bp->b_sema);
PB_SET_OWNER(pb); XB_SET_OWNER(bp);
PB_TRACE(pb, "locked", 0); XB_TRACE(bp, "locked", 0);
return 0;
} }
/* /*
* pagebuf_unlock * Releases the lock on the buffer object.
*
* pagebuf_unlock releases the lock on the buffer object created by
* pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
* created by pagebuf_pin).
*
* If the buffer is marked delwri but is not queued, do so before we * If the buffer is marked delwri but is not queued, do so before we
* unlock the buffer as we need to set flags correctly. We also need to * unlock the buffer as we need to set flags correctly. We also need to
* take a reference for the delwri queue because the unlocker is going to * take a reference for the delwri queue because the unlocker is going to
* drop their's and they don't know we just queued it. * drop their's and they don't know we just queued it.
*/ */
void void
pagebuf_unlock( /* unlock buffer */ xfs_buf_unlock(
xfs_buf_t *pb) /* buffer to unlock */ xfs_buf_t *bp)
{ {
if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) { if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
atomic_inc(&pb->pb_hold); atomic_inc(&bp->b_hold);
pb->pb_flags |= PBF_ASYNC; bp->b_flags |= XBF_ASYNC;
pagebuf_delwri_queue(pb, 0); xfs_buf_delwri_queue(bp, 0);
} }
PB_CLEAR_OWNER(pb); XB_CLEAR_OWNER(bp);
up(&pb->pb_sema); up(&bp->b_sema);
PB_TRACE(pb, "unlock", 0); XB_TRACE(bp, "unlock", 0);
} }
/* /*
* Pinning Buffer Storage in Memory * Pinning Buffer Storage in Memory
*/ * Ensure that no attempt to force a buffer to disk will succeed.
/*
* pagebuf_pin
*
* pagebuf_pin locks all of the memory represented by a buffer in
* memory. Multiple calls to pagebuf_pin and pagebuf_unpin, for
* the same or different buffers affecting a given page, will
* properly count the number of outstanding "pin" requests. The
* buffer may be released after the pagebuf_pin and a different
* buffer used when calling pagebuf_unpin, if desired.
* pagebuf_pin should be used by the file system when it wants be
* assured that no attempt will be made to force the affected
* memory to disk. It does not assure that a given logical page
* will not be moved to a different physical page.
*/ */
void void
pagebuf_pin( xfs_buf_pin(
xfs_buf_t *pb) xfs_buf_t *bp)
{ {
atomic_inc(&pb->pb_pin_count); atomic_inc(&bp->b_pin_count);
PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter); XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
} }
/*
* pagebuf_unpin
*
* pagebuf_unpin reverses the locking of memory performed by
* pagebuf_pin. Note that both functions affected the logical
* pages associated with the buffer, not the buffer itself.
*/
void void
pagebuf_unpin( xfs_buf_unpin(
xfs_buf_t *pb) xfs_buf_t *bp)
{ {
if (atomic_dec_and_test(&pb->pb_pin_count)) { if (atomic_dec_and_test(&bp->b_pin_count))
wake_up_all(&pb->pb_waiters); wake_up_all(&bp->b_waiters);
} XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
} }
int int
pagebuf_ispin( xfs_buf_ispin(
xfs_buf_t *pb) xfs_buf_t *bp)
{ {
return atomic_read(&pb->pb_pin_count); return atomic_read(&bp->b_pin_count);
} }
/* STATIC void
* pagebuf_wait_unpin xfs_buf_wait_unpin(
* xfs_buf_t *bp)
* pagebuf_wait_unpin waits until all of the memory associated
* with the buffer is not longer locked in memory. It returns
* immediately if none of the affected pages are locked.
*/
static inline void
_pagebuf_wait_unpin(
xfs_buf_t *pb)
{ {
DECLARE_WAITQUEUE (wait, current); DECLARE_WAITQUEUE (wait, current);
if (atomic_read(&pb->pb_pin_count) == 0) if (atomic_read(&bp->b_pin_count) == 0)
return; return;
add_wait_queue(&pb->pb_waiters, &wait); add_wait_queue(&bp->b_waiters, &wait);
for (;;) { for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE); set_current_state(TASK_UNINTERRUPTIBLE);
if (atomic_read(&pb->pb_pin_count) == 0) if (atomic_read(&bp->b_pin_count) == 0)
break; break;
if (atomic_read(&pb->pb_io_remaining)) if (atomic_read(&bp->b_io_remaining))
blk_run_address_space(pb->pb_target->pbr_mapping); blk_run_address_space(bp->b_target->bt_mapping);
schedule(); schedule();
} }
remove_wait_queue(&pb->pb_waiters, &wait); remove_wait_queue(&bp->b_waiters, &wait);
set_current_state(TASK_RUNNING); set_current_state(TASK_RUNNING);
} }
@@ -1033,241 +978,216 @@ _pagebuf_wait_unpin(
* Buffer Utility Routines * Buffer Utility Routines
*/ */
/*
* pagebuf_iodone
*
* pagebuf_iodone marks a buffer for which I/O is in progress
* done with respect to that I/O. The pb_iodone routine, if
* present, will be called as a side-effect.
*/
STATIC void STATIC void
pagebuf_iodone_work( xfs_buf_iodone_work(
void *v) void *v)
{ {
xfs_buf_t *bp = (xfs_buf_t *)v; xfs_buf_t *bp = (xfs_buf_t *)v;
if (bp->pb_iodone) if (bp->b_iodone)
(*(bp->pb_iodone))(bp); (*(bp->b_iodone))(bp);
else if (bp->pb_flags & PBF_ASYNC) else if (bp->b_flags & XBF_ASYNC)
xfs_buf_relse(bp); xfs_buf_relse(bp);
} }
void void
pagebuf_iodone( xfs_buf_ioend(
xfs_buf_t *pb, xfs_buf_t *bp,
int schedule) int schedule)
{ {
pb->pb_flags &= ~(PBF_READ | PBF_WRITE); bp->b_flags &= ~(XBF_READ | XBF_WRITE);
if (pb->pb_error == 0) if (bp->b_error == 0)
pb->pb_flags |= PBF_DONE; bp->b_flags |= XBF_DONE;
PB_TRACE(pb, "iodone", pb->pb_iodone); XB_TRACE(bp, "iodone", bp->b_iodone);
if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) { if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
if (schedule) { if (schedule) {
INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb); INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
queue_work(xfslogd_workqueue, &pb->pb_iodone_work); queue_work(xfslogd_workqueue, &bp->b_iodone_work);
} else { } else {
pagebuf_iodone_work(pb); xfs_buf_iodone_work(bp);
} }
} else { } else {
up(&pb->pb_iodonesema); up(&bp->b_iodonesema);
} }
} }
/*
* pagebuf_ioerror
*
* pagebuf_ioerror sets the error code for a buffer.
*/
void void
pagebuf_ioerror( /* mark/clear buffer error flag */ xfs_buf_ioerror(
xfs_buf_t *pb, /* buffer to mark */ xfs_buf_t *bp,
int error) /* error to store (0 if none) */ int error)
{ {
ASSERT(error >= 0 && error <= 0xffff); ASSERT(error >= 0 && error <= 0xffff);
pb->pb_error = (unsigned short)error; bp->b_error = (unsigned short)error;
PB_TRACE(pb, "ioerror", (unsigned long)error); XB_TRACE(bp, "ioerror", (unsigned long)error);
} }
/* /*
* pagebuf_iostart * Initiate I/O on a buffer, based on the flags supplied.
* * The b_iodone routine in the buffer supplied will only be called
* pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
* If necessary, it will arrange for any disk space allocation required,
* and it will break up the request if the block mappings require it.
* The pb_iodone routine in the buffer supplied will only be called
* when all of the subsidiary I/O requests, if any, have been completed. * when all of the subsidiary I/O requests, if any, have been completed.
* pagebuf_iostart calls the pagebuf_ioinitiate routine or
* pagebuf_iorequest, if the former routine is not defined, to start
* the I/O on a given low-level request.
*/ */
int int
pagebuf_iostart( /* start I/O on a buffer */ xfs_buf_iostart(
xfs_buf_t *pb, /* buffer to start */ xfs_buf_t *bp,
page_buf_flags_t flags) /* PBF_LOCK, PBF_ASYNC, PBF_READ, */ xfs_buf_flags_t flags)
/* PBF_WRITE, PBF_DELWRI, */
/* PBF_DONT_BLOCK */
{ {
int status = 0; int status = 0;
PB_TRACE(pb, "iostart", (unsigned long)flags); XB_TRACE(bp, "iostart", (unsigned long)flags);
if (flags & PBF_DELWRI) { if (flags & XBF_DELWRI) {
pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC); bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC); bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
pagebuf_delwri_queue(pb, 1); xfs_buf_delwri_queue(bp, 1);
return status; return status;
} }
pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \ bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
PBF_READ_AHEAD | _PBF_RUN_QUEUES); XBF_READ_AHEAD | _XBF_RUN_QUEUES);
pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \ bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
PBF_READ_AHEAD | _PBF_RUN_QUEUES); XBF_READ_AHEAD | _XBF_RUN_QUEUES);
BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL); BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
/* For writes allow an alternate strategy routine to precede /* For writes allow an alternate strategy routine to precede
* the actual I/O request (which may not be issued at all in * the actual I/O request (which may not be issued at all in
* a shutdown situation, for example). * a shutdown situation, for example).
*/ */
status = (flags & PBF_WRITE) ? status = (flags & XBF_WRITE) ?
pagebuf_iostrategy(pb) : pagebuf_iorequest(pb); xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
/* Wait for I/O if we are not an async request. /* Wait for I/O if we are not an async request.
* Note: async I/O request completion will release the buffer, * Note: async I/O request completion will release the buffer,
* and that can already be done by this point. So using the * and that can already be done by this point. So using the
* buffer pointer from here on, after async I/O, is invalid. * buffer pointer from here on, after async I/O, is invalid.
*/ */
if (!status && !(flags & PBF_ASYNC)) if (!status && !(flags & XBF_ASYNC))
status = pagebuf_iowait(pb); status = xfs_buf_iowait(bp);
return status; return status;
} }
/*
* Helper routine for pagebuf_iorequest
*/
STATIC __inline__ int STATIC __inline__ int
_pagebuf_iolocked( _xfs_buf_iolocked(
xfs_buf_t *pb) xfs_buf_t *bp)
{ {
ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE)); ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
if (pb->pb_flags & PBF_READ) if (bp->b_flags & XBF_READ)
return pb->pb_locked; return bp->b_locked;
return 0; return 0;
} }
STATIC __inline__ void STATIC __inline__ void
_pagebuf_iodone( _xfs_buf_ioend(
xfs_buf_t *pb, xfs_buf_t *bp,
int schedule) int schedule)
{ {
if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) { if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
pb->pb_locked = 0; bp->b_locked = 0;
pagebuf_iodone(pb, schedule); xfs_buf_ioend(bp, schedule);
} }
} }
STATIC int STATIC int
bio_end_io_pagebuf( xfs_buf_bio_end_io(
struct bio *bio, struct bio *bio,
unsigned int bytes_done, unsigned int bytes_done,
int error) int error)
{ {
xfs_buf_t *pb = (xfs_buf_t *)bio->bi_private; xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private;
unsigned int blocksize = pb->pb_target->pbr_bsize; unsigned int blocksize = bp->b_target->bt_bsize;
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
if (bio->bi_size) if (bio->bi_size)
return 1; return 1;
if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
pb->pb_error = EIO; bp->b_error = EIO;
do { do {
struct page *page = bvec->bv_page; struct page *page = bvec->bv_page;
if (unlikely(pb->pb_error)) { if (unlikely(bp->b_error)) {
if (pb->pb_flags & PBF_READ) if (bp->b_flags & XBF_READ)
ClearPageUptodate(page); ClearPageUptodate(page);
SetPageError(page); SetPageError(page);
} else if (blocksize == PAGE_CACHE_SIZE) { } else if (blocksize >= PAGE_CACHE_SIZE) {
SetPageUptodate(page); SetPageUptodate(page);
} else if (!PagePrivate(page) && } else if (!PagePrivate(page) &&
(pb->pb_flags & _PBF_PAGE_CACHE)) { (bp->b_flags & _XBF_PAGE_CACHE)) {
set_page_region(page, bvec->bv_offset, bvec->bv_len); set_page_region(page, bvec->bv_offset, bvec->bv_len);
} }
if (--bvec >= bio->bi_io_vec) if (--bvec >= bio->bi_io_vec)
prefetchw(&bvec->bv_page->flags); prefetchw(&bvec->bv_page->flags);
if (_pagebuf_iolocked(pb)) { if (_xfs_buf_iolocked(bp)) {
unlock_page(page); unlock_page(page);
} }
} while (bvec >= bio->bi_io_vec); } while (bvec >= bio->bi_io_vec);
_pagebuf_iodone(pb, 1); _xfs_buf_ioend(bp, 1);
bio_put(bio); bio_put(bio);
return 0; return 0;
} }
STATIC void STATIC void
_pagebuf_ioapply( _xfs_buf_ioapply(
xfs_buf_t *pb) xfs_buf_t *bp)
{ {
int i, rw, map_i, total_nr_pages, nr_pages; int i, rw, map_i, total_nr_pages, nr_pages;
struct bio *bio; struct bio *bio;
int offset = pb->pb_offset; int offset = bp->b_offset;
int size = pb->pb_count_desired; int size = bp->b_count_desired;
sector_t sector = pb->pb_bn; sector_t sector = bp->b_bn;
unsigned int blocksize = pb->pb_target->pbr_bsize; unsigned int blocksize = bp->b_target->bt_bsize;
int locking = _pagebuf_iolocked(pb); int locking = _xfs_buf_iolocked(bp);
total_nr_pages = pb->pb_page_count; total_nr_pages = bp->b_page_count;
map_i = 0; map_i = 0;
if (pb->pb_flags & _PBF_RUN_QUEUES) { if (bp->b_flags & _XBF_RUN_QUEUES) {
pb->pb_flags &= ~_PBF_RUN_QUEUES; bp->b_flags &= ~_XBF_RUN_QUEUES;
rw = (pb->pb_flags & PBF_READ) ? READ_SYNC : WRITE_SYNC; rw = (bp->b_flags & XBF_READ) ? READ_SYNC : WRITE_SYNC;
} else { } else {
rw = (pb->pb_flags & PBF_READ) ? READ : WRITE; rw = (bp->b_flags & XBF_READ) ? READ : WRITE;
} }
if (pb->pb_flags & PBF_ORDERED) { if (bp->b_flags & XBF_ORDERED) {
ASSERT(!(pb->pb_flags & PBF_READ)); ASSERT(!(bp->b_flags & XBF_READ));
rw = WRITE_BARRIER; rw = WRITE_BARRIER;
} }
/* Special code path for reading a sub page size pagebuf in -- /* Special code path for reading a sub page size buffer in --
* we populate up the whole page, and hence the other metadata * we populate up the whole page, and hence the other metadata
* in the same page. This optimization is only valid when the * in the same page. This optimization is only valid when the
* filesystem block size and the page size are equal. * filesystem block size is not smaller than the page size.
*/ */
if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) && if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
(pb->pb_flags & PBF_READ) && locking && (bp->b_flags & XBF_READ) && locking &&
(blocksize == PAGE_CACHE_SIZE)) { (blocksize >= PAGE_CACHE_SIZE)) {
bio = bio_alloc(GFP_NOIO, 1); bio = bio_alloc(GFP_NOIO, 1);
bio->bi_bdev = pb->pb_target->pbr_bdev; bio->bi_bdev = bp->b_target->bt_bdev;
bio->bi_sector = sector - (offset >> BBSHIFT); bio->bi_sector = sector - (offset >> BBSHIFT);
bio->bi_end_io = bio_end_io_pagebuf; bio->bi_end_io = xfs_buf_bio_end_io;
bio->bi_private = pb; bio->bi_private = bp;
bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0); bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
size = 0; size = 0;
atomic_inc(&pb->pb_io_remaining); atomic_inc(&bp->b_io_remaining);
goto submit_io; goto submit_io;
} }
/* Lock down the pages which we need to for the request */ /* Lock down the pages which we need to for the request */
if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) { if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
for (i = 0; size; i++) { for (i = 0; size; i++) {
int nbytes = PAGE_CACHE_SIZE - offset; int nbytes = PAGE_CACHE_SIZE - offset;
struct page *page = pb->pb_pages[i]; struct page *page = bp->b_pages[i];
if (nbytes > size) if (nbytes > size)
nbytes = size; nbytes = size;
@@ -1277,30 +1197,30 @@ _pagebuf_ioapply(
size -= nbytes; size -= nbytes;
offset = 0; offset = 0;
} }
offset = pb->pb_offset; offset = bp->b_offset;
size = pb->pb_count_desired; size = bp->b_count_desired;
} }
next_chunk: next_chunk:
atomic_inc(&pb->pb_io_remaining); atomic_inc(&bp->b_io_remaining);
nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT); nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
if (nr_pages > total_nr_pages) if (nr_pages > total_nr_pages)
nr_pages = total_nr_pages; nr_pages = total_nr_pages;
bio = bio_alloc(GFP_NOIO, nr_pages); bio = bio_alloc(GFP_NOIO, nr_pages);
bio->bi_bdev = pb->pb_target->pbr_bdev; bio->bi_bdev = bp->b_target->bt_bdev;
bio->bi_sector = sector; bio->bi_sector = sector;
bio->bi_end_io = bio_end_io_pagebuf; bio->bi_end_io = xfs_buf_bio_end_io;
bio->bi_private = pb; bio->bi_private = bp;
for (; size && nr_pages; nr_pages--, map_i++) { for (; size && nr_pages; nr_pages--, map_i++) {
int nbytes = PAGE_CACHE_SIZE - offset; int rbytes, nbytes = PAGE_CACHE_SIZE - offset;
if (nbytes > size) if (nbytes > size)
nbytes = size; nbytes = size;
if (bio_add_page(bio, pb->pb_pages[map_i], rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
nbytes, offset) < nbytes) if (rbytes < nbytes)
break; break;
offset = 0; offset = 0;
@@ -1316,107 +1236,102 @@ _pagebuf_ioapply(
goto next_chunk; goto next_chunk;
} else { } else {
bio_put(bio); bio_put(bio);
pagebuf_ioerror(pb, EIO); xfs_buf_ioerror(bp, EIO);
} }
} }
/*
* pagebuf_iorequest -- the core I/O request routine.
*/
int int
pagebuf_iorequest( /* start real I/O */ xfs_buf_iorequest(
xfs_buf_t *pb) /* buffer to convey to device */ xfs_buf_t *bp)
{ {
PB_TRACE(pb, "iorequest", 0); XB_TRACE(bp, "iorequest", 0);
if (pb->pb_flags & PBF_DELWRI) { if (bp->b_flags & XBF_DELWRI) {
pagebuf_delwri_queue(pb, 1); xfs_buf_delwri_queue(bp, 1);
return 0; return 0;
} }
if (pb->pb_flags & PBF_WRITE) { if (bp->b_flags & XBF_WRITE) {
_pagebuf_wait_unpin(pb); xfs_buf_wait_unpin(bp);
} }
pagebuf_hold(pb); xfs_buf_hold(bp);
/* Set the count to 1 initially, this will stop an I/O /* Set the count to 1 initially, this will stop an I/O
* completion callout which happens before we have started * completion callout which happens before we have started
* all the I/O from calling pagebuf_iodone too early. * all the I/O from calling xfs_buf_ioend too early.
*/ */
atomic_set(&pb->pb_io_remaining, 1); atomic_set(&bp->b_io_remaining, 1);
_pagebuf_ioapply(pb); _xfs_buf_ioapply(bp);
_pagebuf_iodone(pb, 0); _xfs_buf_ioend(bp, 0);
pagebuf_rele(pb); xfs_buf_rele(bp);
return 0; return 0;
} }
/* /*
* pagebuf_iowait * Waits for I/O to complete on the buffer supplied.
* * It returns immediately if no I/O is pending.
* pagebuf_iowait waits for I/O to complete on the buffer supplied. * It returns the I/O error code, if any, or 0 if there was no error.
* It returns immediately if no I/O is pending. In any case, it returns
* the error code, if any, or 0 if there is no error.
*/ */
int int
pagebuf_iowait( xfs_buf_iowait(
xfs_buf_t *pb) xfs_buf_t *bp)
{ {
PB_TRACE(pb, "iowait", 0); XB_TRACE(bp, "iowait", 0);
if (atomic_read(&pb->pb_io_remaining)) if (atomic_read(&bp->b_io_remaining))
blk_run_address_space(pb->pb_target->pbr_mapping); blk_run_address_space(bp->b_target->bt_mapping);
down(&pb->pb_iodonesema); down(&bp->b_iodonesema);
PB_TRACE(pb, "iowaited", (long)pb->pb_error); XB_TRACE(bp, "iowaited", (long)bp->b_error);
return pb->pb_error; return bp->b_error;
} }
caddr_t xfs_caddr_t
pagebuf_offset( xfs_buf_offset(
xfs_buf_t *pb, xfs_buf_t *bp,
size_t offset) size_t offset)
{ {
struct page *page; struct page *page;
offset += pb->pb_offset; if (bp->b_flags & XBF_MAPPED)
return XFS_BUF_PTR(bp) + offset;
page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT]; offset += bp->b_offset;
return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1)); page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
} }
/* /*
* pagebuf_iomove
*
* Move data into or out of a buffer. * Move data into or out of a buffer.
*/ */
void void
pagebuf_iomove( xfs_buf_iomove(
xfs_buf_t *pb, /* buffer to process */ xfs_buf_t *bp, /* buffer to process */
size_t boff, /* starting buffer offset */ size_t boff, /* starting buffer offset */
size_t bsize, /* length to copy */ size_t bsize, /* length to copy */
caddr_t data, /* data address */ caddr_t data, /* data address */
page_buf_rw_t mode) /* read/write flag */ xfs_buf_rw_t mode) /* read/write/zero flag */
{ {
size_t bend, cpoff, csize; size_t bend, cpoff, csize;
struct page *page; struct page *page;
bend = boff + bsize; bend = boff + bsize;
while (boff < bend) { while (boff < bend) {
page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)]; page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
cpoff = page_buf_poff(boff + pb->pb_offset); cpoff = xfs_buf_poff(boff + bp->b_offset);
csize = min_t(size_t, csize = min_t(size_t,
PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff); PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE)); ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
switch (mode) { switch (mode) {
case PBRW_ZERO: case XBRW_ZERO:
memset(page_address(page) + cpoff, 0, csize); memset(page_address(page) + cpoff, 0, csize);
break; break;
case PBRW_READ: case XBRW_READ:
memcpy(data, page_address(page) + cpoff, csize); memcpy(data, page_address(page) + cpoff, csize);
break; break;
case PBRW_WRITE: case XBRW_WRITE:
memcpy(page_address(page) + cpoff, data, csize); memcpy(page_address(page) + cpoff, data, csize);
} }
@@ -1426,7 +1341,7 @@ pagebuf_iomove(
} }
/* /*
* Handling of buftargs. * Handling of buffer targets (buftargs).
*/ */
/* /*
@@ -1445,15 +1360,15 @@ xfs_wait_buftarg(
hash = &btp->bt_hash[i]; hash = &btp->bt_hash[i];
again: again:
spin_lock(&hash->bh_lock); spin_lock(&hash->bh_lock);
list_for_each_entry_safe(bp, n, &hash->bh_list, pb_hash_list) { list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
ASSERT(btp == bp->pb_target); ASSERT(btp == bp->b_target);
if (!(bp->pb_flags & PBF_FS_MANAGED)) { if (!(bp->b_flags & XBF_FS_MANAGED)) {
spin_unlock(&hash->bh_lock); spin_unlock(&hash->bh_lock);
/* /*
* Catch superblock reference count leaks * Catch superblock reference count leaks
* immediately * immediately
*/ */
BUG_ON(bp->pb_bn == 0); BUG_ON(bp->b_bn == 0);
delay(100); delay(100);
goto again; goto again;
} }
@@ -1488,8 +1403,7 @@ STATIC void
xfs_free_bufhash( xfs_free_bufhash(
xfs_buftarg_t *btp) xfs_buftarg_t *btp)
{ {
kmem_free(btp->bt_hash, kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
(1 << btp->bt_hashshift) * sizeof(xfs_bufhash_t));
btp->bt_hash = NULL; btp->bt_hash = NULL;
} }
@@ -1524,12 +1438,13 @@ xfs_free_buftarg(
{ {
xfs_flush_buftarg(btp, 1); xfs_flush_buftarg(btp, 1);
if (external) if (external)
xfs_blkdev_put(btp->pbr_bdev); xfs_blkdev_put(btp->bt_bdev);
xfs_free_bufhash(btp); xfs_free_bufhash(btp);
iput(btp->pbr_mapping->host); iput(btp->bt_mapping->host);
/* unregister the buftarg first so that we don't get a /* Unregister the buftarg first so that we don't get a
* wakeup finding a non-existent task */ * wakeup finding a non-existent task
*/
xfs_unregister_buftarg(btp); xfs_unregister_buftarg(btp);
kthread_stop(btp->bt_task); kthread_stop(btp->bt_task);
@@ -1543,11 +1458,11 @@ xfs_setsize_buftarg_flags(
unsigned int sectorsize, unsigned int sectorsize,
int verbose) int verbose)
{ {
btp->pbr_bsize = blocksize; btp->bt_bsize = blocksize;
btp->pbr_sshift = ffs(sectorsize) - 1; btp->bt_sshift = ffs(sectorsize) - 1;
btp->pbr_smask = sectorsize - 1; btp->bt_smask = sectorsize - 1;
if (set_blocksize(btp->pbr_bdev, sectorsize)) { if (set_blocksize(btp->bt_bdev, sectorsize)) {
printk(KERN_WARNING printk(KERN_WARNING
"XFS: Cannot set_blocksize to %u on device %s\n", "XFS: Cannot set_blocksize to %u on device %s\n",
sectorsize, XFS_BUFTARG_NAME(btp)); sectorsize, XFS_BUFTARG_NAME(btp));
...@@ -1567,10 +1482,10 @@ xfs_setsize_buftarg_flags( ...@@ -1567,10 +1482,10 @@ xfs_setsize_buftarg_flags(
} }
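For a 512-byte sector device, for example, this yields bt_sshift = ffs(512) - 1 = 9 and bt_smask = 0x1ff; later hunks in this change use bt_smask for the direct-I/O alignment checks, roughly:

	/* sketch of the test applied in xfs_read()/xfs_write() further down */
	if ((offset & target->bt_smask) || (count & target->bt_smask))
		return XFS_ERROR(-EINVAL);	/* not sector aligned */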
/* /*
* When allocating the initial buffer target we have not yet * When allocating the initial buffer target we have not yet
* read in the superblock, so don't know what sized sectors * read in the superblock, so don't know what sized sectors
* are being used at this early stage. Play safe. * are being used at this early stage. Play safe.
*/ */
STATIC int STATIC int
xfs_setsize_buftarg_early( xfs_setsize_buftarg_early(
xfs_buftarg_t *btp, xfs_buftarg_t *btp,
...@@ -1618,7 +1533,7 @@ xfs_mapping_buftarg( ...@@ -1618,7 +1533,7 @@ xfs_mapping_buftarg(
mapping->a_ops = &mapping_aops; mapping->a_ops = &mapping_aops;
mapping->backing_dev_info = bdi; mapping->backing_dev_info = bdi;
mapping_set_gfp_mask(mapping, GFP_NOFS); mapping_set_gfp_mask(mapping, GFP_NOFS);
btp->pbr_mapping = mapping; btp->bt_mapping = mapping;
return 0; return 0;
} }
...@@ -1651,8 +1566,8 @@ xfs_alloc_buftarg( ...@@ -1651,8 +1566,8 @@ xfs_alloc_buftarg(
btp = kmem_zalloc(sizeof(*btp), KM_SLEEP); btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
btp->pbr_dev = bdev->bd_dev; btp->bt_dev = bdev->bd_dev;
btp->pbr_bdev = bdev; btp->bt_bdev = bdev;
if (xfs_setsize_buftarg_early(btp, bdev)) if (xfs_setsize_buftarg_early(btp, bdev))
goto error; goto error;
if (xfs_mapping_buftarg(btp, bdev)) if (xfs_mapping_buftarg(btp, bdev))
...@@ -1669,63 +1584,61 @@ xfs_alloc_buftarg( ...@@ -1669,63 +1584,61 @@ xfs_alloc_buftarg(
/* /*
* Pagebuf delayed write buffer handling * Delayed write buffer handling
*/ */
STATIC void STATIC void
pagebuf_delwri_queue( xfs_buf_delwri_queue(
xfs_buf_t *pb, xfs_buf_t *bp,
int unlock) int unlock)
{ {
struct list_head *dwq = &pb->pb_target->bt_delwrite_queue; struct list_head *dwq = &bp->b_target->bt_delwrite_queue;
spinlock_t *dwlk = &pb->pb_target->bt_delwrite_lock; spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
PB_TRACE(pb, "delwri_q", (long)unlock); XB_TRACE(bp, "delwri_q", (long)unlock);
ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) == ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
(PBF_DELWRI|PBF_ASYNC));
spin_lock(dwlk); spin_lock(dwlk);
/* If already in the queue, dequeue and place at tail */ /* If already in the queue, dequeue and place at tail */
if (!list_empty(&pb->pb_list)) { if (!list_empty(&bp->b_list)) {
ASSERT(pb->pb_flags & _PBF_DELWRI_Q); ASSERT(bp->b_flags & _XBF_DELWRI_Q);
if (unlock) { if (unlock)
atomic_dec(&pb->pb_hold); atomic_dec(&bp->b_hold);
} list_del(&bp->b_list);
list_del(&pb->pb_list);
} }
pb->pb_flags |= _PBF_DELWRI_Q; bp->b_flags |= _XBF_DELWRI_Q;
list_add_tail(&pb->pb_list, dwq); list_add_tail(&bp->b_list, dwq);
pb->pb_queuetime = jiffies; bp->b_queuetime = jiffies;
spin_unlock(dwlk); spin_unlock(dwlk);
if (unlock) if (unlock)
pagebuf_unlock(pb); xfs_buf_unlock(bp);
} }
void void
pagebuf_delwri_dequeue( xfs_buf_delwri_dequeue(
xfs_buf_t *pb) xfs_buf_t *bp)
{ {
spinlock_t *dwlk = &pb->pb_target->bt_delwrite_lock; spinlock_t *dwlk = &bp->b_target->bt_delwrite_lock;
int dequeued = 0; int dequeued = 0;
spin_lock(dwlk); spin_lock(dwlk);
if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) { if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
ASSERT(pb->pb_flags & _PBF_DELWRI_Q); ASSERT(bp->b_flags & _XBF_DELWRI_Q);
list_del_init(&pb->pb_list); list_del_init(&bp->b_list);
dequeued = 1; dequeued = 1;
} }
pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
spin_unlock(dwlk); spin_unlock(dwlk);
if (dequeued) if (dequeued)
pagebuf_rele(pb); xfs_buf_rele(bp);
PB_TRACE(pb, "delwri_dq", (long)dequeued); XB_TRACE(bp, "delwri_dq", (long)dequeued);
} }
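The queue/dequeue pair keeps XBF_DELWRI and _XBF_DELWRI_Q consistent with membership on bt_delwrite_queue; a hedged sketch of the caller-visible side, inferred from the XFS_bdwrite/XFS_BUF_UNDELAYWRITE wrappers defined later in this change (bp assumed):

	XFS_bdwrite(bp);		/* xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC),
					 * which lands the buffer on the delwri queue */
	/* later, if the delayed write must be cancelled: */
	XFS_BUF_UNDELAYWRITE(bp);	/* xfs_buf_delwri_dequeue(): pull it back off,
					 * e.g. before staling (see XFS_BUF_SUPER_STALE) */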
STATIC void STATIC void
pagebuf_runall_queues( xfs_buf_runall_queues(
struct workqueue_struct *queue) struct workqueue_struct *queue)
{ {
flush_workqueue(queue); flush_workqueue(queue);
...@@ -1740,9 +1653,9 @@ xfsbufd_wakeup( ...@@ -1740,9 +1653,9 @@ xfsbufd_wakeup(
spin_lock(&xfs_buftarg_lock); spin_lock(&xfs_buftarg_lock);
list_for_each_entry_safe(btp, n, &xfs_buftarg_list, bt_list) { list_for_each_entry_safe(btp, n, &xfs_buftarg_list, bt_list) {
if (test_bit(BT_FORCE_SLEEP, &btp->bt_flags)) if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
continue; continue;
set_bit(BT_FORCE_FLUSH, &btp->bt_flags); set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
barrier(); barrier();
wake_up_process(btp->bt_task); wake_up_process(btp->bt_task);
} }
...@@ -1757,7 +1670,7 @@ xfsbufd( ...@@ -1757,7 +1670,7 @@ xfsbufd(
struct list_head tmp; struct list_head tmp;
unsigned long age; unsigned long age;
xfs_buftarg_t *target = (xfs_buftarg_t *)data; xfs_buftarg_t *target = (xfs_buftarg_t *)data;
xfs_buf_t *pb, *n; xfs_buf_t *bp, *n;
struct list_head *dwq = &target->bt_delwrite_queue; struct list_head *dwq = &target->bt_delwrite_queue;
spinlock_t *dwlk = &target->bt_delwrite_lock; spinlock_t *dwlk = &target->bt_delwrite_lock;
...@@ -1766,10 +1679,10 @@ xfsbufd( ...@@ -1766,10 +1679,10 @@ xfsbufd(
INIT_LIST_HEAD(&tmp); INIT_LIST_HEAD(&tmp);
do { do {
if (unlikely(freezing(current))) { if (unlikely(freezing(current))) {
set_bit(BT_FORCE_SLEEP, &target->bt_flags); set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
refrigerator(); refrigerator();
} else { } else {
clear_bit(BT_FORCE_SLEEP, &target->bt_flags); clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
} }
schedule_timeout_interruptible( schedule_timeout_interruptible(
...@@ -1777,40 +1690,40 @@ xfsbufd( ...@@ -1777,40 +1690,40 @@ xfsbufd(
age = xfs_buf_age_centisecs * msecs_to_jiffies(10); age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
spin_lock(dwlk); spin_lock(dwlk);
list_for_each_entry_safe(pb, n, dwq, pb_list) { list_for_each_entry_safe(bp, n, dwq, b_list) {
PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb)); XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
ASSERT(pb->pb_flags & PBF_DELWRI); ASSERT(bp->b_flags & XBF_DELWRI);
if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) { if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
if (!test_bit(BT_FORCE_FLUSH, if (!test_bit(XBT_FORCE_FLUSH,
&target->bt_flags) && &target->bt_flags) &&
time_before(jiffies, time_before(jiffies,
pb->pb_queuetime + age)) { bp->b_queuetime + age)) {
pagebuf_unlock(pb); xfs_buf_unlock(bp);
break; break;
} }
pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
pb->pb_flags |= PBF_WRITE; bp->b_flags |= XBF_WRITE;
list_move(&pb->pb_list, &tmp); list_move(&bp->b_list, &tmp);
} }
} }
spin_unlock(dwlk); spin_unlock(dwlk);
while (!list_empty(&tmp)) { while (!list_empty(&tmp)) {
pb = list_entry(tmp.next, xfs_buf_t, pb_list); bp = list_entry(tmp.next, xfs_buf_t, b_list);
ASSERT(target == pb->pb_target); ASSERT(target == bp->b_target);
list_del_init(&pb->pb_list); list_del_init(&bp->b_list);
pagebuf_iostrategy(pb); xfs_buf_iostrategy(bp);
blk_run_address_space(target->pbr_mapping); blk_run_address_space(target->bt_mapping);
} }
if (as_list_len > 0) if (as_list_len > 0)
purge_addresses(); purge_addresses();
clear_bit(BT_FORCE_FLUSH, &target->bt_flags); clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
} while (!kthread_should_stop()); } while (!kthread_should_stop());
return 0; return 0;
...@@ -1827,73 +1740,72 @@ xfs_flush_buftarg( ...@@ -1827,73 +1740,72 @@ xfs_flush_buftarg(
int wait) int wait)
{ {
struct list_head tmp; struct list_head tmp;
xfs_buf_t *pb, *n; xfs_buf_t *bp, *n;
int pincount = 0; int pincount = 0;
struct list_head *dwq = &target->bt_delwrite_queue; struct list_head *dwq = &target->bt_delwrite_queue;
spinlock_t *dwlk = &target->bt_delwrite_lock; spinlock_t *dwlk = &target->bt_delwrite_lock;
pagebuf_runall_queues(xfsdatad_workqueue); xfs_buf_runall_queues(xfsdatad_workqueue);
pagebuf_runall_queues(xfslogd_workqueue); xfs_buf_runall_queues(xfslogd_workqueue);
INIT_LIST_HEAD(&tmp); INIT_LIST_HEAD(&tmp);
spin_lock(dwlk); spin_lock(dwlk);
list_for_each_entry_safe(pb, n, dwq, pb_list) { list_for_each_entry_safe(bp, n, dwq, b_list) {
ASSERT(bp->b_target == target);
ASSERT(pb->pb_target == target); ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)); XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb)); if (xfs_buf_ispin(bp)) {
if (pagebuf_ispin(pb)) {
pincount++; pincount++;
continue; continue;
} }
list_move(&pb->pb_list, &tmp); list_move(&bp->b_list, &tmp);
} }
spin_unlock(dwlk); spin_unlock(dwlk);
/* /*
* Dropped the delayed write list lock, now walk the temporary list * Dropped the delayed write list lock, now walk the temporary list
*/ */
list_for_each_entry_safe(pb, n, &tmp, pb_list) { list_for_each_entry_safe(bp, n, &tmp, b_list) {
pagebuf_lock(pb); xfs_buf_lock(bp);
pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q); bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
pb->pb_flags |= PBF_WRITE; bp->b_flags |= XBF_WRITE;
if (wait) if (wait)
pb->pb_flags &= ~PBF_ASYNC; bp->b_flags &= ~XBF_ASYNC;
else else
list_del_init(&pb->pb_list); list_del_init(&bp->b_list);
pagebuf_iostrategy(pb); xfs_buf_iostrategy(bp);
} }
/* /*
* Remaining list items must be flushed before returning * Remaining list items must be flushed before returning
*/ */
while (!list_empty(&tmp)) { while (!list_empty(&tmp)) {
pb = list_entry(tmp.next, xfs_buf_t, pb_list); bp = list_entry(tmp.next, xfs_buf_t, b_list);
list_del_init(&pb->pb_list); list_del_init(&bp->b_list);
xfs_iowait(pb); xfs_iowait(bp);
xfs_buf_relse(pb); xfs_buf_relse(bp);
} }
if (wait) if (wait)
blk_run_address_space(target->pbr_mapping); blk_run_address_space(target->bt_mapping);
return pincount; return pincount;
} }
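A hedged usage sketch (the mount pointer is assumed; callers normally go through the xfs_binval()/XFS_bflush() wrappers added at the end of the header below):

	int	pincount;

	pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);	/* push delwri bufs, wait */
	/* pincount > 0 means pinned buffers were skipped; a caller would
	 * typically force the log and retry */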
int __init int __init
pagebuf_init(void) xfs_buf_init(void)
{ {
int error = -ENOMEM; int error = -ENOMEM;
#ifdef PAGEBUF_TRACE #ifdef XFS_BUF_TRACE
pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP); xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
#endif #endif
pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf"); xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
if (!pagebuf_zone) if (!xfs_buf_zone)
goto out_free_trace_buf; goto out_free_trace_buf;
xfslogd_workqueue = create_workqueue("xfslogd"); xfslogd_workqueue = create_workqueue("xfslogd");
...@@ -1904,8 +1816,8 @@ pagebuf_init(void) ...@@ -1904,8 +1816,8 @@ pagebuf_init(void)
if (!xfsdatad_workqueue) if (!xfsdatad_workqueue)
goto out_destroy_xfslogd_workqueue; goto out_destroy_xfslogd_workqueue;
pagebuf_shake = kmem_shake_register(xfsbufd_wakeup); xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
if (!pagebuf_shake) if (!xfs_buf_shake)
goto out_destroy_xfsdatad_workqueue; goto out_destroy_xfsdatad_workqueue;
return 0; return 0;
...@@ -1915,22 +1827,22 @@ pagebuf_init(void) ...@@ -1915,22 +1827,22 @@ pagebuf_init(void)
out_destroy_xfslogd_workqueue: out_destroy_xfslogd_workqueue:
destroy_workqueue(xfslogd_workqueue); destroy_workqueue(xfslogd_workqueue);
out_free_buf_zone: out_free_buf_zone:
kmem_zone_destroy(pagebuf_zone); kmem_zone_destroy(xfs_buf_zone);
out_free_trace_buf: out_free_trace_buf:
#ifdef PAGEBUF_TRACE #ifdef XFS_BUF_TRACE
ktrace_free(pagebuf_trace_buf); ktrace_free(xfs_buf_trace_buf);
#endif #endif
return error; return error;
} }
void void
pagebuf_terminate(void) xfs_buf_terminate(void)
{ {
kmem_shake_deregister(pagebuf_shake); kmem_shake_deregister(xfs_buf_shake);
destroy_workqueue(xfsdatad_workqueue); destroy_workqueue(xfsdatad_workqueue);
destroy_workqueue(xfslogd_workqueue); destroy_workqueue(xfslogd_workqueue);
kmem_zone_destroy(pagebuf_zone); kmem_zone_destroy(xfs_buf_zone);
#ifdef PAGEBUF_TRACE #ifdef XFS_BUF_TRACE
ktrace_free(pagebuf_trace_buf); ktrace_free(xfs_buf_trace_buf);
#endif #endif
} }
...@@ -34,42 +34,45 @@ ...@@ -34,42 +34,45 @@
#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL)) #define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
#define page_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE) #define xfs_buf_ctob(pp) ((pp) * PAGE_CACHE_SIZE)
#define page_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) #define xfs_buf_btoc(dd) (((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT)
#define page_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT) #define xfs_buf_btoct(dd) ((dd) >> PAGE_CACHE_SHIFT)
#define page_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK) #define xfs_buf_poff(aa) ((aa) & ~PAGE_CACHE_MASK)
typedef enum page_buf_rw_e { typedef enum {
PBRW_READ = 1, /* transfer into target memory */ XBRW_READ = 1, /* transfer into target memory */
PBRW_WRITE = 2, /* transfer from target memory */ XBRW_WRITE = 2, /* transfer from target memory */
PBRW_ZERO = 3 /* Zero target memory */ XBRW_ZERO = 3, /* Zero target memory */
} page_buf_rw_t; } xfs_buf_rw_t;
typedef enum {
typedef enum page_buf_flags_e { /* pb_flags values */ XBF_READ = (1 << 0), /* buffer intended for reading from device */
PBF_READ = (1 << 0), /* buffer intended for reading from device */ XBF_WRITE = (1 << 1), /* buffer intended for writing to device */
PBF_WRITE = (1 << 1), /* buffer intended for writing to device */ XBF_MAPPED = (1 << 2), /* buffer mapped (b_addr valid) */
PBF_MAPPED = (1 << 2), /* buffer mapped (pb_addr valid) */ XBF_ASYNC = (1 << 4), /* initiator will not wait for completion */
PBF_ASYNC = (1 << 4), /* initiator will not wait for completion */ XBF_DONE = (1 << 5), /* all pages in the buffer uptodate */
PBF_DONE = (1 << 5), /* all pages in the buffer uptodate */ XBF_DELWRI = (1 << 6), /* buffer has dirty pages */
PBF_DELWRI = (1 << 6), /* buffer has dirty pages */ XBF_STALE = (1 << 7), /* buffer has been staled, do not find it */
PBF_STALE = (1 << 7), /* buffer has been staled, do not find it */ XBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */
PBF_FS_MANAGED = (1 << 8), /* filesystem controls freeing memory */ XBF_ORDERED = (1 << 11), /* use ordered writes */
PBF_ORDERED = (1 << 11), /* use ordered writes */ XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
PBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead */
/* flags used only as arguments to access routines */ /* flags used only as arguments to access routines */
PBF_LOCK = (1 << 14), /* lock requested */ XBF_LOCK = (1 << 14), /* lock requested */
PBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */ XBF_TRYLOCK = (1 << 15), /* lock requested, but do not wait */
PBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */ XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread */
/* flags used only internally */ /* flags used only internally */
_PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */ _XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache */
_PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */ _XBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc() */
_PBF_RUN_QUEUES = (1 << 19),/* run block device task queue */ _XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
_PBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */ _XBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */
} page_buf_flags_t; } xfs_buf_flags_t;
typedef enum {
XBT_FORCE_SLEEP = (0 << 1),
XBT_FORCE_FLUSH = (1 << 1),
} xfs_buftarg_flags_t;
typedef struct xfs_bufhash { typedef struct xfs_bufhash {
struct list_head bh_list; struct list_head bh_list;
...@@ -77,14 +80,14 @@ typedef struct xfs_bufhash { ...@@ -77,14 +80,14 @@ typedef struct xfs_bufhash {
} xfs_bufhash_t; } xfs_bufhash_t;
typedef struct xfs_buftarg { typedef struct xfs_buftarg {
dev_t pbr_dev; dev_t bt_dev;
struct block_device *pbr_bdev; struct block_device *bt_bdev;
struct address_space *pbr_mapping; struct address_space *bt_mapping;
unsigned int pbr_bsize; unsigned int bt_bsize;
unsigned int pbr_sshift; unsigned int bt_sshift;
size_t pbr_smask; size_t bt_smask;
/* per-device buffer hash table */ /* per device buffer hash table */
uint bt_hashmask; uint bt_hashmask;
uint bt_hashshift; uint bt_hashshift;
xfs_bufhash_t *bt_hash; xfs_bufhash_t *bt_hash;
...@@ -94,469 +97,333 @@ typedef struct xfs_buftarg { ...@@ -94,469 +97,333 @@ typedef struct xfs_buftarg {
struct list_head bt_list; struct list_head bt_list;
struct list_head bt_delwrite_queue; struct list_head bt_delwrite_queue;
spinlock_t bt_delwrite_lock; spinlock_t bt_delwrite_lock;
uint bt_flags; unsigned long bt_flags;
#define BT_FORCE_SLEEP 1
#define BT_FORCE_FLUSH 2
} xfs_buftarg_t; } xfs_buftarg_t;
/* /*
* xfs_buf_t: Buffer structure for page cache-based buffers * xfs_buf_t: Buffer structure for pagecache-based buffers
* *
* This buffer structure is used by the page cache buffer management routines * This buffer structure is used by the pagecache buffer management routines
* to refer to an assembly of pages forming a logical buffer. The actual I/O * to refer to an assembly of pages forming a logical buffer.
* is performed with buffer_head structures, as required by drivers.
* *
* The buffer structure is used on temporary basis only, and discarded when * The buffer structure is used on a temporary basis only, and discarded when
* released. The real data storage is recorded in the page cache. Metadata is * released. The real data storage is recorded in the pagecache. Buffers are
* hashed to the block device on which the file system resides. * hashed to the block device on which the file system resides.
*/ */
struct xfs_buf; struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
typedef void (*xfs_buf_relse_t)(struct xfs_buf *);
typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);
/* call-back function on I/O completion */ #define XB_PAGES 2
typedef void (*page_buf_iodone_t)(struct xfs_buf *);
/* call-back function on I/O completion */
typedef void (*page_buf_relse_t)(struct xfs_buf *);
/* pre-write function */
typedef int (*page_buf_bdstrat_t)(struct xfs_buf *);
#define PB_PAGES 2
typedef struct xfs_buf { typedef struct xfs_buf {
struct semaphore pb_sema; /* semaphore for lockables */ struct semaphore b_sema; /* semaphore for lockables */
unsigned long pb_queuetime; /* time buffer was queued */ unsigned long b_queuetime; /* time buffer was queued */
atomic_t pb_pin_count; /* pin count */ atomic_t b_pin_count; /* pin count */
wait_queue_head_t pb_waiters; /* unpin waiters */ wait_queue_head_t b_waiters; /* unpin waiters */
struct list_head pb_list; struct list_head b_list;
page_buf_flags_t pb_flags; /* status flags */ xfs_buf_flags_t b_flags; /* status flags */
struct list_head pb_hash_list; /* hash table list */ struct list_head b_hash_list; /* hash table list */
xfs_bufhash_t *pb_hash; /* hash table list start */ xfs_bufhash_t *b_hash; /* hash table list start */
xfs_buftarg_t *pb_target; /* buffer target (device) */ xfs_buftarg_t *b_target; /* buffer target (device) */
atomic_t pb_hold; /* reference count */ atomic_t b_hold; /* reference count */
xfs_daddr_t pb_bn; /* block number for I/O */ xfs_daddr_t b_bn; /* block number for I/O */
loff_t pb_file_offset; /* offset in file */ xfs_off_t b_file_offset; /* offset in file */
size_t pb_buffer_length; /* size of buffer in bytes */ size_t b_buffer_length;/* size of buffer in bytes */
size_t pb_count_desired; /* desired transfer size */ size_t b_count_desired;/* desired transfer size */
void *pb_addr; /* virtual address of buffer */ void *b_addr; /* virtual address of buffer */
struct work_struct pb_iodone_work; struct work_struct b_iodone_work;
atomic_t pb_io_remaining;/* #outstanding I/O requests */ atomic_t b_io_remaining; /* #outstanding I/O requests */
page_buf_iodone_t pb_iodone; /* I/O completion function */ xfs_buf_iodone_t b_iodone; /* I/O completion function */
page_buf_relse_t pb_relse; /* releasing function */ xfs_buf_relse_t b_relse; /* releasing function */
page_buf_bdstrat_t pb_strat; /* pre-write function */ xfs_buf_bdstrat_t b_strat; /* pre-write function */
struct semaphore pb_iodonesema; /* Semaphore for I/O waiters */ struct semaphore b_iodonesema; /* Semaphore for I/O waiters */
void *pb_fspriv; void *b_fspriv;
void *pb_fspriv2; void *b_fspriv2;
void *pb_fspriv3; void *b_fspriv3;
unsigned short pb_error; /* error code on I/O */ unsigned short b_error; /* error code on I/O */
unsigned short pb_locked; /* page array is locked */ unsigned short b_locked; /* page array is locked */
unsigned int pb_page_count; /* size of page array */ unsigned int b_page_count; /* size of page array */
unsigned int pb_offset; /* page offset in first page */ unsigned int b_offset; /* page offset in first page */
struct page **pb_pages; /* array of page pointers */ struct page **b_pages; /* array of page pointers */
struct page *pb_page_array[PB_PAGES]; /* inline pages */ struct page *b_page_array[XB_PAGES]; /* inline pages */
#ifdef PAGEBUF_LOCK_TRACKING #ifdef XFS_BUF_LOCK_TRACKING
int pb_last_holder; int b_last_holder;
#endif #endif
} xfs_buf_t; } xfs_buf_t;
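For context on b_iodone/b_relse/b_strat, a hedged sketch of a completion callback in the new naming; the callback itself is hypothetical, modeled on the convention of clearing the function pointer before re-entering xfs_buf_ioend(), and uses wrappers declared further down in this header:

/* hypothetical completion callback */
STATIC void
example_buf_iodone(
	xfs_buf_t	*bp)
{
	if (XFS_BUF_GETERROR(bp))
		xfs_buftrace("example iodone error", bp);
	XFS_BUF_CLR_IODONE_FUNC(bp);	/* don't run ourselves again */
	xfs_buf_ioend(bp, 0);		/* finish normal completion */
}

/* registered by a hypothetical caller as:
 *	XFS_BUF_SET_IODONE_FUNC(bp, example_buf_iodone);
 */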
/* Finding and Reading Buffers */ /* Finding and Reading Buffers */
extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
extern xfs_buf_t *_pagebuf_find( /* find buffer for block if */ xfs_buf_flags_t, xfs_buf_t *);
/* the block is in memory */
xfs_buftarg_t *, /* inode for block */
loff_t, /* starting offset of range */
size_t, /* length of range */
page_buf_flags_t, /* PBF_LOCK */
xfs_buf_t *); /* newly allocated buffer */
#define xfs_incore(buftarg,blkno,len,lockit) \ #define xfs_incore(buftarg,blkno,len,lockit) \
_pagebuf_find(buftarg, blkno ,len, lockit, NULL) _xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
extern xfs_buf_t *xfs_buf_get_flags( /* allocate a buffer */
xfs_buftarg_t *, /* inode for buffer */
loff_t, /* starting offset of range */
size_t, /* length of range */
page_buf_flags_t); /* PBF_LOCK, PBF_READ, */
/* PBF_ASYNC */
extern xfs_buf_t *xfs_buf_get_flags(xfs_buftarg_t *, xfs_off_t, size_t,
xfs_buf_flags_t);
#define xfs_buf_get(target, blkno, len, flags) \ #define xfs_buf_get(target, blkno, len, flags) \
xfs_buf_get_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED) xfs_buf_get_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
extern xfs_buf_t *xfs_buf_read_flags( /* allocate and read a buffer */
xfs_buftarg_t *, /* inode for buffer */
loff_t, /* starting offset of range */
size_t, /* length of range */
page_buf_flags_t); /* PBF_LOCK, PBF_ASYNC */
extern xfs_buf_t *xfs_buf_read_flags(xfs_buftarg_t *, xfs_off_t, size_t,
xfs_buf_flags_t);
#define xfs_buf_read(target, blkno, len, flags) \ #define xfs_buf_read(target, blkno, len, flags) \
xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED) xfs_buf_read_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
extern xfs_buf_t *pagebuf_get_empty( /* allocate pagebuf struct with */ extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
/* no memory or disk address */ extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *);
size_t len, extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
xfs_buftarg_t *); /* mount point "fake" inode */ extern void xfs_buf_hold(xfs_buf_t *);
extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t,
extern xfs_buf_t *pagebuf_get_no_daddr(/* allocate pagebuf struct */ xfs_buf_flags_t);
/* without disk address */
size_t len,
xfs_buftarg_t *); /* mount point "fake" inode */
extern int pagebuf_associate_memory(
xfs_buf_t *,
void *,
size_t);
extern void pagebuf_hold( /* increment reference count */
xfs_buf_t *); /* buffer to hold */
extern void pagebuf_readahead( /* read ahead into cache */
xfs_buftarg_t *, /* target for buffer (or NULL) */
loff_t, /* starting offset of range */
size_t, /* length of range */
page_buf_flags_t); /* additional read flags */
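A hedged sketch of the renamed lookup/read interface (target, blkno and len are assumed; the error handling is hypothetical and uses helpers declared further down in this header):

	xfs_buf_t	*bp;

	bp = xfs_buf_read_flags(target, blkno, len, XBF_LOCK | XBF_MAPPED);
	if (!bp || XFS_BUF_GETERROR(bp)) {
		if (bp)
			xfs_buf_relse(bp);	/* drop the failed buffer */
	} else {
		/* data at XFS_BUF_PTR(bp), XFS_BUF_COUNT(bp) bytes long */
		xfs_buf_relse(bp);		/* unlock and release the hold */
	}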
/* Releasing Buffers */ /* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
extern void pagebuf_free( /* deallocate a buffer */ extern void xfs_buf_rele(xfs_buf_t *);
xfs_buf_t *); /* buffer to deallocate */
extern void pagebuf_rele( /* release hold on a buffer */
xfs_buf_t *); /* buffer to release */
/* Locking and Unlocking Buffers */ /* Locking and Unlocking Buffers */
extern int xfs_buf_cond_lock(xfs_buf_t *);
extern int pagebuf_cond_lock( /* lock buffer, if not locked */ extern int xfs_buf_lock_value(xfs_buf_t *);
/* (returns -EBUSY if locked) */ extern void xfs_buf_lock(xfs_buf_t *);
xfs_buf_t *); /* buffer to lock */ extern void xfs_buf_unlock(xfs_buf_t *);
extern int pagebuf_lock_value( /* return count on lock */
xfs_buf_t *); /* buffer to check */
extern int pagebuf_lock( /* lock buffer */
xfs_buf_t *); /* buffer to lock */
extern void pagebuf_unlock( /* unlock buffer */
xfs_buf_t *); /* buffer to unlock */
/* Buffer Read and Write Routines */ /* Buffer Read and Write Routines */
extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void pagebuf_iodone( /* mark buffer I/O complete */ extern void xfs_buf_ioerror(xfs_buf_t *, int);
xfs_buf_t *, /* buffer to mark */ extern int xfs_buf_iostart(xfs_buf_t *, xfs_buf_flags_t);
int); /* run completion locally, or in extern int xfs_buf_iorequest(xfs_buf_t *);
* a helper thread. */ extern int xfs_buf_iowait(xfs_buf_t *);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t,
extern void pagebuf_ioerror( /* mark buffer in error (or not) */ xfs_buf_rw_t);
xfs_buf_t *, /* buffer to mark */
int); /* error to store (0 if none) */ static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
extern int pagebuf_iostart( /* start I/O on a buffer */
xfs_buf_t *, /* buffer to start */
page_buf_flags_t); /* PBF_LOCK, PBF_ASYNC, */
/* PBF_READ, PBF_WRITE, */
/* PBF_DELWRI */
extern int pagebuf_iorequest( /* start real I/O */
xfs_buf_t *); /* buffer to convey to device */
extern int pagebuf_iowait( /* wait for buffer I/O done */
xfs_buf_t *); /* buffer to wait on */
extern void pagebuf_iomove( /* move data in/out of pagebuf */
xfs_buf_t *, /* buffer to manipulate */
size_t, /* starting buffer offset */
size_t, /* length in buffer */
caddr_t, /* data pointer */
page_buf_rw_t); /* direction */
static inline int pagebuf_iostrategy(xfs_buf_t *pb)
{ {
return pb->pb_strat ? pb->pb_strat(pb) : pagebuf_iorequest(pb); return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp);
} }
static inline int pagebuf_geterror(xfs_buf_t *pb) static inline int xfs_buf_geterror(xfs_buf_t *bp)
{ {
return pb ? pb->pb_error : ENOMEM; return bp ? bp->b_error : ENOMEM;
} }
/* Buffer Utility Routines */ /* Buffer Utility Routines */
extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
extern caddr_t pagebuf_offset( /* pointer at offset in buffer */
xfs_buf_t *, /* buffer to offset into */
size_t); /* offset */
/* Pinning Buffer Storage in Memory */ /* Pinning Buffer Storage in Memory */
extern void xfs_buf_pin(xfs_buf_t *);
extern void pagebuf_pin( /* pin buffer in memory */ extern void xfs_buf_unpin(xfs_buf_t *);
xfs_buf_t *); /* buffer to pin */ extern int xfs_buf_ispin(xfs_buf_t *);
extern void pagebuf_unpin( /* unpin buffered data */
xfs_buf_t *); /* buffer to unpin */
extern int pagebuf_ispin( /* check if buffer is pinned */
xfs_buf_t *); /* buffer to check */
/* Delayed Write Buffer Routines */ /* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
extern void pagebuf_delwri_dequeue(xfs_buf_t *);
/* Buffer Daemon Setup Routines */ /* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);
extern int pagebuf_init(void); #ifdef XFS_BUF_TRACE
extern void pagebuf_terminate(void); extern ktrace_t *xfs_buf_trace_buf;
extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
#ifdef PAGEBUF_TRACE
extern ktrace_t *pagebuf_trace_buf;
extern void pagebuf_trace(
xfs_buf_t *, /* buffer being traced */
char *, /* description of operation */
void *, /* arbitrary diagnostic value */
void *); /* return address */
#else #else
# define pagebuf_trace(pb, id, ptr, ra) do { } while (0) #define xfs_buf_trace(bp,id,ptr,ra) do { } while (0)
#endif #endif
#define pagebuf_target_name(target) \ #define xfs_buf_target_name(target) \
({ char __b[BDEVNAME_SIZE]; bdevname((target)->pbr_bdev, __b); __b; }) ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
#define XFS_B_ASYNC XBF_ASYNC
#define XFS_B_DELWRI XBF_DELWRI
#define XFS_B_READ XBF_READ
#define XFS_B_WRITE XBF_WRITE
#define XFS_B_STALE XBF_STALE
/* These are just for xfs_syncsub... it sets an internal variable #define XFS_BUF_TRYLOCK XBF_TRYLOCK
* then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t #define XFS_INCORE_TRYLOCK XBF_TRYLOCK
*/ #define XFS_BUF_LOCK XBF_LOCK
#define XFS_B_ASYNC PBF_ASYNC #define XFS_BUF_MAPPED XBF_MAPPED
#define XFS_B_DELWRI PBF_DELWRI
#define XFS_B_READ PBF_READ
#define XFS_B_WRITE PBF_WRITE
#define XFS_B_STALE PBF_STALE
#define XFS_BUF_TRYLOCK PBF_TRYLOCK
#define XFS_INCORE_TRYLOCK PBF_TRYLOCK
#define XFS_BUF_LOCK PBF_LOCK
#define XFS_BUF_MAPPED PBF_MAPPED
#define BUF_BUSY PBF_DONT_BLOCK
#define XFS_BUF_BFLAGS(x) ((x)->pb_flags)
#define XFS_BUF_ZEROFLAGS(x) \
((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI))
#define XFS_BUF_STALE(x) ((x)->pb_flags |= XFS_B_STALE)
#define XFS_BUF_UNSTALE(x) ((x)->pb_flags &= ~XFS_B_STALE)
#define XFS_BUF_ISSTALE(x) ((x)->pb_flags & XFS_B_STALE)
#define XFS_BUF_SUPER_STALE(x) do { \
XFS_BUF_STALE(x); \
pagebuf_delwri_dequeue(x); \
XFS_BUF_DONE(x); \
} while (0)
#define XFS_BUF_MANAGE PBF_FS_MANAGED #define BUF_BUSY XBF_DONT_BLOCK
#define XFS_BUF_UNMANAGE(x) ((x)->pb_flags &= ~PBF_FS_MANAGED)
#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags)
#define XFS_BUF_DELAYWRITE(x) ((x)->pb_flags |= PBF_DELWRI) #define XFS_BUF_ZEROFLAGS(bp) \
#define XFS_BUF_UNDELAYWRITE(x) pagebuf_delwri_dequeue(x) ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI))
#define XFS_BUF_ISDELAYWRITE(x) ((x)->pb_flags & PBF_DELWRI)
#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE)
#define XFS_BUF_ERROR(x,no) pagebuf_ioerror(x,no) #define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE)
#define XFS_BUF_GETERROR(x) pagebuf_geterror(x) #define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XFS_B_STALE)
#define XFS_BUF_ISERROR(x) (pagebuf_geterror(x)?1:0) #define XFS_BUF_SUPER_STALE(bp) do { \
XFS_BUF_STALE(bp); \
#define XFS_BUF_DONE(x) ((x)->pb_flags |= PBF_DONE) xfs_buf_delwri_dequeue(bp); \
#define XFS_BUF_UNDONE(x) ((x)->pb_flags &= ~PBF_DONE) XFS_BUF_DONE(bp); \
#define XFS_BUF_ISDONE(x) ((x)->pb_flags & PBF_DONE) } while (0)
#define XFS_BUF_BUSY(x) do { } while (0)
#define XFS_BUF_UNBUSY(x) do { } while (0)
#define XFS_BUF_ISBUSY(x) (1)
#define XFS_BUF_ASYNC(x) ((x)->pb_flags |= PBF_ASYNC)
#define XFS_BUF_UNASYNC(x) ((x)->pb_flags &= ~PBF_ASYNC)
#define XFS_BUF_ISASYNC(x) ((x)->pb_flags & PBF_ASYNC)
#define XFS_BUF_ORDERED(x) ((x)->pb_flags |= PBF_ORDERED)
#define XFS_BUF_UNORDERED(x) ((x)->pb_flags &= ~PBF_ORDERED)
#define XFS_BUF_ISORDERED(x) ((x)->pb_flags & PBF_ORDERED)
#define XFS_BUF_SHUT(x) printk("XFS_BUF_SHUT not implemented yet\n")
#define XFS_BUF_UNSHUT(x) printk("XFS_BUF_UNSHUT not implemented yet\n")
#define XFS_BUF_ISSHUT(x) (0)
#define XFS_BUF_HOLD(x) pagebuf_hold(x)
#define XFS_BUF_READ(x) ((x)->pb_flags |= PBF_READ)
#define XFS_BUF_UNREAD(x) ((x)->pb_flags &= ~PBF_READ)
#define XFS_BUF_ISREAD(x) ((x)->pb_flags & PBF_READ)
#define XFS_BUF_WRITE(x) ((x)->pb_flags |= PBF_WRITE)
#define XFS_BUF_UNWRITE(x) ((x)->pb_flags &= ~PBF_WRITE)
#define XFS_BUF_ISWRITE(x) ((x)->pb_flags & PBF_WRITE)
#define XFS_BUF_ISUNINITIAL(x) (0)
#define XFS_BUF_UNUNINITIAL(x) (0)
#define XFS_BUF_BP_ISMAPPED(bp) 1
#define XFS_BUF_IODONE_FUNC(buf) (buf)->pb_iodone
#define XFS_BUF_SET_IODONE_FUNC(buf, func) \
(buf)->pb_iodone = (func)
#define XFS_BUF_CLR_IODONE_FUNC(buf) \
(buf)->pb_iodone = NULL
#define XFS_BUF_SET_BDSTRAT_FUNC(buf, func) \
(buf)->pb_strat = (func)
#define XFS_BUF_CLR_BDSTRAT_FUNC(buf) \
(buf)->pb_strat = NULL
#define XFS_BUF_FSPRIVATE(buf, type) \
((type)(buf)->pb_fspriv)
#define XFS_BUF_SET_FSPRIVATE(buf, value) \
(buf)->pb_fspriv = (void *)(value)
#define XFS_BUF_FSPRIVATE2(buf, type) \
((type)(buf)->pb_fspriv2)
#define XFS_BUF_SET_FSPRIVATE2(buf, value) \
(buf)->pb_fspriv2 = (void *)(value)
#define XFS_BUF_FSPRIVATE3(buf, type) \
((type)(buf)->pb_fspriv3)
#define XFS_BUF_SET_FSPRIVATE3(buf, value) \
(buf)->pb_fspriv3 = (void *)(value)
#define XFS_BUF_SET_START(buf)
#define XFS_BUF_SET_BRELSE_FUNC(buf, value) \
(buf)->pb_relse = (value)
#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->pb_addr)
static inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
{
if (bp->pb_flags & PBF_MAPPED)
return XFS_BUF_PTR(bp) + offset;
return (xfs_caddr_t) pagebuf_offset(bp, offset);
}
#define XFS_BUF_SET_PTR(bp, val, count) \ #define XFS_BUF_MANAGE XBF_FS_MANAGED
pagebuf_associate_memory(bp, val, count) #define XFS_BUF_UNMANAGE(bp) ((bp)->b_flags &= ~XBF_FS_MANAGED)
#define XFS_BUF_ADDR(bp) ((bp)->pb_bn)
#define XFS_BUF_SET_ADDR(bp, blk) \ #define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI)
((bp)->pb_bn = (xfs_daddr_t)(blk)) #define XFS_BUF_UNDELAYWRITE(bp) xfs_buf_delwri_dequeue(bp)
#define XFS_BUF_OFFSET(bp) ((bp)->pb_file_offset) #define XFS_BUF_ISDELAYWRITE(bp) ((bp)->b_flags & XBF_DELWRI)
#define XFS_BUF_SET_OFFSET(bp, off) \
((bp)->pb_file_offset = (off)) #define XFS_BUF_ERROR(bp,no) xfs_buf_ioerror(bp,no)
#define XFS_BUF_COUNT(bp) ((bp)->pb_count_desired) #define XFS_BUF_GETERROR(bp) xfs_buf_geterror(bp)
#define XFS_BUF_SET_COUNT(bp, cnt) \ #define XFS_BUF_ISERROR(bp) (xfs_buf_geterror(bp) ? 1 : 0)
((bp)->pb_count_desired = (cnt))
#define XFS_BUF_SIZE(bp) ((bp)->pb_buffer_length) #define XFS_BUF_DONE(bp) ((bp)->b_flags |= XBF_DONE)
#define XFS_BUF_SET_SIZE(bp, cnt) \ #define XFS_BUF_UNDONE(bp) ((bp)->b_flags &= ~XBF_DONE)
((bp)->pb_buffer_length = (cnt)) #define XFS_BUF_ISDONE(bp) ((bp)->b_flags & XBF_DONE)
#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
#define XFS_BUF_SET_VTYPE(bp, type) #define XFS_BUF_BUSY(bp) do { } while (0)
#define XFS_BUF_SET_REF(bp, ref) #define XFS_BUF_UNBUSY(bp) do { } while (0)
#define XFS_BUF_ISBUSY(bp) (1)
#define XFS_BUF_ISPINNED(bp) pagebuf_ispin(bp)
#define XFS_BUF_ASYNC(bp) ((bp)->b_flags |= XBF_ASYNC)
#define XFS_BUF_VALUSEMA(bp) pagebuf_lock_value(bp) #define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC)
#define XFS_BUF_CPSEMA(bp) (pagebuf_cond_lock(bp) == 0) #define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC)
#define XFS_BUF_VSEMA(bp) pagebuf_unlock(bp)
#define XFS_BUF_PSEMA(bp,x) pagebuf_lock(bp) #define XFS_BUF_ORDERED(bp) ((bp)->b_flags |= XBF_ORDERED)
#define XFS_BUF_V_IODONESEMA(bp) up(&bp->pb_iodonesema); #define XFS_BUF_UNORDERED(bp) ((bp)->b_flags &= ~XBF_ORDERED)
#define XFS_BUF_ISORDERED(bp) ((bp)->b_flags & XBF_ORDERED)
/* setup the buffer target from a buftarg structure */
#define XFS_BUF_SET_TARGET(bp, target) \ #define XFS_BUF_SHUT(bp) do { } while (0)
(bp)->pb_target = (target) #define XFS_BUF_UNSHUT(bp) do { } while (0)
#define XFS_BUF_TARGET(bp) ((bp)->pb_target) #define XFS_BUF_ISSHUT(bp) (0)
#define XFS_BUFTARG_NAME(target) \
pagebuf_target_name(target) #define XFS_BUF_HOLD(bp) xfs_buf_hold(bp)
#define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ)
#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) #define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ)
#define XFS_BUF_SET_VTYPE(bp, type) #define XFS_BUF_ISREAD(bp) ((bp)->b_flags & XBF_READ)
#define XFS_BUF_SET_REF(bp, ref)
#define XFS_BUF_WRITE(bp) ((bp)->b_flags |= XBF_WRITE)
#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE)
#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE)
#define XFS_BUF_ISUNINITIAL(bp) (0)
#define XFS_BUF_UNUNINITIAL(bp) (0)
#define XFS_BUF_BP_ISMAPPED(bp) (1)
#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone)
#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func))
#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL)
#define XFS_BUF_SET_BDSTRAT_FUNC(bp, func) ((bp)->b_strat = (func))
#define XFS_BUF_CLR_BDSTRAT_FUNC(bp) ((bp)->b_strat = NULL)
#define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv)
#define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val))
#define XFS_BUF_FSPRIVATE2(bp, type) ((type)(bp)->b_fspriv2)
#define XFS_BUF_SET_FSPRIVATE2(bp, val) ((bp)->b_fspriv2 = (void*)(val))
#define XFS_BUF_FSPRIVATE3(bp, type) ((type)(bp)->b_fspriv3)
#define XFS_BUF_SET_FSPRIVATE3(bp, val) ((bp)->b_fspriv3 = (void*)(val))
#define XFS_BUF_SET_START(bp) do { } while (0)
#define XFS_BUF_SET_BRELSE_FUNC(bp, func) ((bp)->b_relse = (func))
#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr)
#define XFS_BUF_SET_PTR(bp, val, cnt) xfs_buf_associate_memory(bp, val, cnt)
#define XFS_BUF_ADDR(bp) ((bp)->b_bn)
#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno))
#define XFS_BUF_OFFSET(bp) ((bp)->b_file_offset)
#define XFS_BUF_SET_OFFSET(bp, off) ((bp)->b_file_offset = (off))
#define XFS_BUF_COUNT(bp) ((bp)->b_count_desired)
#define XFS_BUF_SET_COUNT(bp, cnt) ((bp)->b_count_desired = (cnt))
#define XFS_BUF_SIZE(bp) ((bp)->b_buffer_length)
#define XFS_BUF_SET_SIZE(bp, cnt) ((bp)->b_buffer_length = (cnt))
#define XFS_BUF_SET_VTYPE_REF(bp, type, ref) do { } while (0)
#define XFS_BUF_SET_VTYPE(bp, type) do { } while (0)
#define XFS_BUF_SET_REF(bp, ref) do { } while (0)
#define XFS_BUF_ISPINNED(bp) xfs_buf_ispin(bp)
#define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp)
#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0)
#define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp)
#define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp)
#define XFS_BUF_V_IODONESEMA(bp) up(&bp->b_iodonesema);
#define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target))
#define XFS_BUF_TARGET(bp) ((bp)->b_target)
#define XFS_BUFTARG_NAME(target) xfs_buf_target_name(target)
static inline int xfs_bawrite(void *mp, xfs_buf_t *bp) static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
{ {
bp->pb_fspriv3 = mp; bp->b_fspriv3 = mp;
bp->pb_strat = xfs_bdstrat_cb; bp->b_strat = xfs_bdstrat_cb;
pagebuf_delwri_dequeue(bp); xfs_buf_delwri_dequeue(bp);
return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | _PBF_RUN_QUEUES); return xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
} }
static inline void xfs_buf_relse(xfs_buf_t *bp) static inline void xfs_buf_relse(xfs_buf_t *bp)
{ {
if (!bp->pb_relse) if (!bp->b_relse)
pagebuf_unlock(bp); xfs_buf_unlock(bp);
pagebuf_rele(bp); xfs_buf_rele(bp);
} }
#define xfs_bpin(bp) pagebuf_pin(bp) #define xfs_bpin(bp) xfs_buf_pin(bp)
#define xfs_bunpin(bp) pagebuf_unpin(bp) #define xfs_bunpin(bp) xfs_buf_unpin(bp)
#define xfs_buftrace(id, bp) \ #define xfs_buftrace(id, bp) \
pagebuf_trace(bp, id, NULL, (void *)__builtin_return_address(0)) xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
#define xfs_biodone(pb) \ #define xfs_biodone(bp) xfs_buf_ioend(bp, 0)
pagebuf_iodone(pb, 0)
#define xfs_biomove(pb, off, len, data, rw) \ #define xfs_biomove(bp, off, len, data, rw) \
pagebuf_iomove((pb), (off), (len), (data), \ xfs_buf_iomove((bp), (off), (len), (data), \
((rw) == XFS_B_WRITE) ? PBRW_WRITE : PBRW_READ) ((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ)
#define xfs_biozero(pb, off, len) \ #define xfs_biozero(bp, off, len) \
pagebuf_iomove((pb), (off), (len), NULL, PBRW_ZERO) xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
static inline int XFS_bwrite(xfs_buf_t *pb) static inline int XFS_bwrite(xfs_buf_t *bp)
{ {
int iowait = (pb->pb_flags & PBF_ASYNC) == 0; int iowait = (bp->b_flags & XBF_ASYNC) == 0;
int error = 0; int error = 0;
if (!iowait) if (!iowait)
pb->pb_flags |= _PBF_RUN_QUEUES; bp->b_flags |= _XBF_RUN_QUEUES;
pagebuf_delwri_dequeue(pb); xfs_buf_delwri_dequeue(bp);
pagebuf_iostrategy(pb); xfs_buf_iostrategy(bp);
if (iowait) { if (iowait) {
error = pagebuf_iowait(pb); error = xfs_buf_iowait(bp);
xfs_buf_relse(pb); xfs_buf_relse(bp);
} }
return error; return error;
} }
#define XFS_bdwrite(pb) \ #define XFS_bdwrite(bp) xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC)
pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC)
static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp) static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
{ {
bp->pb_strat = xfs_bdstrat_cb; bp->b_strat = xfs_bdstrat_cb;
bp->pb_fspriv3 = mp; bp->b_fspriv3 = mp;
return xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
return pagebuf_iostart(bp, PBF_DELWRI | PBF_ASYNC);
} }
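The two write helpers above differ only in when the I/O is issued; a hedged sketch (mp, bp and the condition are hypothetical):

	int	error;

	if (need_it_on_disk_now)
		error = XFS_bwrite(bp);		/* issue now; waits and releases bp
						 * unless XBF_ASYNC was already set */
	else
		error = xfs_bdwrite(mp, bp);	/* queue as delayed write for xfsbufd
						 * to push once it ages out */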
#define XFS_bdstrat(bp) pagebuf_iorequest(bp) #define XFS_bdstrat(bp) xfs_buf_iorequest(bp)
#define xfs_iowait(pb) pagebuf_iowait(pb) #define xfs_iowait(bp) xfs_buf_iowait(bp)
#define xfs_baread(target, rablkno, ralen) \ #define xfs_baread(target, rablkno, ralen) \
pagebuf_readahead((target), (rablkno), (ralen), PBF_DONT_BLOCK) xfs_buf_readahead((target), (rablkno), (ralen), XBF_DONT_BLOCK)
#define xfs_buf_get_empty(len, target) pagebuf_get_empty((len), (target))
#define xfs_buf_get_noaddr(len, target) pagebuf_get_no_daddr((len), (target))
#define xfs_buf_free(bp) pagebuf_free(bp)
/* /*
* Handling of buftargs. * Handling of buftargs.
*/ */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int); extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
extern void xfs_free_buftarg(xfs_buftarg_t *, int); extern void xfs_free_buftarg(xfs_buftarg_t *, int);
extern void xfs_wait_buftarg(xfs_buftarg_t *); extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int); extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern int xfs_flush_buftarg(xfs_buftarg_t *, int); extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
#define xfs_getsize_buftarg(buftarg) \ #define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
block_size((buftarg)->pbr_bdev) #define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg) \
bdev_read_only((buftarg)->pbr_bdev) #define xfs_binval(buftarg) xfs_flush_buftarg(buftarg, 1)
#define xfs_binval(buftarg) \ #define XFS_bflush(buftarg) xfs_flush_buftarg(buftarg, 1)
xfs_flush_buftarg(buftarg, 1)
#define XFS_bflush(buftarg) \
xfs_flush_buftarg(buftarg, 1)
#endif /* __XFS_BUF_H__ */ #endif /* __XFS_BUF_H__ */
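For completeness, a hedged sketch of the buftarg lifecycle behind these declarations (bdev, blocksize and sectorsize are assumed; the meaning of the second integer argument as an "external device" flag is an assumption based on xfs_free_buftarg() earlier in this change):

	xfs_buftarg_t	*btp;

	btp = xfs_alloc_buftarg(bdev, 0);	/* 0 assumed: not an external device */
	if (btp) {
		xfs_setsize_buftarg(btp, blocksize, sectorsize);
		/* ... filesystem I/O through btp ... */
		xfs_free_buftarg(btp, 0);	/* flushes delwri buffers, stops the
						 * xfsbufd thread, frees the hash table */
	}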
...@@ -750,7 +750,7 @@ xfs_ioctl( ...@@ -750,7 +750,7 @@ xfs_ioctl(
(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp; mp->m_rtdev_targp : mp->m_ddev_targp;
da.d_mem = da.d_miniosz = 1 << target->pbr_sshift; da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1); da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
if (copy_to_user(arg, &da, sizeof(da))) if (copy_to_user(arg, &da, sizeof(da)))
......
...@@ -232,7 +232,7 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh) ...@@ -232,7 +232,7 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
#define xfs_itruncate_data(ip, off) \ #define xfs_itruncate_data(ip, off) \
(-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off))) (-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off)))
#define xfs_statvfs_fsid(statp, mp) \ #define xfs_statvfs_fsid(statp, mp) \
({ u64 id = huge_encode_dev((mp)->m_dev); \ ({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \
__kernel_fsid_t *fsid = &(statp)->f_fsid; \ __kernel_fsid_t *fsid = &(statp)->f_fsid; \
(fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); }) (fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); })
......
...@@ -233,8 +233,8 @@ xfs_read( ...@@ -233,8 +233,8 @@ xfs_read(
xfs_buftarg_t *target = xfs_buftarg_t *target =
(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp; mp->m_rtdev_targp : mp->m_ddev_targp;
if ((*offset & target->pbr_smask) || if ((*offset & target->bt_smask) ||
(size & target->pbr_smask)) { (size & target->bt_smask)) {
if (*offset == ip->i_d.di_size) { if (*offset == ip->i_d.di_size) {
return (0); return (0);
} }
...@@ -618,7 +618,7 @@ xfs_write( ...@@ -618,7 +618,7 @@ xfs_write(
(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ? (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp; mp->m_rtdev_targp : mp->m_ddev_targp;
if ((pos & target->pbr_smask) || (count & target->pbr_smask)) if ((pos & target->bt_smask) || (count & target->bt_smask))
return XFS_ERROR(-EINVAL); return XFS_ERROR(-EINVAL);
if (!VN_CACHED(vp) && pos < i_size_read(inode)) if (!VN_CACHED(vp) && pos < i_size_read(inode))
...@@ -938,7 +938,7 @@ xfs_bdstrat_cb(struct xfs_buf *bp) ...@@ -938,7 +938,7 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *); mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
if (!XFS_FORCED_SHUTDOWN(mp)) { if (!XFS_FORCED_SHUTDOWN(mp)) {
pagebuf_iorequest(bp); xfs_buf_iorequest(bp);
return 0; return 0;
} else { } else {
xfs_buftrace("XFS__BDSTRAT IOERROR", bp); xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
...@@ -991,7 +991,7 @@ xfsbdstrat( ...@@ -991,7 +991,7 @@ xfsbdstrat(
* if (XFS_BUF_IS_GRIO(bp)) { * if (XFS_BUF_IS_GRIO(bp)) {
*/ */
pagebuf_iorequest(bp); xfs_buf_iorequest(bp);
return 0; return 0;
} }
......
...@@ -109,15 +109,15 @@ struct xfsstats { ...@@ -109,15 +109,15 @@ struct xfsstats {
__uint32_t vn_remove; /* # times vn_remove called */ __uint32_t vn_remove; /* # times vn_remove called */
__uint32_t vn_free; /* # times vn_free called */ __uint32_t vn_free; /* # times vn_free called */
#define XFSSTAT_END_BUF (XFSSTAT_END_VNODE_OPS+9) #define XFSSTAT_END_BUF (XFSSTAT_END_VNODE_OPS+9)
__uint32_t pb_get; __uint32_t xb_get;
__uint32_t pb_create; __uint32_t xb_create;
__uint32_t pb_get_locked; __uint32_t xb_get_locked;
__uint32_t pb_get_locked_waited; __uint32_t xb_get_locked_waited;
__uint32_t pb_busy_locked; __uint32_t xb_busy_locked;
__uint32_t pb_miss_locked; __uint32_t xb_miss_locked;
__uint32_t pb_page_retries; __uint32_t xb_page_retries;
__uint32_t pb_page_found; __uint32_t xb_page_found;
__uint32_t pb_get_read; __uint32_t xb_get_read;
/* Extra precision counters */ /* Extra precision counters */
__uint64_t xs_xstrat_bytes; __uint64_t xs_xstrat_bytes;
__uint64_t xs_write_bytes; __uint64_t xs_write_bytes;
......
...@@ -309,7 +309,7 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp) ...@@ -309,7 +309,7 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
return; return;
} }
if (mp->m_ddev_targp->pbr_bdev->bd_disk->queue->ordered == if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
QUEUE_ORDERED_NONE) { QUEUE_ORDERED_NONE) {
xfs_fs_cmn_err(CE_NOTE, mp, xfs_fs_cmn_err(CE_NOTE, mp,
"Disabling barriers, not supported by the underlying device"); "Disabling barriers, not supported by the underlying device");
...@@ -330,7 +330,7 @@ void ...@@ -330,7 +330,7 @@ void
xfs_blkdev_issue_flush( xfs_blkdev_issue_flush(
xfs_buftarg_t *buftarg) xfs_buftarg_t *buftarg)
{ {
blkdev_issue_flush(buftarg->pbr_bdev, NULL); blkdev_issue_flush(buftarg->bt_bdev, NULL);
} }
STATIC struct inode * STATIC struct inode *
...@@ -969,9 +969,9 @@ init_xfs_fs( void ) ...@@ -969,9 +969,9 @@ init_xfs_fs( void )
if (error < 0) if (error < 0)
goto undo_zones; goto undo_zones;
error = pagebuf_init(); error = xfs_buf_init();
if (error < 0) if (error < 0)
goto undo_pagebuf; goto undo_buffers;
vn_init(); vn_init();
xfs_init(); xfs_init();
...@@ -985,9 +985,9 @@ init_xfs_fs( void ) ...@@ -985,9 +985,9 @@ init_xfs_fs( void )
return 0; return 0;
undo_register: undo_register:
pagebuf_terminate(); xfs_buf_terminate();
undo_pagebuf: undo_buffers:
linvfs_destroy_zones(); linvfs_destroy_zones();
undo_zones: undo_zones:
...@@ -1001,7 +1001,7 @@ exit_xfs_fs( void ) ...@@ -1001,7 +1001,7 @@ exit_xfs_fs( void )
XFS_DM_EXIT(&xfs_fs_type); XFS_DM_EXIT(&xfs_fs_type);
unregister_filesystem(&xfs_fs_type); unregister_filesystem(&xfs_fs_type);
xfs_cleanup(); xfs_cleanup();
pagebuf_terminate(); xfs_buf_terminate();
linvfs_destroy_zones(); linvfs_destroy_zones();
ktrace_uninit(); ktrace_uninit();
} }
......
...@@ -308,7 +308,6 @@ typedef struct xfs_mount { ...@@ -308,7 +308,6 @@ typedef struct xfs_mount {
xfs_buftarg_t *m_ddev_targp; /* saves taking the address */ xfs_buftarg_t *m_ddev_targp; /* saves taking the address */
xfs_buftarg_t *m_logdev_targp;/* ptr to log device */ xfs_buftarg_t *m_logdev_targp;/* ptr to log device */
xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */ xfs_buftarg_t *m_rtdev_targp; /* ptr to rt device */
#define m_dev m_ddev_targp->pbr_dev
__uint8_t m_dircook_elog; /* log d-cookie entry bits */ __uint8_t m_dircook_elog; /* log d-cookie entry bits */
__uint8_t m_blkbit_log; /* blocklog + NBBY */ __uint8_t m_blkbit_log; /* blocklog + NBBY */
__uint8_t m_blkbb_log; /* blocklog - BBSHIFT */ __uint8_t m_blkbb_log; /* blocklog - BBSHIFT */
......
...@@ -238,6 +238,7 @@ xfs_bioerror_relse( ...@@ -238,6 +238,7 @@ xfs_bioerror_relse(
} }
return (EIO); return (EIO);
} }
/* /*
* Prints out an ALERT message about I/O error. * Prints out an ALERT message about I/O error.
*/ */
...@@ -252,11 +253,9 @@ xfs_ioerror_alert( ...@@ -252,11 +253,9 @@ xfs_ioerror_alert(
"I/O error in filesystem (\"%s\") meta-data dev %s block 0x%llx" "I/O error in filesystem (\"%s\") meta-data dev %s block 0x%llx"
" (\"%s\") error %d buf count %zd", " (\"%s\") error %d buf count %zd",
(!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname, (!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname,
XFS_BUFTARG_NAME(bp->pb_target), XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
(__uint64_t)blkno, (__uint64_t)blkno, func,
func, XFS_BUF_GETERROR(bp), XFS_BUF_COUNT(bp));
XFS_BUF_GETERROR(bp),
XFS_BUF_COUNT(bp));
} }
/* /*
......