Commit 37cd9600 authored by Linus Torvalds

Merge tag 'for-linus-v3.6-rc1' of git://oss.sgi.com/xfs/xfs

Pull xfs update from Ben Myers:
 "Numerous cleanups and several bug fixes.  Here are some highlights:

   - Discontiguous directory buffer support
   - Inode allocator refactoring
   - Removal of the IO lock in inode reclaim
   - Implementation of .update_time
   - Fix for handling of EOF in xfs_vm_writepage
   - Fix for races in xfsaild, and idle mode is re-enabled
   - Fix for a crash in xfs_buf completion handlers on unmount."

Fix up trivial conflicts in fs/xfs/{xfs_buf.c,xfs_log.c,xfs_log_priv.h}
due to duplicate patches that had already been merged for 3.5.
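
One of the highlights above is the new ->update_time inode operation. As a rough sketch of the shape such a handler takes -- assuming the 3.6-era VFS prototype int (*update_time)(struct inode *, struct timespec *, int); the reservation constants and omitted on-disk timestamp copies make this illustrative, not a quote of the tree -- XFS routes timestamp updates through a transaction so they reach the log:

    STATIC int
    xfs_vn_update_time(
    	struct inode		*inode,
    	struct timespec		*now,
    	int			flags)
    {
    	struct xfs_inode	*ip = XFS_I(inode);
    	struct xfs_trans	*tp;
    	int			error;

    	/* allocate and reserve a transaction (constants illustrative) */
    	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
    	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(ip->i_mount),
    				  0, 0, 0);
    	if (error) {
    		xfs_trans_cancel(tp, 0);
    		return -error;
    	}

    	xfs_ilock(ip, XFS_ILOCK_EXCL);
    	/* copy the timestamps the VFS asked for into the inode */
    	if (flags & S_CTIME)
    		inode->i_ctime = *now;
    	if (flags & S_MTIME)
    		inode->i_mtime = *now;
    	if (flags & S_ATIME)
    		inode->i_atime = *now;

    	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
    	xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
    	return -xfs_trans_commit(tp, 0);
    }

The point of the operation is that a plain mark_inode_dirty() no longer suffices for XFS: timestamp changes must be logged, not merely left in the dirty inode cache.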

* tag 'for-linus-v3.6-rc1' of git://oss.sgi.com/xfs/xfs: (44 commits)
  xfs: wait for the write of the superblock on unmount
  xfs: re-enable xfsaild idle mode and fix associated races
  xfs: remove iolock lock classes
  xfs: avoid the iolock in xfs_free_eofblocks for evicted inodes
  xfs: do not take the iolock in xfs_inactive
  xfs: remove xfs_inactive_attrs
  xfs: clean up xfs_inactive
  xfs: do not read the AGI buffer in xfs_dialloc until necessary
  xfs: refactor xfs_ialloc_ag_select
  xfs: add a short cut to xfs_dialloc for the non-NULL agbp case
  xfs: remove the alloc_done argument to xfs_dialloc
  xfs: split xfs_dialloc
  xfs: remove xfs_ialloc_find_free
  Prefix IO_XX flags with XFS_IO_XX to avoid namespace collision.
  xfs: remove xfs_inotobp
  xfs: merge xfs_itobp into xfs_imap_to_bp
  xfs: handle EOF correctly in xfs_vm_writepage
  xfs: implement ->update_time
  xfs: fix comment typo of struct xfs_da_blkinfo.
  xfs: do not call xfs_bdstrat_cb in xfs_buf_iodone_callbacks
  ...
parents 95b18e69 9a57fa8e
......@@ -50,20 +50,6 @@ typedef struct xfs_alloc_rec_incore {
/* btree pointer type */
typedef __be32 xfs_alloc_ptr_t;
/*
* Minimum and maximum blocksize and sectorsize.
* The blocksize upper limit is pretty much arbitrary.
* The sectorsize upper limit is due to sizeof(sb_sectsize).
*/
#define XFS_MIN_BLOCKSIZE_LOG 9 /* i.e. 512 bytes */
#define XFS_MAX_BLOCKSIZE_LOG 16 /* i.e. 65536 bytes */
#define XFS_MIN_BLOCKSIZE (1 << XFS_MIN_BLOCKSIZE_LOG)
#define XFS_MAX_BLOCKSIZE (1 << XFS_MAX_BLOCKSIZE_LOG)
#define XFS_MIN_SECTORSIZE_LOG 9 /* i.e. 512 bytes */
#define XFS_MAX_SECTORSIZE_LOG 15 /* i.e. 32768 bytes */
#define XFS_MIN_SECTORSIZE (1 << XFS_MIN_SECTORSIZE_LOG)
#define XFS_MAX_SECTORSIZE (1 << XFS_MAX_SECTORSIZE_LOG)
/*
* Block numbers in the AG:
* SB is sector 0, AGF is sector 1, AGI is sector 2, AGFL is sector 3.
......
......@@ -179,7 +179,7 @@ xfs_finish_ioend(
if (atomic_dec_and_test(&ioend->io_remaining)) {
struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount;
if (ioend->io_type == IO_UNWRITTEN)
if (ioend->io_type == XFS_IO_UNWRITTEN)
queue_work(mp->m_unwritten_workqueue, &ioend->io_work);
else if (ioend->io_append_trans)
queue_work(mp->m_data_workqueue, &ioend->io_work);
......@@ -210,7 +210,7 @@ xfs_end_io(
* For unwritten extents we need to issue transactions to convert a
* range to normal written extents after the data I/O has finished.
*/
if (ioend->io_type == IO_UNWRITTEN) {
if (ioend->io_type == XFS_IO_UNWRITTEN) {
/*
* For buffered I/O we never preallocate a transaction when
* doing the unwritten extent conversion, but for direct I/O
......@@ -312,7 +312,7 @@ xfs_map_blocks(
if (XFS_FORCED_SHUTDOWN(mp))
return -XFS_ERROR(EIO);
if (type == IO_UNWRITTEN)
if (type == XFS_IO_UNWRITTEN)
bmapi_flags |= XFS_BMAPI_IGSTATE;
if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
......@@ -323,10 +323,10 @@ xfs_map_blocks(
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
(ip->i_df.if_flags & XFS_IFEXTENTS));
ASSERT(offset <= mp->m_maxioffset);
ASSERT(offset <= mp->m_super->s_maxbytes);
if (offset + count > mp->m_maxioffset)
count = mp->m_maxioffset - offset;
if (offset + count > mp->m_super->s_maxbytes)
count = mp->m_super->s_maxbytes - offset;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
......@@ -336,7 +336,7 @@ xfs_map_blocks(
if (error)
return -XFS_ERROR(error);
if (type == IO_DELALLOC &&
if (type == XFS_IO_DELALLOC &&
(!nimaps || isnullstartblock(imap->br_startblock))) {
error = xfs_iomap_write_allocate(ip, offset, count, imap);
if (!error)
......@@ -345,7 +345,7 @@ xfs_map_blocks(
}
#ifdef DEBUG
if (type == IO_UNWRITTEN) {
if (type == XFS_IO_UNWRITTEN) {
ASSERT(nimaps);
ASSERT(imap->br_startblock != HOLESTARTBLOCK);
ASSERT(imap->br_startblock != DELAYSTARTBLOCK);
......@@ -634,11 +634,11 @@ xfs_check_page_type(
bh = head = page_buffers(page);
do {
if (buffer_unwritten(bh))
acceptable += (type == IO_UNWRITTEN);
acceptable += (type == XFS_IO_UNWRITTEN);
else if (buffer_delay(bh))
acceptable += (type == IO_DELALLOC);
acceptable += (type == XFS_IO_DELALLOC);
else if (buffer_dirty(bh) && buffer_mapped(bh))
acceptable += (type == IO_OVERWRITE);
acceptable += (type == XFS_IO_OVERWRITE);
else
break;
} while ((bh = bh->b_this_page) != head);
......@@ -721,11 +721,11 @@ xfs_convert_page(
if (buffer_unwritten(bh) || buffer_delay(bh) ||
buffer_mapped(bh)) {
if (buffer_unwritten(bh))
type = IO_UNWRITTEN;
type = XFS_IO_UNWRITTEN;
else if (buffer_delay(bh))
type = IO_DELALLOC;
type = XFS_IO_DELALLOC;
else
type = IO_OVERWRITE;
type = XFS_IO_OVERWRITE;
if (!xfs_imap_valid(inode, imap, offset)) {
done = 1;
......@@ -733,7 +733,7 @@ xfs_convert_page(
}
lock_buffer(bh);
if (type != IO_OVERWRITE)
if (type != XFS_IO_OVERWRITE)
xfs_map_at_offset(inode, bh, imap, offset);
xfs_add_to_ioend(inode, bh, offset, type,
ioendp, done);
......@@ -831,7 +831,7 @@ xfs_aops_discard_page(
struct buffer_head *bh, *head;
loff_t offset = page_offset(page);
if (!xfs_check_page_type(page, IO_DELALLOC))
if (!xfs_check_page_type(page, XFS_IO_DELALLOC))
goto out_invalidate;
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
......@@ -927,11 +927,26 @@ xfs_vm_writepage(
end_index = offset >> PAGE_CACHE_SHIFT;
last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
if (page->index >= end_index) {
if ((page->index >= end_index + 1) ||
!(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1);
/*
* Just skip the page if it is fully outside i_size, e.g. due
* to a truncate operation that is in progress.
*/
if (page->index >= end_index + 1 || offset_into_page == 0) {
unlock_page(page);
return 0;
}
/*
* The page straddles i_size. It must be zeroed out on each
* and every writepage invocation because it may be mmapped.
* "A file is mapped in multiples of the page size. For a file
* that is not a multiple of the page size, the remaining
* memory is zeroed when mapped, and writes to that region are
* not written out to the file."
*/
zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE);
}
end_offset = min_t(unsigned long long,
......@@ -941,7 +956,7 @@ xfs_vm_writepage(
bh = head = page_buffers(page);
offset = page_offset(page);
type = IO_OVERWRITE;
type = XFS_IO_OVERWRITE;
if (wbc->sync_mode == WB_SYNC_NONE)
nonblocking = 1;
......@@ -966,18 +981,18 @@ xfs_vm_writepage(
}
if (buffer_unwritten(bh)) {
if (type != IO_UNWRITTEN) {
type = IO_UNWRITTEN;
if (type != XFS_IO_UNWRITTEN) {
type = XFS_IO_UNWRITTEN;
imap_valid = 0;
}
} else if (buffer_delay(bh)) {
if (type != IO_DELALLOC) {
type = IO_DELALLOC;
if (type != XFS_IO_DELALLOC) {
type = XFS_IO_DELALLOC;
imap_valid = 0;
}
} else if (buffer_uptodate(bh)) {
if (type != IO_OVERWRITE) {
type = IO_OVERWRITE;
if (type != XFS_IO_OVERWRITE) {
type = XFS_IO_OVERWRITE;
imap_valid = 0;
}
} else {
......@@ -1013,7 +1028,7 @@ xfs_vm_writepage(
}
if (imap_valid) {
lock_buffer(bh);
if (type != IO_OVERWRITE)
if (type != XFS_IO_OVERWRITE)
xfs_map_at_offset(inode, bh, &imap, offset);
xfs_add_to_ioend(inode, bh, offset, type, &ioend,
new_ioend);
......@@ -1054,7 +1069,7 @@ xfs_vm_writepage(
* Reserve log space if we might write beyond the on-disk
* inode size.
*/
if (ioend->io_type != IO_UNWRITTEN &&
if (ioend->io_type != XFS_IO_UNWRITTEN &&
xfs_ioend_is_append(ioend)) {
err = xfs_setfilesize_trans_alloc(ioend);
if (err)
......@@ -1162,9 +1177,9 @@ __xfs_get_blocks(
lockmode = xfs_ilock_map_shared(ip);
}
ASSERT(offset <= mp->m_maxioffset);
if (offset + size > mp->m_maxioffset)
size = mp->m_maxioffset - offset;
ASSERT(offset <= mp->m_super->s_maxbytes);
if (offset + size > mp->m_super->s_maxbytes)
size = mp->m_super->s_maxbytes - offset;
end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
......@@ -1351,7 +1366,7 @@ xfs_end_io_direct_write(
ioend->io_iocb = iocb;
ioend->io_result = ret;
if (private && size > 0)
ioend->io_type = IO_UNWRITTEN;
ioend->io_type = XFS_IO_UNWRITTEN;
if (is_async) {
ioend->io_isasync = 1;
......@@ -1383,7 +1398,7 @@ xfs_vm_direct_IO(
* and converts at least one unwritten extent we will cancel
* the still clean transaction after the I/O has finished.
*/
iocb->private = ioend = xfs_alloc_ioend(inode, IO_DIRECT);
iocb->private = ioend = xfs_alloc_ioend(inode, XFS_IO_DIRECT);
if (offset + size > XFS_I(inode)->i_d.di_size) {
ret = xfs_setfilesize_trans_alloc(ioend);
if (ret)
......
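The writepage EOF handling above is easiest to check with concrete numbers. A worked example, assuming 4096-byte pages:

    /*
     * i_size = 10000, PAGE_CACHE_SIZE = 4096:
     *
     *   end_index        = 10000 >> 12  = 2
     *   offset_into_page = 10000 & 4095 = 1808
     *
     * page->index >= 3: wholly beyond EOF (>= end_index + 1), skipped.
     * page->index == 2: straddles EOF; bytes 1808..4095 are zeroed with
     *                   zero_user_segment() on every writepage call,
     *                   because an mmap write may have dirtied them.
     * page->index <= 1: wholly inside EOF, written out normally.
     *
     * If i_size were exactly 8192, offset_into_page would be 0 and
     * page 2 -- the first page past EOF -- would be skipped as well.
     */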
......@@ -24,17 +24,17 @@ extern mempool_t *xfs_ioend_pool;
* Types of I/O for bmap clustering and I/O completion tracking.
*/
enum {
IO_DIRECT = 0, /* special case for direct I/O ioends */
IO_DELALLOC, /* mapping covers delalloc region */
IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */
IO_OVERWRITE, /* mapping covers already allocated extent */
XFS_IO_DIRECT = 0, /* special case for direct I/O ioends */
XFS_IO_DELALLOC, /* covers delalloc region */
XFS_IO_UNWRITTEN, /* covers allocated but uninitialized data */
XFS_IO_OVERWRITE, /* covers already allocated extent */
};
#define XFS_IO_TYPES \
{ 0, "" }, \
{ IO_DELALLOC, "delalloc" }, \
{ IO_UNWRITTEN, "unwritten" }, \
{ IO_OVERWRITE, "overwrite" }
{ XFS_IO_DELALLOC, "delalloc" }, \
{ XFS_IO_UNWRITTEN, "unwritten" }, \
{ XFS_IO_OVERWRITE, "overwrite" }
/*
* xfs_ioend struct manages large extent writes for XFS.
......
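The renamed XFS_IO_TYPES table keeps the { value, "label" } shape that ftrace's __print_symbolic() consumes, which is what lets tracepoints print "delalloc" or "unwritten" instead of a raw enum value. An illustrative TP_printk() fragment in the style of xfs_trace.h (the surrounding fields are assumptions, not quoted from the tree):

    TP_printk("dev %d:%d ino 0x%llx type %s",
    	  MAJOR(__entry->dev), MINOR(__entry->dev),
    	  __entry->ino,
    	  __print_symbolic(__entry->type, XFS_IO_TYPES))

The { 0, "" } first entry maps XFS_IO_DIRECT (value 0) to an empty string.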
......@@ -893,7 +893,7 @@ STATIC int
xfs_attr_leaf_addname(xfs_da_args_t *args)
{
xfs_inode_t *dp;
xfs_dabuf_t *bp;
struct xfs_buf *bp;
int retval, error, committed, forkoff;
trace_xfs_attr_leaf_addname(args);
......@@ -915,11 +915,11 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
*/
retval = xfs_attr_leaf_lookup_int(bp, args);
if ((args->flags & ATTR_REPLACE) && (retval == ENOATTR)) {
xfs_da_brelse(args->trans, bp);
xfs_trans_brelse(args->trans, bp);
return(retval);
} else if (retval == EEXIST) {
if (args->flags & ATTR_CREATE) { /* pure create op */
xfs_da_brelse(args->trans, bp);
xfs_trans_brelse(args->trans, bp);
return(retval);
}
......@@ -937,7 +937,6 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* if required.
*/
retval = xfs_attr_leaf_add(bp, args);
xfs_da_buf_done(bp);
if (retval == ENOSPC) {
/*
* Promote the attribute list to the Btree format, then
......@@ -1065,8 +1064,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
*/
if (committed)
xfs_trans_ijoin(args->trans, dp, 0);
} else
xfs_da_buf_done(bp);
}
/*
* Commit the remove and start the next trans in series.
......@@ -1092,7 +1090,7 @@ STATIC int
xfs_attr_leaf_removename(xfs_da_args_t *args)
{
xfs_inode_t *dp;
xfs_dabuf_t *bp;
struct xfs_buf *bp;
int error, committed, forkoff;
trace_xfs_attr_leaf_removename(args);
......@@ -1111,7 +1109,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
ASSERT(bp != NULL);
error = xfs_attr_leaf_lookup_int(bp, args);
if (error == ENOATTR) {
xfs_da_brelse(args->trans, bp);
xfs_trans_brelse(args->trans, bp);
return(error);
}
......@@ -1141,8 +1139,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
*/
if (committed)
xfs_trans_ijoin(args->trans, dp, 0);
} else
xfs_da_buf_done(bp);
}
return(0);
}
......@@ -1155,7 +1152,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
STATIC int
xfs_attr_leaf_get(xfs_da_args_t *args)
{
xfs_dabuf_t *bp;
struct xfs_buf *bp;
int error;
args->blkno = 0;
......@@ -1167,11 +1164,11 @@ xfs_attr_leaf_get(xfs_da_args_t *args)
error = xfs_attr_leaf_lookup_int(bp, args);
if (error != EEXIST) {
xfs_da_brelse(args->trans, bp);
xfs_trans_brelse(args->trans, bp);
return(error);
}
error = xfs_attr_leaf_getvalue(bp, args);
xfs_da_brelse(args->trans, bp);
xfs_trans_brelse(args->trans, bp);
if (!error && (args->rmtblkno > 0) && !(args->flags & ATTR_KERNOVAL)) {
error = xfs_attr_rmtval_get(args);
}
......@@ -1186,23 +1183,23 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context)
{
xfs_attr_leafblock_t *leaf;
int error;
xfs_dabuf_t *bp;
struct xfs_buf *bp;
context->cursor->blkno = 0;
error = xfs_da_read_buf(NULL, context->dp, 0, -1, &bp, XFS_ATTR_FORK);
if (error)
return XFS_ERROR(error);
ASSERT(bp != NULL);
leaf = bp->data;
leaf = bp->b_addr;
if (unlikely(leaf->hdr.info.magic != cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) {
XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW,
context->dp->i_mount, leaf);
xfs_da_brelse(NULL, bp);
xfs_trans_brelse(NULL, bp);
return XFS_ERROR(EFSCORRUPTED);
}
error = xfs_attr_leaf_list_int(bp, context);
xfs_da_brelse(NULL, bp);
xfs_trans_brelse(NULL, bp);
return XFS_ERROR(error);
}
......@@ -1489,7 +1486,7 @@ xfs_attr_node_removename(xfs_da_args_t *args)
xfs_da_state_t *state;
xfs_da_state_blk_t *blk;
xfs_inode_t *dp;
xfs_dabuf_t *bp;
struct xfs_buf *bp;
int retval, error, committed, forkoff;
trace_xfs_attr_node_removename(args);
......@@ -1601,14 +1598,13 @@ xfs_attr_node_removename(xfs_da_args_t *args)
*/
ASSERT(state->path.active == 1);
ASSERT(state->path.blk[0].bp);
xfs_da_buf_done(state->path.blk[0].bp);
state->path.blk[0].bp = NULL;
error = xfs_da_read_buf(args->trans, args->dp, 0, -1, &bp,
XFS_ATTR_FORK);
if (error)
goto out;
ASSERT((((xfs_attr_leafblock_t *)bp->data)->hdr.info.magic) ==
ASSERT((((xfs_attr_leafblock_t *)bp->b_addr)->hdr.info.magic) ==
cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
......@@ -1635,7 +1631,7 @@ xfs_attr_node_removename(xfs_da_args_t *args)
if (committed)
xfs_trans_ijoin(args->trans, dp, 0);
} else
xfs_da_brelse(args->trans, bp);
xfs_trans_brelse(args->trans, bp);
}
error = 0;
......@@ -1665,8 +1661,7 @@ xfs_attr_fillstate(xfs_da_state_t *state)
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->bp) {
blk->disk_blkno = xfs_da_blkno(blk->bp);
xfs_da_buf_done(blk->bp);
blk->disk_blkno = XFS_BUF_ADDR(blk->bp);
blk->bp = NULL;
} else {
blk->disk_blkno = 0;
......@@ -1681,8 +1676,7 @@ xfs_attr_fillstate(xfs_da_state_t *state)
ASSERT((path->active >= 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
for (blk = path->blk, level = 0; level < path->active; blk++, level++) {
if (blk->bp) {
blk->disk_blkno = xfs_da_blkno(blk->bp);
xfs_da_buf_done(blk->bp);
blk->disk_blkno = XFS_BUF_ADDR(blk->bp);
blk->bp = NULL;
} else {
blk->disk_blkno = 0;
......@@ -1792,7 +1786,7 @@ xfs_attr_node_get(xfs_da_args_t *args)
* If not in a transaction, we have to release all the buffers.
*/
for (i = 0; i < state->path.active; i++) {
xfs_da_brelse(args->trans, state->path.blk[i].bp);
xfs_trans_brelse(args->trans, state->path.blk[i].bp);
state->path.blk[i].bp = NULL;
}
......@@ -1808,7 +1802,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
xfs_da_intnode_t *node;
xfs_da_node_entry_t *btree;
int error, i;
xfs_dabuf_t *bp;
struct xfs_buf *bp;
cursor = context->cursor;
cursor->initted = 1;
......@@ -1825,30 +1819,30 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
if ((error != 0) && (error != EFSCORRUPTED))
return(error);
if (bp) {
node = bp->data;
node = bp->b_addr;
switch (be16_to_cpu(node->hdr.info.magic)) {
case XFS_DA_NODE_MAGIC:
trace_xfs_attr_list_wrong_blk(context);
xfs_da_brelse(NULL, bp);
xfs_trans_brelse(NULL, bp);
bp = NULL;
break;
case XFS_ATTR_LEAF_MAGIC:
leaf = bp->data;
leaf = bp->b_addr;
if (cursor->hashval > be32_to_cpu(leaf->entries[
be16_to_cpu(leaf->hdr.count)-1].hashval)) {
trace_xfs_attr_list_wrong_blk(context);
xfs_da_brelse(NULL, bp);
xfs_trans_brelse(NULL, bp);
bp = NULL;
} else if (cursor->hashval <=
be32_to_cpu(leaf->entries[0].hashval)) {
trace_xfs_attr_list_wrong_blk(context);
xfs_da_brelse(NULL, bp);
xfs_trans_brelse(NULL, bp);
bp = NULL;
}
break;
default:
trace_xfs_attr_list_wrong_blk(context);
xfs_da_brelse(NULL, bp);
xfs_trans_brelse(NULL, bp);
bp = NULL;
}
}
......@@ -1873,7 +1867,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
context->dp->i_mount);
return(XFS_ERROR(EFSCORRUPTED));
}
node = bp->data;
node = bp->b_addr;
if (node->hdr.info.magic ==
cpu_to_be16(XFS_ATTR_LEAF_MAGIC))
break;
......@@ -1883,7 +1877,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
XFS_ERRLEVEL_LOW,
context->dp->i_mount,
node);
xfs_da_brelse(NULL, bp);
xfs_trans_brelse(NULL, bp);
return(XFS_ERROR(EFSCORRUPTED));
}
btree = node->btree;
......@@ -1898,10 +1892,10 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
}
}
if (i == be16_to_cpu(node->hdr.count)) {
xfs_da_brelse(NULL, bp);
xfs_trans_brelse(NULL, bp);
return(0);
}
xfs_da_brelse(NULL, bp);
xfs_trans_brelse(NULL, bp);
}
}
ASSERT(bp != NULL);
......@@ -1912,24 +1906,24 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
* adding the information.
*/
for (;;) {
leaf = bp->data;
leaf = bp->b_addr;
if (unlikely(leaf->hdr.info.magic !=
cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) {
XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)",
XFS_ERRLEVEL_LOW,
context->dp->i_mount, leaf);
xfs_da_brelse(NULL, bp);
xfs_trans_brelse(NULL, bp);
return(XFS_ERROR(EFSCORRUPTED));
}
error = xfs_attr_leaf_list_int(bp, context);
if (error) {
xfs_da_brelse(NULL, bp);
xfs_trans_brelse(NULL, bp);
return error;
}
if (context->seen_enough || leaf->hdr.info.forw == 0)
break;
cursor->blkno = be32_to_cpu(leaf->hdr.info.forw);
xfs_da_brelse(NULL, bp);
xfs_trans_brelse(NULL, bp);
error = xfs_da_read_buf(NULL, context->dp, cursor->blkno, -1,
&bp, XFS_ATTR_FORK);
if (error)
......@@ -1941,7 +1935,7 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
return(XFS_ERROR(EFSCORRUPTED));
}
}
xfs_da_brelse(NULL, bp);
xfs_trans_brelse(NULL, bp);
return(0);
}
......
......@@ -31,7 +31,6 @@
struct attrlist;
struct attrlist_cursor_kern;
struct xfs_attr_list_context;
struct xfs_dabuf;
struct xfs_da_args;
struct xfs_da_state;
struct xfs_da_state_blk;
......@@ -215,7 +214,7 @@ int xfs_attr_shortform_getvalue(struct xfs_da_args *args);
int xfs_attr_shortform_to_leaf(struct xfs_da_args *args);
int xfs_attr_shortform_remove(struct xfs_da_args *args);
int xfs_attr_shortform_list(struct xfs_attr_list_context *context);
int xfs_attr_shortform_allfit(struct xfs_dabuf *bp, struct xfs_inode *dp);
int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
......@@ -223,7 +222,7 @@ int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
* Internal routines when attribute fork size == XFS_LBSIZE(mp).
*/
int xfs_attr_leaf_to_node(struct xfs_da_args *args);
int xfs_attr_leaf_to_shortform(struct xfs_dabuf *bp,
int xfs_attr_leaf_to_shortform(struct xfs_buf *bp,
struct xfs_da_args *args, int forkoff);
int xfs_attr_leaf_clearflag(struct xfs_da_args *args);
int xfs_attr_leaf_setflag(struct xfs_da_args *args);
......@@ -235,14 +234,14 @@ int xfs_attr_leaf_flipflags(xfs_da_args_t *args);
int xfs_attr_leaf_split(struct xfs_da_state *state,
struct xfs_da_state_blk *oldblk,
struct xfs_da_state_blk *newblk);
int xfs_attr_leaf_lookup_int(struct xfs_dabuf *leaf,
int xfs_attr_leaf_lookup_int(struct xfs_buf *leaf,
struct xfs_da_args *args);
int xfs_attr_leaf_getvalue(struct xfs_dabuf *bp, struct xfs_da_args *args);
int xfs_attr_leaf_add(struct xfs_dabuf *leaf_buffer,
int xfs_attr_leaf_getvalue(struct xfs_buf *bp, struct xfs_da_args *args);
int xfs_attr_leaf_add(struct xfs_buf *leaf_buffer,
struct xfs_da_args *args);
int xfs_attr_leaf_remove(struct xfs_dabuf *leaf_buffer,
int xfs_attr_leaf_remove(struct xfs_buf *leaf_buffer,
struct xfs_da_args *args);
int xfs_attr_leaf_list_int(struct xfs_dabuf *bp,
int xfs_attr_leaf_list_int(struct xfs_buf *bp,
struct xfs_attr_list_context *context);
/*
......@@ -257,9 +256,9 @@ int xfs_attr_root_inactive(struct xfs_trans **trans, struct xfs_inode *dp);
/*
* Utility routines.
*/
xfs_dahash_t xfs_attr_leaf_lasthash(struct xfs_dabuf *bp, int *count);
int xfs_attr_leaf_order(struct xfs_dabuf *leaf1_bp,
struct xfs_dabuf *leaf2_bp);
xfs_dahash_t xfs_attr_leaf_lasthash(struct xfs_buf *bp, int *count);
int xfs_attr_leaf_order(struct xfs_buf *leaf1_bp,
struct xfs_buf *leaf2_bp);
int xfs_attr_leaf_newentsize(int namelen, int valuelen, int blocksize,
int *local);
#endif /* __XFS_ATTR_LEAF_H__ */
......@@ -5517,7 +5517,7 @@ xfs_getbmap(
if (xfs_get_extsz_hint(ip) ||
ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
prealloced = 1;
fixlen = XFS_MAXIOFFSET(mp);
fixlen = mp->m_super->s_maxbytes;
} else {
prealloced = 0;
fixlen = XFS_ISIZE(ip);
......
......@@ -164,14 +164,49 @@ xfs_buf_stale(
ASSERT(atomic_read(&bp->b_hold) >= 1);
}
static int
xfs_buf_get_maps(
struct xfs_buf *bp,
int map_count)
{
ASSERT(bp->b_maps == NULL);
bp->b_map_count = map_count;
if (map_count == 1) {
bp->b_maps = &bp->b_map;
return 0;
}
bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
KM_NOFS);
if (!bp->b_maps)
return ENOMEM;
return 0;
}
/*
* Frees b_maps if it was allocated out of line from the buffer.
*/
static void
xfs_buf_free_maps(
struct xfs_buf *bp)
{
if (bp->b_maps != &bp->b_map) {
kmem_free(bp->b_maps);
bp->b_maps = NULL;
}
}
struct xfs_buf *
xfs_buf_alloc(
_xfs_buf_alloc(
struct xfs_buftarg *target,
xfs_daddr_t blkno,
size_t numblks,
struct xfs_buf_map *map,
int nmaps,
xfs_buf_flags_t flags)
{
struct xfs_buf *bp;
int error;
int i;
bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
if (unlikely(!bp))
......@@ -192,16 +227,28 @@ xfs_buf_alloc(
sema_init(&bp->b_sema, 0); /* held, no waiters */
XB_SET_OWNER(bp);
bp->b_target = target;
bp->b_flags = flags;
/*
* Set length and io_length to the same value initially.
* I/O routines should use io_length, which will be the same in
* most cases but may be reset (e.g. XFS recovery).
*/
bp->b_length = numblks;
bp->b_io_length = numblks;
bp->b_flags = flags;
bp->b_bn = blkno;
error = xfs_buf_get_maps(bp, nmaps);
if (error) {
kmem_zone_free(xfs_buf_zone, bp);
return NULL;
}
bp->b_bn = map[0].bm_bn;
bp->b_length = 0;
for (i = 0; i < nmaps; i++) {
bp->b_maps[i].bm_bn = map[i].bm_bn;
bp->b_maps[i].bm_len = map[i].bm_len;
bp->b_length += map[i].bm_len;
}
bp->b_io_length = bp->b_length;
atomic_set(&bp->b_pin_count, 0);
init_waitqueue_head(&bp->b_waiters);
......@@ -280,6 +327,7 @@ xfs_buf_free(
} else if (bp->b_flags & _XBF_KMEM)
kmem_free(bp->b_addr);
_xfs_buf_free_pages(bp);
xfs_buf_free_maps(bp);
kmem_zone_free(xfs_buf_zone, bp);
}
......@@ -327,8 +375,9 @@ xfs_buf_allocate_memory(
}
use_alloc_page:
start = BBTOB(bp->b_bn) >> PAGE_SHIFT;
end = (BBTOB(bp->b_bn + bp->b_length) + PAGE_SIZE - 1) >> PAGE_SHIFT;
start = BBTOB(bp->b_map.bm_bn) >> PAGE_SHIFT;
end = (BBTOB(bp->b_map.bm_bn + bp->b_length) + PAGE_SIZE - 1)
>> PAGE_SHIFT;
page_count = end - start;
error = _xfs_buf_get_pages(bp, page_count, flags);
if (unlikely(error))
......@@ -425,8 +474,8 @@ _xfs_buf_map_pages(
xfs_buf_t *
_xfs_buf_find(
struct xfs_buftarg *btp,
xfs_daddr_t blkno,
size_t numblks,
struct xfs_buf_map *map,
int nmaps,
xfs_buf_flags_t flags,
xfs_buf_t *new_bp)
{
......@@ -435,7 +484,12 @@ _xfs_buf_find(
struct rb_node **rbp;
struct rb_node *parent;
xfs_buf_t *bp;
xfs_daddr_t blkno = map[0].bm_bn;
int numblks = 0;
int i;
for (i = 0; i < nmaps; i++)
numblks += map[i].bm_len;
numbytes = BBTOB(numblks);
/* Check for IOs smaller than the sector size / not sector aligned */
......@@ -527,31 +581,31 @@ _xfs_buf_find(
* more hits than misses.
*/
struct xfs_buf *
xfs_buf_get(
xfs_buftarg_t *target,
xfs_daddr_t blkno,
size_t numblks,
xfs_buf_get_map(
struct xfs_buftarg *target,
struct xfs_buf_map *map,
int nmaps,
xfs_buf_flags_t flags)
{
struct xfs_buf *bp;
struct xfs_buf *new_bp;
int error = 0;
bp = _xfs_buf_find(target, blkno, numblks, flags, NULL);
bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
if (likely(bp))
goto found;
new_bp = xfs_buf_alloc(target, blkno, numblks, flags);
new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
if (unlikely(!new_bp))
return NULL;
error = xfs_buf_allocate_memory(new_bp, flags);
if (error) {
kmem_zone_free(xfs_buf_zone, new_bp);
xfs_buf_free(new_bp);
return NULL;
}
bp = _xfs_buf_find(target, blkno, numblks, flags, new_bp);
bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
if (!bp) {
xfs_buf_free(new_bp);
return NULL;
......@@ -560,8 +614,6 @@ xfs_buf_get(
if (bp != new_bp)
xfs_buf_free(new_bp);
bp->b_io_length = bp->b_length;
found:
if (!bp->b_addr) {
error = _xfs_buf_map_pages(bp, flags);
......@@ -584,7 +636,7 @@ _xfs_buf_read(
xfs_buf_flags_t flags)
{
ASSERT(!(flags & XBF_WRITE));
ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
ASSERT(bp->b_map.bm_bn != XFS_BUF_DADDR_NULL);
bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
......@@ -596,17 +648,17 @@ _xfs_buf_read(
}
xfs_buf_t *
xfs_buf_read(
xfs_buftarg_t *target,
xfs_daddr_t blkno,
size_t numblks,
xfs_buf_read_map(
struct xfs_buftarg *target,
struct xfs_buf_map *map,
int nmaps,
xfs_buf_flags_t flags)
{
xfs_buf_t *bp;
struct xfs_buf *bp;
flags |= XBF_READ;
bp = xfs_buf_get(target, blkno, numblks, flags);
bp = xfs_buf_get_map(target, map, nmaps, flags);
if (bp) {
trace_xfs_buf_read(bp, flags, _RET_IP_);
......@@ -634,15 +686,15 @@ xfs_buf_read(
* safe manner.
*/
void
xfs_buf_readahead(
xfs_buftarg_t *target,
xfs_daddr_t blkno,
size_t numblks)
xfs_buf_readahead_map(
struct xfs_buftarg *target,
struct xfs_buf_map *map,
int nmaps)
{
if (bdi_read_congested(target->bt_bdi))
return;
xfs_buf_read(target, blkno, numblks,
xfs_buf_read_map(target, map, nmaps,
XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
}
......@@ -665,8 +717,10 @@ xfs_buf_read_uncached(
return NULL;
/* set up the buffer for a read IO */
XFS_BUF_SET_ADDR(bp, daddr);
XFS_BUF_READ(bp);
ASSERT(bp->b_map_count == 1);
bp->b_bn = daddr;
bp->b_maps[0].bm_bn = daddr;
bp->b_flags |= XBF_READ;
xfsbdstrat(target->bt_mount, bp);
error = xfs_buf_iowait(bp);
......@@ -694,7 +748,11 @@ xfs_buf_set_empty(
bp->b_addr = NULL;
bp->b_length = numblks;
bp->b_io_length = numblks;
ASSERT(bp->b_map_count == 1);
bp->b_bn = XFS_BUF_DADDR_NULL;
bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
bp->b_maps[0].bm_len = bp->b_length;
}
static inline struct page *
......@@ -758,9 +816,10 @@ xfs_buf_get_uncached(
{
unsigned long page_count;
int error, i;
xfs_buf_t *bp;
struct xfs_buf *bp;
DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
bp = xfs_buf_alloc(target, XFS_BUF_DADDR_NULL, numblks, 0);
bp = _xfs_buf_alloc(target, &map, 1, 0);
if (unlikely(bp == NULL))
goto fail;
......@@ -791,6 +850,7 @@ xfs_buf_get_uncached(
__free_page(bp->b_pages[i]);
_xfs_buf_free_pages(bp);
fail_free_buf:
xfs_buf_free_maps(bp);
kmem_zone_free(xfs_buf_zone, bp);
fail:
return NULL;
......@@ -1144,36 +1204,39 @@ xfs_buf_bio_end_io(
bio_put(bio);
}
STATIC void
_xfs_buf_ioapply(
xfs_buf_t *bp)
static void
xfs_buf_ioapply_map(
struct xfs_buf *bp,
int map,
int *buf_offset,
int *count,
int rw)
{
int rw, map_i, total_nr_pages, nr_pages;
int page_index;
int total_nr_pages = bp->b_page_count;
int nr_pages;
struct bio *bio;
int offset = bp->b_offset;
int size = BBTOB(bp->b_io_length);
sector_t sector = bp->b_bn;
sector_t sector = bp->b_maps[map].bm_bn;
int size;
int offset;
total_nr_pages = bp->b_page_count;
map_i = 0;
if (bp->b_flags & XBF_WRITE) {
if (bp->b_flags & XBF_SYNCIO)
rw = WRITE_SYNC;
else
rw = WRITE;
if (bp->b_flags & XBF_FUA)
rw |= REQ_FUA;
if (bp->b_flags & XBF_FLUSH)
rw |= REQ_FLUSH;
} else if (bp->b_flags & XBF_READ_AHEAD) {
rw = READA;
} else {
rw = READ;
/* skip the pages in the buffer before the start offset */
page_index = 0;
offset = *buf_offset;
while (offset >= PAGE_SIZE) {
page_index++;
offset -= PAGE_SIZE;
}
/* we only use the buffer cache for meta-data */
rw |= REQ_META;
/*
* Limit the IO size to the length of the current vector, and update the
* remaining IO count for the next time around.
*/
size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
*count -= size;
*buf_offset += size;
next_chunk:
atomic_inc(&bp->b_io_remaining);
......@@ -1188,13 +1251,14 @@ _xfs_buf_ioapply(
bio->bi_private = bp;
for (; size && nr_pages; nr_pages--, map_i++) {
for (; size && nr_pages; nr_pages--, page_index++) {
int rbytes, nbytes = PAGE_SIZE - offset;
if (nbytes > size)
nbytes = size;
rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
offset);
if (rbytes < nbytes)
break;
......@@ -1216,6 +1280,54 @@ _xfs_buf_ioapply(
xfs_buf_ioerror(bp, EIO);
bio_put(bio);
}
}
STATIC void
_xfs_buf_ioapply(
struct xfs_buf *bp)
{
struct blk_plug plug;
int rw;
int offset;
int size;
int i;
if (bp->b_flags & XBF_WRITE) {
if (bp->b_flags & XBF_SYNCIO)
rw = WRITE_SYNC;
else
rw = WRITE;
if (bp->b_flags & XBF_FUA)
rw |= REQ_FUA;
if (bp->b_flags & XBF_FLUSH)
rw |= REQ_FLUSH;
} else if (bp->b_flags & XBF_READ_AHEAD) {
rw = READA;
} else {
rw = READ;
}
/* we only use the buffer cache for meta-data */
rw |= REQ_META;
/*
* Walk all the vectors issuing IO on them. Set up the initial offset
* into the buffer and the desired IO size before we start -
* xfs_buf_ioapply_map() will modify them appropriately for each
* subsequent call.
*/
offset = bp->b_offset;
size = BBTOB(bp->b_io_length);
blk_start_plug(&plug);
for (i = 0; i < bp->b_map_count; i++) {
xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
if (bp->b_error)
break;
if (size <= 0)
break; /* all done */
}
blk_finish_plug(&plug);
}
void
......@@ -1557,7 +1669,7 @@ xfs_buf_cmp(
struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
xfs_daddr_t diff;
diff = ap->b_bn - bp->b_bn;
diff = ap->b_map.bm_bn - bp->b_map.bm_bn;
if (diff < 0)
return -1;
if (diff > 0)
......
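To see how the offset bookkeeping in xfs_buf_ioapply_map() plays out, here is a worked trace, assuming 4096-byte pages and 512-byte basic blocks (so BBTOB(n) == n * 512):

    /*
     * A two-extent buffer: b_maps[0].bm_len = b_maps[1].bm_len = 8 BBs,
     * b_offset = 0, b_io_length = 16 BBs, so _xfs_buf_ioapply starts
     * with offset = 0 and size = BBTOB(16) = 8192.
     *
     * map 0: *buf_offset = 0    -> page_index 0, offset 0
     *        chunk = min(BBTOB(8), 8192) = 4096
     *        after: *count = 4096, *buf_offset = 4096
     *
     * map 1: *buf_offset = 4096 -> the skip loop advances to
     *        page_index 1, offset 0
     *        chunk = min(BBTOB(8), 4096) = 4096
     *        after: *count = 0, so the caller's loop terminates
     *
     * Each map gets its own bio chain, starting at b_maps[map].bm_bn.
     */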
......@@ -58,6 +58,7 @@ typedef enum {
#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */
#define _XBF_COMPOUND (1 << 23)/* compound buffer */
typedef unsigned int xfs_buf_flags_t;
......@@ -75,7 +76,8 @@ typedef unsigned int xfs_buf_flags_t;
{ XBF_UNMAPPED, "UNMAPPED" }, /* ditto */\
{ _XBF_PAGES, "PAGES" }, \
{ _XBF_KMEM, "KMEM" }, \
{ _XBF_DELWRI_Q, "DELWRI_Q" }
{ _XBF_DELWRI_Q, "DELWRI_Q" }, \
{ _XBF_COMPOUND, "COMPOUND" }
typedef struct xfs_buftarg {
dev_t bt_dev;
......@@ -98,6 +100,14 @@ typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
#define XB_PAGES 2
struct xfs_buf_map {
xfs_daddr_t bm_bn; /* block number for I/O */
int bm_len; /* size of I/O */
};
#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
typedef struct xfs_buf {
/*
* first cacheline holds all the fields needed for an uncontended cache
......@@ -107,7 +117,7 @@ typedef struct xfs_buf {
* fast-path on locking.
*/
struct rb_node b_rbnode; /* rbtree node */
xfs_daddr_t b_bn; /* block number for I/O */
xfs_daddr_t b_bn; /* block number of buffer */
int b_length; /* size of buffer in BBs */
atomic_t b_hold; /* reference count */
atomic_t b_lru_ref; /* lru reclaim ref count */
......@@ -127,12 +137,16 @@ typedef struct xfs_buf {
struct xfs_trans *b_transp;
struct page **b_pages; /* array of page pointers */
struct page *b_page_array[XB_PAGES]; /* inline pages */
struct xfs_buf_map *b_maps; /* compound buffer map */
struct xfs_buf_map b_map; /* inline compound buffer map */
int b_map_count;
int b_io_length; /* IO size in BBs */
atomic_t b_pin_count; /* pin count */
atomic_t b_io_remaining; /* #outstanding I/O requests */
unsigned int b_page_count; /* size of page array */
unsigned int b_offset; /* page offset in first page */
unsigned short b_error; /* error code on I/O */
#ifdef XFS_BUF_LOCK_TRACKING
int b_last_holder;
#endif
......@@ -140,22 +154,78 @@ typedef struct xfs_buf {
/* Finding and Reading Buffers */
struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target, xfs_daddr_t blkno,
size_t numblks, xfs_buf_flags_t flags,
struct xfs_buf *new_bp);
#define xfs_incore(buftarg,blkno,len,lockit) \
_xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
struct xfs_buf *xfs_buf_get(struct xfs_buftarg *target, xfs_daddr_t blkno,
size_t numblks, xfs_buf_flags_t flags);
struct xfs_buf *xfs_buf_read(struct xfs_buftarg *target, xfs_daddr_t blkno,
size_t numblks, xfs_buf_flags_t flags);
void xfs_buf_readahead(struct xfs_buftarg *target, xfs_daddr_t blkno,
size_t numblks);
struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target,
struct xfs_buf_map *map, int nmaps,
xfs_buf_flags_t flags, struct xfs_buf *new_bp);
static inline struct xfs_buf *
xfs_incore(
struct xfs_buftarg *target,
xfs_daddr_t blkno,
size_t numblks,
xfs_buf_flags_t flags)
{
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
return _xfs_buf_find(target, &map, 1, flags, NULL);
}
struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
struct xfs_buf_map *map, int nmaps,
xfs_buf_flags_t flags);
static inline struct xfs_buf *
xfs_buf_alloc(
struct xfs_buftarg *target,
xfs_daddr_t blkno,
size_t numblks,
xfs_buf_flags_t flags)
{
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
return _xfs_buf_alloc(target, &map, 1, flags);
}
struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
struct xfs_buf_map *map, int nmaps,
xfs_buf_flags_t flags);
struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
struct xfs_buf_map *map, int nmaps,
xfs_buf_flags_t flags);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
struct xfs_buf_map *map, int nmaps);
static inline struct xfs_buf *
xfs_buf_get(
struct xfs_buftarg *target,
xfs_daddr_t blkno,
size_t numblks,
xfs_buf_flags_t flags)
{
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
return xfs_buf_get_map(target, &map, 1, flags);
}
static inline struct xfs_buf *
xfs_buf_read(
struct xfs_buftarg *target,
xfs_daddr_t blkno,
size_t numblks,
xfs_buf_flags_t flags)
{
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
return xfs_buf_read_map(target, &map, 1, flags);
}
static inline void
xfs_buf_readahead(
struct xfs_buftarg *target,
xfs_daddr_t blkno,
size_t numblks)
{
DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
return xfs_buf_readahead_map(target, &map, 1);
}
struct xfs_buf *xfs_buf_get_empty(struct xfs_buftarg *target, size_t numblks);
struct xfs_buf *xfs_buf_alloc(struct xfs_buftarg *target, xfs_daddr_t blkno,
size_t numblks, xfs_buf_flags_t flags);
void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);
......@@ -232,8 +302,18 @@ void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE)
#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE)
#define XFS_BUF_ADDR(bp) ((bp)->b_bn)
#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_bn = (xfs_daddr_t)(bno))
/*
* These macros use the IO block map rather than b_bn. b_bn is now really
* just for the buffer cache index for cached buffers. As IO does not use b_bn
* anymore, uncached buffers do not use b_bn at all and hence must modify the IO
* map directly. Uncached buffers are not allowed to be discontiguous, so this
* is safe to do.
*
* In future, uncached buffers will pass the block number directly to the io
* request function and hence these macros will go away at that point.
*/
#define XFS_BUF_ADDR(bp) ((bp)->b_map.bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno) ((bp)->b_map.bm_bn = (xfs_daddr_t)(bno))
static inline void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref)
{
......
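With the map-based API above in place, a discontiguous object -- say, a multi-fsblock directory buffer split across two extents -- can be handled as a single logical buffer. A hedged usage sketch; the block numbers and error handling are illustrative only:

    struct xfs_buf_map map[2] = {
    	{ .bm_bn = blkno1, .bm_len = 4 },	/* first extent, in BBs (hypothetical) */
    	{ .bm_bn = blkno2, .bm_len = 4 },	/* second extent (hypothetical) */
    };
    struct xfs_buf	*bp;

    bp = xfs_buf_read_map(target, map, 2, 0);
    if (!bp)
    	return ENOMEM;	/* illustrative error handling */
    /* bp->b_length is 8 BBs; I/O was issued as one bio per map */
    xfs_buf_relse(bp);

The single-block wrappers (xfs_buf_get(), xfs_buf_read(), xfs_buf_readahead()) reduce to exactly this with nmaps == 1 via DEFINE_SINGLE_BUF_MAP(), so existing callers did not have to change.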
......@@ -20,23 +20,6 @@
extern kmem_zone_t *xfs_buf_item_zone;
/*
* This is the structure used to lay out a buf log item in the
* log. The data map describes which 128 byte chunks of the buffer
* have been logged.
* For 6.2 and beyond, this is XFS_LI_BUF. We use this to log everything.
*/
typedef struct xfs_buf_log_format {
unsigned short blf_type; /* buf log item type indicator */
unsigned short blf_size; /* size of this item */
ushort blf_flags; /* misc state */
ushort blf_len; /* number of blocks in this buf */
__int64_t blf_blkno; /* starting blkno of this buf */
unsigned int blf_map_size; /* size of data bitmap in words */
unsigned int blf_data_map[1];/* variable size bitmap of */
/* regions of buffer in this item */
} xfs_buf_log_format_t;
/*
* This flag indicates that the buffer contains on disk inodes
* and requires special recovery handling.
......@@ -60,6 +43,23 @@ typedef struct xfs_buf_log_format {
#define BIT_TO_WORD_SHIFT 5
#define NBWORD (NBBY * sizeof(unsigned int))
/*
* This is the structure used to lay out a buf log item in the
* log. The data map describes which 128 byte chunks of the buffer
* have been logged.
*/
#define XFS_BLF_DATAMAP_SIZE ((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) / NBWORD)
typedef struct xfs_buf_log_format {
unsigned short blf_type; /* buf log item type indicator */
unsigned short blf_size; /* size of this item */
ushort blf_flags; /* misc state */
ushort blf_len; /* number of blocks in this buf */
__int64_t blf_blkno; /* starting blkno of this buf */
unsigned int blf_map_size; /* used size of data bitmap in words */
unsigned int blf_data_map[XFS_BLF_DATAMAP_SIZE]; /* dirty bitmap */
} xfs_buf_log_format_t;
/*
* buf log item flags
*/
......@@ -102,7 +102,9 @@ typedef struct xfs_buf_log_item {
char *bli_orig; /* original buffer copy */
char *bli_logged; /* bytes logged (bitmap) */
#endif
xfs_buf_log_format_t bli_format; /* in-log header */
int bli_format_count; /* count of headers */
struct xfs_buf_log_format *bli_formats; /* array of in-log header ptrs */
struct xfs_buf_log_format bli_format; /* embedded in-log header */
} xfs_buf_log_item_t;
void xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
......
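The new fixed-size data map is easy to sanity-check, assuming the 128-byte chunk size (XFS_BLF_CHUNK) named in the comment, NBBY = 8, and a 4-byte unsigned int:

    /*
     * XFS_MAX_BLOCKSIZE      = 1 << 16                  = 65536 bytes
     * chunks per largest buf = 65536 / 128              = 512 bits needed
     * NBWORD                 = NBBY * sizeof(unsigned int) = 8 * 4 = 32
     * XFS_BLF_DATAMAP_SIZE   = 512 / 32                 = 16 words
     */

So blf_data_map[] is a fixed 64-byte bitmap sized for the worst case, while blf_map_size still records how many words a particular buffer actually uses -- which is what lets bli_formats point at an array of full-size headers, one per map, for compound buffers.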
......@@ -32,7 +32,7 @@ struct zone;
/*
* This structure is common to both leaf nodes and non-leaf nodes in the Btree.
*
* Is is used to manage a doubly linked list of all blocks at the same
* It is used to manage a doubly linked list of all blocks at the same
* level in the Btree, and to identify which type of block this is.
*/
#define XFS_DA_NODE_MAGIC 0xfebe /* magic number: non-leaf blocks */
......@@ -132,24 +132,6 @@ typedef struct xfs_da_args {
{ XFS_DA_OP_OKNOENT, "OKNOENT" }, \
{ XFS_DA_OP_CILOOKUP, "CILOOKUP" }
/*
* Structure to describe buffer(s) for a block.
* This is needed in the directory version 2 format case, when
* multiple non-contiguous fsblocks might be needed to cover one
* logical directory block.
* If the buffer count is 1 then the data pointer points to the
* same place as the b_addr field for the buffer, else to kmem_alloced memory.
*/
typedef struct xfs_dabuf {
int nbuf; /* number of buffer pointers present */
short dirty; /* data needs to be copied back */
short bbcount; /* how large is data in bbs */
void *data; /* pointer for buffers' data */
struct xfs_buf *bps[1]; /* actually nbuf of these */
} xfs_dabuf_t;
#define XFS_DA_BUF_SIZE(n) \
(sizeof(xfs_dabuf_t) + sizeof(struct xfs_buf *) * ((n) - 1))
/*
* Storage for holding state during Btree searches and split/join ops.
*
......@@ -158,7 +140,7 @@ typedef struct xfs_dabuf {
* which is slightly more than enough.
*/
typedef struct xfs_da_state_blk {
xfs_dabuf_t *bp; /* buffer containing block */
struct xfs_buf *bp; /* buffer containing block */
xfs_dablk_t blkno; /* filesystem blkno of buffer */
xfs_daddr_t disk_blkno; /* on-disk blkno (in BBs) of buffer */
int index; /* relevant index into block */
......@@ -211,7 +193,7 @@ struct xfs_nameops {
* Routines used for growing the Btree.
*/
int xfs_da_node_create(xfs_da_args_t *args, xfs_dablk_t blkno, int level,
xfs_dabuf_t **bpp, int whichfork);
struct xfs_buf **bpp, int whichfork);
int xfs_da_split(xfs_da_state_t *state);
/*
......@@ -241,14 +223,14 @@ int xfs_da_grow_inode_int(struct xfs_da_args *args, xfs_fileoff_t *bno,
int count);
int xfs_da_get_buf(struct xfs_trans *trans, struct xfs_inode *dp,
xfs_dablk_t bno, xfs_daddr_t mappedbno,
xfs_dabuf_t **bp, int whichfork);
struct xfs_buf **bp, int whichfork);
int xfs_da_read_buf(struct xfs_trans *trans, struct xfs_inode *dp,
xfs_dablk_t bno, xfs_daddr_t mappedbno,
xfs_dabuf_t **bpp, int whichfork);
struct xfs_buf **bpp, int whichfork);
xfs_daddr_t xfs_da_reada_buf(struct xfs_trans *trans, struct xfs_inode *dp,
xfs_dablk_t bno, int whichfork);
int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
xfs_dabuf_t *dead_buf);
struct xfs_buf *dead_buf);
uint xfs_da_hashname(const __uint8_t *name_string, int name_length);
enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
......@@ -258,15 +240,7 @@ enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
xfs_da_state_t *xfs_da_state_alloc(void);
void xfs_da_state_free(xfs_da_state_t *state);
void xfs_da_buf_done(xfs_dabuf_t *dabuf);
void xfs_da_log_buf(struct xfs_trans *tp, xfs_dabuf_t *dabuf, uint first,
uint last);
void xfs_da_brelse(struct xfs_trans *tp, xfs_dabuf_t *dabuf);
void xfs_da_binval(struct xfs_trans *tp, xfs_dabuf_t *dabuf);
xfs_daddr_t xfs_da_blkno(xfs_dabuf_t *dabuf);
extern struct kmem_zone *xfs_da_state_zone;
extern struct kmem_zone *xfs_dabuf_zone;
extern const struct xfs_nameops xfs_default_nameops;
#endif /* __XFS_DA_BTREE_H__ */
......@@ -33,7 +33,7 @@ typedef struct xfs_timestamp {
* variable size the leftover area split into a data and an attribute fork.
* The format of the data and attribute fork depends on the format of the
* inode as indicated by di_format and di_aformat. To access the data and
* attribute use the XFS_DFORK_PTR, XFS_DFORK_DPTR, and XFS_DFORK_PTR macros
* attribute use the XFS_DFORK_DPTR, XFS_DFORK_APTR, and XFS_DFORK_PTR macros
* below.
*
* There is a very similar struct icdinode in xfs_inode which matches the
......
......@@ -592,7 +592,7 @@ int
xfs_dir2_shrink_inode(
xfs_da_args_t *args,
xfs_dir2_db_t db,
xfs_dabuf_t *bp)
struct xfs_buf *bp)
{
xfs_fileoff_t bno; /* directory file offset */
xfs_dablk_t da; /* directory file offset */
......@@ -634,7 +634,7 @@ xfs_dir2_shrink_inode(
/*
* Invalidate the buffer from the transaction.
*/
xfs_da_binval(tp, bp);
xfs_trans_binval(tp, bp);
/*
* If it's not a data block, we're done.
*/
......
......@@ -42,8 +42,8 @@ xfs_dir2_data_freefind(xfs_dir2_data_hdr_t *hdr, xfs_dir2_data_unused_t *dup);
*/
void
xfs_dir2_data_check(
xfs_inode_t *dp, /* incore inode pointer */
xfs_dabuf_t *bp) /* data block's buffer */
struct xfs_inode *dp, /* incore inode pointer */
struct xfs_buf *bp) /* data block's buffer */
{
xfs_dir2_dataptr_t addr; /* addr for leaf lookup */
xfs_dir2_data_free_t *bf; /* bestfree table */
......@@ -65,7 +65,7 @@ xfs_dir2_data_check(
struct xfs_name name;
mp = dp->i_mount;
hdr = bp->data;
hdr = bp->b_addr;
bf = hdr->bestfree;
p = (char *)(hdr + 1);
......@@ -389,9 +389,9 @@ int /* error */
xfs_dir2_data_init(
xfs_da_args_t *args, /* directory operation args */
xfs_dir2_db_t blkno, /* logical dir block number */
xfs_dabuf_t **bpp) /* output block buffer */
struct xfs_buf **bpp) /* output block buffer */
{
xfs_dabuf_t *bp; /* block buffer */
struct xfs_buf *bp; /* block buffer */
xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_inode_t *dp; /* incore directory inode */
xfs_dir2_data_unused_t *dup; /* unused entry pointer */
......@@ -417,7 +417,7 @@ xfs_dir2_data_init(
/*
* Initialize the header.
*/
hdr = bp->data;
hdr = bp->b_addr;
hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
hdr->bestfree[0].offset = cpu_to_be16(sizeof(*hdr));
for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) {
......@@ -449,16 +449,16 @@ xfs_dir2_data_init(
*/
void
xfs_dir2_data_log_entry(
xfs_trans_t *tp, /* transaction pointer */
xfs_dabuf_t *bp, /* block buffer */
struct xfs_trans *tp,
struct xfs_buf *bp,
xfs_dir2_data_entry_t *dep) /* data entry pointer */
{
xfs_dir2_data_hdr_t *hdr = bp->data;
xfs_dir2_data_hdr_t *hdr = bp->b_addr;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)hdr),
xfs_trans_log_buf(tp, bp, (uint)((char *)dep - (char *)hdr),
(uint)((char *)(xfs_dir2_data_entry_tag_p(dep) + 1) -
(char *)hdr - 1));
}
......@@ -468,15 +468,15 @@ xfs_dir2_data_log_entry(
*/
void
xfs_dir2_data_log_header(
xfs_trans_t *tp, /* transaction pointer */
xfs_dabuf_t *bp) /* block buffer */
struct xfs_trans *tp,
struct xfs_buf *bp)
{
xfs_dir2_data_hdr_t *hdr = bp->data;
xfs_dir2_data_hdr_t *hdr = bp->b_addr;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
xfs_da_log_buf(tp, bp, 0, sizeof(*hdr) - 1);
xfs_trans_log_buf(tp, bp, 0, sizeof(*hdr) - 1);
}
/*
......@@ -484,11 +484,11 @@ xfs_dir2_data_log_header(
*/
void
xfs_dir2_data_log_unused(
xfs_trans_t *tp, /* transaction pointer */
xfs_dabuf_t *bp, /* block buffer */
struct xfs_trans *tp,
struct xfs_buf *bp,
xfs_dir2_data_unused_t *dup) /* data unused pointer */
{
xfs_dir2_data_hdr_t *hdr = bp->data;
xfs_dir2_data_hdr_t *hdr = bp->b_addr;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
......@@ -496,13 +496,13 @@ xfs_dir2_data_log_unused(
/*
* Log the first part of the unused entry.
*/
xfs_da_log_buf(tp, bp, (uint)((char *)dup - (char *)hdr),
xfs_trans_log_buf(tp, bp, (uint)((char *)dup - (char *)hdr),
(uint)((char *)&dup->length + sizeof(dup->length) -
1 - (char *)hdr));
/*
* Log the end (tag) of the unused entry.
*/
xfs_da_log_buf(tp, bp,
xfs_trans_log_buf(tp, bp,
(uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr),
(uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr +
sizeof(xfs_dir2_data_off_t) - 1));
......@@ -514,8 +514,8 @@ xfs_dir2_data_log_unused(
*/
void
xfs_dir2_data_make_free(
xfs_trans_t *tp, /* transaction pointer */
xfs_dabuf_t *bp, /* block buffer */
struct xfs_trans *tp,
struct xfs_buf *bp,
xfs_dir2_data_aoff_t offset, /* starting byte offset */
xfs_dir2_data_aoff_t len, /* length in bytes */
int *needlogp, /* out: log header */
......@@ -531,7 +531,7 @@ xfs_dir2_data_make_free(
xfs_dir2_data_unused_t *prevdup; /* unused entry before us */
mp = tp->t_mountp;
hdr = bp->data;
hdr = bp->b_addr;
/*
* Figure out where the end of the data area is.
......@@ -696,8 +696,8 @@ xfs_dir2_data_make_free(
*/
void
xfs_dir2_data_use_free(
xfs_trans_t *tp, /* transaction pointer */
xfs_dabuf_t *bp, /* data block buffer */
struct xfs_trans *tp,
struct xfs_buf *bp,
xfs_dir2_data_unused_t *dup, /* unused entry */
xfs_dir2_data_aoff_t offset, /* starting offset to use */
xfs_dir2_data_aoff_t len, /* length to use */
......@@ -713,7 +713,7 @@ xfs_dir2_data_use_free(
xfs_dir2_data_unused_t *newdup2; /* another new unused entry */
int oldlen; /* old unused entry's length */
hdr = bp->data;
hdr = bp->b_addr;
ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG);
......
......@@ -25,7 +25,7 @@ extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
xfs_dir2_db_t *dbp);
extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
struct xfs_dabuf *bp);
struct xfs_buf *bp);
extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
const unsigned char *name, int len);
......@@ -37,11 +37,11 @@ extern int xfs_dir2_block_lookup(struct xfs_da_args *args);
extern int xfs_dir2_block_removename(struct xfs_da_args *args);
extern int xfs_dir2_block_replace(struct xfs_da_args *args);
extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args,
struct xfs_dabuf *lbp, struct xfs_dabuf *dbp);
struct xfs_buf *lbp, struct xfs_buf *dbp);
/* xfs_dir2_data.c */
#ifdef DEBUG
extern void xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_dabuf *bp);
extern void xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_buf *bp);
#else
#define xfs_dir2_data_check(dp,bp)
#endif
......@@ -51,43 +51,43 @@ xfs_dir2_data_freeinsert(struct xfs_dir2_data_hdr *hdr,
extern void xfs_dir2_data_freescan(struct xfs_mount *mp,
struct xfs_dir2_data_hdr *hdr, int *loghead);
extern int xfs_dir2_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno,
struct xfs_dabuf **bpp);
extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_dabuf *bp,
struct xfs_buf **bpp);
extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_buf *bp,
struct xfs_dir2_data_entry *dep);
extern void xfs_dir2_data_log_header(struct xfs_trans *tp,
struct xfs_dabuf *bp);
extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_dabuf *bp,
struct xfs_buf *bp);
extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_buf *bp,
struct xfs_dir2_data_unused *dup);
extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_dabuf *bp,
extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_buf *bp,
xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len,
int *needlogp, int *needscanp);
extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_dabuf *bp,
extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_buf *bp,
struct xfs_dir2_data_unused *dup, xfs_dir2_data_aoff_t offset,
xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp);
/* xfs_dir2_leaf.c */
extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
struct xfs_dabuf *dbp);
struct xfs_buf *dbp);
extern int xfs_dir2_leaf_addname(struct xfs_da_args *args);
extern void xfs_dir2_leaf_compact(struct xfs_da_args *args,
struct xfs_dabuf *bp);
extern void xfs_dir2_leaf_compact_x1(struct xfs_dabuf *bp, int *indexp,
struct xfs_buf *bp);
extern void xfs_dir2_leaf_compact_x1(struct xfs_buf *bp, int *indexp,
int *lowstalep, int *highstalep, int *lowlogp, int *highlogp);
extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, void *dirent,
size_t bufsize, xfs_off_t *offset, filldir_t filldir);
extern int xfs_dir2_leaf_init(struct xfs_da_args *args, xfs_dir2_db_t bno,
struct xfs_dabuf **bpp, int magic);
extern void xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_dabuf *bp,
struct xfs_buf **bpp, int magic);
extern void xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_buf *bp,
int first, int last);
extern void xfs_dir2_leaf_log_header(struct xfs_trans *tp,
struct xfs_dabuf *bp);
struct xfs_buf *bp);
extern int xfs_dir2_leaf_lookup(struct xfs_da_args *args);
extern int xfs_dir2_leaf_removename(struct xfs_da_args *args);
extern int xfs_dir2_leaf_replace(struct xfs_da_args *args);
extern int xfs_dir2_leaf_search_hash(struct xfs_da_args *args,
struct xfs_dabuf *lbp);
struct xfs_buf *lbp);
extern int xfs_dir2_leaf_trim_data(struct xfs_da_args *args,
struct xfs_dabuf *lbp, xfs_dir2_db_t db);
struct xfs_buf *lbp, xfs_dir2_db_t db);
extern struct xfs_dir2_leaf_entry *
xfs_dir2_leaf_find_entry(struct xfs_dir2_leaf *leaf, int index, int compact,
int lowstale, int highstale,
......@@ -96,13 +96,13 @@ extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state);
/* xfs_dir2_node.c */
extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
struct xfs_dabuf *lbp);
extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count);
extern int xfs_dir2_leafn_lookup_int(struct xfs_dabuf *bp,
struct xfs_buf *lbp);
extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_buf *bp, int *count);
extern int xfs_dir2_leafn_lookup_int(struct xfs_buf *bp,
struct xfs_da_args *args, int *indexp,
struct xfs_da_state *state);
extern int xfs_dir2_leafn_order(struct xfs_dabuf *leaf1_bp,
struct xfs_dabuf *leaf2_bp);
extern int xfs_dir2_leafn_order(struct xfs_buf *leaf1_bp,
struct xfs_buf *leaf2_bp);
extern int xfs_dir2_leafn_split(struct xfs_da_state *state,
struct xfs_da_state_blk *oldblk, struct xfs_da_state_blk *newblk);
extern int xfs_dir2_leafn_toosmall(struct xfs_da_state *state, int *action);
......@@ -122,7 +122,7 @@ extern xfs_ino_t xfs_dir2_sfe_get_ino(struct xfs_dir2_sf_hdr *sfp,
struct xfs_dir2_sf_entry *sfep);
extern int xfs_dir2_block_sfsize(struct xfs_inode *dp,
struct xfs_dir2_data_hdr *block, struct xfs_dir2_sf_hdr *sfhp);
extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_dabuf *bp,
extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_buf *bp,
int size, xfs_dir2_sf_hdr_t *sfhp);
extern int xfs_dir2_sf_addname(struct xfs_da_args *args);
extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
......
......@@ -222,7 +222,7 @@ xfs_dir2_block_sfsize(
int /* error */
xfs_dir2_block_to_sf(
xfs_da_args_t *args, /* operation arguments */
xfs_dabuf_t *bp, /* block buffer */
struct xfs_buf *bp,
int size, /* shortform directory size */
xfs_dir2_sf_hdr_t *sfhp) /* shortform directory hdr */
{
......@@ -249,7 +249,7 @@ xfs_dir2_block_to_sf(
* and add local data.
*/
hdr = kmem_alloc(mp->m_dirblksize, KM_SLEEP);
memcpy(hdr, bp->data, mp->m_dirblksize);
memcpy(hdr, bp->b_addr, mp->m_dirblksize);
logflags = XFS_ILOG_CORE;
if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) {
ASSERT(error != ENOSPC);
......
......@@ -236,7 +236,6 @@ xfs_file_aio_read(
ssize_t ret = 0;
int ioflags = 0;
xfs_fsize_t n;
unsigned long seg;
XFS_STATS_INC(xs_read_calls);
......@@ -247,19 +246,9 @@ xfs_file_aio_read(
if (file->f_mode & FMODE_NOCMTIME)
ioflags |= IO_INVIS;
/* START copy & waste from filemap.c */
for (seg = 0; seg < nr_segs; seg++) {
const struct iovec *iv = &iovp[seg];
/*
* If any segment has a negative length, or the cumulative
* length ever wraps negative then return -EINVAL.
*/
size += iv->iov_len;
if (unlikely((ssize_t)(size|iv->iov_len) < 0))
return XFS_ERROR(-EINVAL);
}
/* END copy & waste from filemap.c */
ret = generic_segment_checks(iovp, &nr_segs, &size, VERIFY_WRITE);
if (ret < 0)
return ret;
if (unlikely(ioflags & IO_ISDIRECT)) {
xfs_buftarg_t *target =
......@@ -273,7 +262,7 @@ xfs_file_aio_read(
}
}
n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
n = mp->m_super->s_maxbytes - iocb->ki_pos;
if (n <= 0 || size == 0)
return 0;
......
......@@ -75,8 +75,6 @@ xfs_dialloc(
umode_t mode, /* mode bits for new inode */
int okalloc, /* ok to allocate more space */
struct xfs_buf **agbp, /* buf for a.g. inode header */
boolean_t *alloc_done, /* an allocation was done to replenish
the free inodes */
xfs_ino_t *inop); /* inode number allocated */
/*
......
......@@ -40,17 +40,6 @@
#include "xfs_trace.h"
/*
* Define xfs inode iolock lockdep classes. We need to ensure that all active
* inodes are considered the same for lockdep purposes, including inodes that
* are recycled through the XFS_IRECLAIMABLE state. This is the only way to
* guarantee the locks are considered the same when there are multiple lock
* initialisation sites. Also, define a reclaimable inode class so it is
* obvious in lockdep reports which class the report is against.
*/
static struct lock_class_key xfs_iolock_active;
struct lock_class_key xfs_iolock_reclaimable;
/*
* Allocate and initialise an xfs_inode.
*/
......@@ -80,8 +69,6 @@ xfs_inode_alloc(
ASSERT(ip->i_ino == 0);
mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
&xfs_iolock_active, "xfs_iolock_active");
/* initialise the xfs inode */
ip->i_ino = ino;
......@@ -250,8 +237,6 @@ xfs_iget_cache_hit(
ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
&xfs_iolock_active, "xfs_iolock_active");
spin_unlock(&ip->i_flags_lock);
spin_unlock(&pag->pag_ici_lock);
......
......@@ -132,23 +132,28 @@ xfs_inobp_check(
#endif
/*
* Find the buffer associated with the given inode map
* We do basic validation checks on the buffer once it has been
* retrieved from disk.
* This routine is called to map an inode to the buffer containing the on-disk
* version of the inode. It returns a pointer to the buffer containing the
* on-disk inode in the bpp parameter, and in the dipp parameter it returns a
* pointer to the on-disk inode within that buffer.
*
* If a non-zero error is returned, then the contents of bpp and dipp are
* undefined.
*/
STATIC int
int
xfs_imap_to_bp(
xfs_mount_t *mp,
xfs_trans_t *tp,
struct xfs_mount *mp,
struct xfs_trans *tp,
struct xfs_imap *imap,
xfs_buf_t **bpp,
struct xfs_dinode **dipp,
struct xfs_buf **bpp,
uint buf_flags,
uint iget_flags)
{
struct xfs_buf *bp;
int error;
int i;
int ni;
xfs_buf_t *bp;
buf_flags |= XBF_UNMAPPED;
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
......@@ -189,8 +194,8 @@ xfs_imap_to_bp(
xfs_trans_brelse(tp, bp);
return XFS_ERROR(EINVAL);
}
XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
XFS_ERRLEVEL_HIGH, mp, dip);
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_HIGH,
mp, dip);
#ifdef DEBUG
xfs_emerg(mp,
"bad inode magic/vsn daddr %lld #%d (magic=%x)",
......@@ -204,96 +209,9 @@ xfs_imap_to_bp(
}
xfs_inobp_check(mp, bp);
*bpp = bp;
return 0;
}
/*
* This routine is called to map an inode number within a file
* system to the buffer containing the on-disk version of the
* inode. It returns a pointer to the buffer containing the
* on-disk inode in the bpp parameter, and in the dip parameter
* it returns a pointer to the on-disk inode within that buffer.
*
* If a non-zero error is returned, then the contents of bpp and
* dipp are undefined.
*
* Use xfs_imap() to determine the size and location of the
* buffer to read from disk.
*/
int
xfs_inotobp(
xfs_mount_t *mp,
xfs_trans_t *tp,
xfs_ino_t ino,
xfs_dinode_t **dipp,
xfs_buf_t **bpp,
int *offset,
uint imap_flags)
{
struct xfs_imap imap;
xfs_buf_t *bp;
int error;
imap.im_blkno = 0;
error = xfs_imap(mp, tp, ino, &imap, imap_flags);
if (error)
return error;
error = xfs_imap_to_bp(mp, tp, &imap, &bp, 0, imap_flags);
if (error)
return error;
*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
*bpp = bp;
*offset = imap.im_boffset;
return 0;
}
/*
* This routine is called to map an inode to the buffer containing
* the on-disk version of the inode. It returns a pointer to the
* buffer containing the on-disk inode in the bpp parameter, and in
* the dip parameter it returns a pointer to the on-disk inode within
* that buffer.
*
* If a non-zero error is returned, then the contents of bpp and
* dipp are undefined.
*
* The inode is expected to have already been mapped to its buffer and read
* in once, thus we can use the mapping information stored in the inode
* rather than calling xfs_imap(). This allows us to avoid the overhead
* of looking at the inode btree for small block file systems
* (see xfs_imap()).
*/
int
xfs_itobp(
xfs_mount_t *mp,
xfs_trans_t *tp,
xfs_inode_t *ip,
xfs_dinode_t **dipp,
xfs_buf_t **bpp,
uint buf_flags)
{
xfs_buf_t *bp;
int error;
ASSERT(ip->i_imap.im_blkno != 0);
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, buf_flags, 0);
if (error)
return error;
if (!bp) {
ASSERT(buf_flags & XBF_TRYLOCK);
ASSERT(tp == NULL);
*bpp = NULL;
return EAGAIN;
}
*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
*bpp = bp;
*dipp = (struct xfs_dinode *)xfs_buf_offset(bp, imap->im_boffset);
return 0;
}
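With xfs_inotobp() and xfs_itobp() gone, callers holding only an inode number follow the pattern the hunks below adopt: do the xfs_imap() lookup explicitly, then let xfs_imap_to_bp() return the buffer and the on-disk inode pointer in one call. A hedged sketch of that convention, using a hypothetical helper name:

STATIC int
demo_read_dinode(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_dinode	**dipp,
	struct xfs_buf		**bpp)
{
	struct xfs_imap		imap;
	int			error;

	imap.im_blkno = 0;		/* force xfs_imap() to locate it */
	error = xfs_imap(mp, tp, ino, &imap, 0);
	if (error)
		return error;
	/* one call now yields both the buffer and the dinode pointer */
	return xfs_imap_to_bp(mp, tp, &imap, dipp, bpp, 0, 0);
}

Callers that already have the inode in memory keep passing &ip->i_imap directly, as the xfs_iread() hunk below does.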
......@@ -796,10 +714,9 @@ xfs_iread(
/*
* Get pointers to the on-disk inode and the buffer containing it.
*/
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, 0, iget_flags);
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
if (error)
return error;
dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
/*
* If we got something that isn't an inode it means someone
......@@ -876,7 +793,7 @@ xfs_iread(
/*
* Use xfs_trans_brelse() to release the buffer containing the
* on-disk inode, because it was acquired with xfs_trans_read_buf()
* in xfs_itobp() above. If tp is NULL, this is just a normal
* in xfs_imap_to_bp() above. If tp is NULL, this is just a normal
* brelse(). If we're within a transaction, then xfs_trans_brelse()
* will only release the buffer if it is not dirty within the
* transaction. It will be OK to release the buffer in this case,
......@@ -970,7 +887,6 @@ xfs_ialloc(
prid_t prid,
int okalloc,
xfs_buf_t **ialloc_context,
boolean_t *call_again,
xfs_inode_t **ipp)
{
xfs_ino_t ino;
......@@ -985,10 +901,10 @@ xfs_ialloc(
* the on-disk inode to be allocated.
*/
error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
ialloc_context, call_again, &ino);
ialloc_context, &ino);
if (error)
return error;
if (*call_again || ino == NULLFSINO) {
if (*ialloc_context || ino == NULLFSINO) {
*ipp = NULL;
return 0;
}
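With call_again/alloc_done gone, a non-NULL *ialloc_context is the sole signal that xfs_dialloc() allocated a new inode chunk and the caller must commit the transaction and retry. A hedged sketch of the caller-side convention after this change (commit-and-retry details elided; in the real tree this logic lives in the directory inode allocation path):

STATIC int
demo_ialloc_once(			/* hypothetical wrapper */
	struct xfs_trans	*tp,
	struct xfs_inode	*pip,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_buf		*ialloc_context = NULL;
	int			error;

	error = xfs_ialloc(tp, pip, mode, 1, 0, 0, 1,
			   &ialloc_context, ipp);
	if (error)
		return error;
	if (ialloc_context) {
		/*
		 * A chunk was allocated but no inode handed back yet:
		 * log the buffer, commit this transaction, and call
		 * xfs_ialloc() again, which will now find free inodes.
		 */
	}
	return 0;
}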
......@@ -1207,7 +1123,9 @@ xfs_itruncate_extents(
int error = 0;
int done = 0;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(new_size <= XFS_ISIZE(ip));
ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
ASSERT(ip->i_itemp != NULL);
......@@ -1226,7 +1144,7 @@ xfs_itruncate_extents(
* then there is nothing to do.
*/
first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
if (first_unmap_block == last_block)
return 0;
......@@ -1355,7 +1273,8 @@ xfs_iunlink(
* Here we put the head pointer into our next pointer,
* and then we fall through to point the head at us.
*/
error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
0, 0);
if (error)
return error;
......@@ -1429,16 +1348,16 @@ xfs_iunlink_remove(
if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
/*
* We're at the head of the list. Get the inode's
* on-disk buffer to see if there is anyone after us
* on the list. Only modify our next pointer if it
* is not already NULLAGINO. This saves us the overhead
* of dealing with the buffer when there is no need to
* change it.
* We're at the head of the list. Get the inode's on-disk
* buffer to see if there is anyone after us on the list.
* Only modify our next pointer if it is not already NULLAGINO.
* This saves us the overhead of dealing with the buffer when
* there is no need to change it.
*/
error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
0, 0);
if (error) {
xfs_warn(mp, "%s: xfs_itobp() returned error %d.",
xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
__func__, error);
return error;
}
......@@ -1472,34 +1391,45 @@ xfs_iunlink_remove(
next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
last_ibp = NULL;
while (next_agino != agino) {
/*
* If the last inode wasn't the one pointing to
* us, then release its buffer since we're not
* going to do anything with it.
*/
if (last_ibp != NULL) {
struct xfs_imap imap;
if (last_ibp)
xfs_trans_brelse(tp, last_ibp);
}
imap.im_blkno = 0;
next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
error = xfs_inotobp(mp, tp, next_ino, &last_dip,
&last_ibp, &last_offset, 0);
error = xfs_imap(mp, tp, next_ino, &imap, 0);
if (error) {
xfs_warn(mp,
"%s: xfs_inotobp() returned error %d.",
"%s: xfs_imap returned error %d.",
__func__, error);
return error;
}
error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
&last_ibp, 0, 0);
if (error) {
xfs_warn(mp,
"%s: xfs_imap_to_bp returned error %d.",
__func__, error);
return error;
}
last_offset = imap.im_boffset;
next_agino = be32_to_cpu(last_dip->di_next_unlinked);
ASSERT(next_agino != NULLAGINO);
ASSERT(next_agino != 0);
}
/*
* Now last_ibp points to the buffer previous to us on
* the unlinked list. Pull us from the list.
* Now last_ibp points to the buffer previous to us on the
* unlinked list. Pull us from the list.
*/
error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0);
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
0, 0);
if (error) {
xfs_warn(mp, "%s: xfs_itobp(2) returned error %d.",
xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
__func__, error);
return error;
}
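The loop above walks the AGI bucket's singly linked list of unlinked inodes, chained through di_next_unlinked. Abstracting away buffers and transactions, the list surgery reduces to ordinary singly-linked removal; a standalone model, with a hypothetical in-memory inode table standing in for the on-disk inodes:

#include <stdint.h>

#define NULLAGINO	((uint32_t)-1)

struct demo_dinode {			/* hypothetical userspace model */
	uint32_t	di_next_unlinked;
};

static void
demo_unlinked_remove(uint32_t *bucket, struct demo_dinode *inodes,
		     uint32_t agino)
{
	uint32_t	prev = *bucket;

	if (prev == agino) {
		/* head of list: repoint the AGI bucket past us */
		*bucket = inodes[agino].di_next_unlinked;
	} else {
		/* walk to our predecessor, then splice us out */
		while (inodes[prev].di_next_unlinked != agino)
			prev = inodes[prev].di_next_unlinked;
		inodes[prev].di_next_unlinked =
			inodes[agino].di_next_unlinked;
	}
	inodes[agino].di_next_unlinked = NULLAGINO;
}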
......@@ -1749,7 +1679,8 @@ xfs_ifree(
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0);
error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &dip, &ibp,
0, 0);
if (error)
return error;
......@@ -2428,7 +2359,7 @@ xfs_iflush(
/*
* For stale inodes we cannot rely on the backing buffer remaining
* stale in cache for the remaining life of the stale inode and so
* xfs_itobp() below may give us a buffer that no longer contains
* xfs_imap_to_bp() below may give us a buffer that no longer contains
* inodes below. We have to check this after ensuring the inode is
* unpinned so that it is safe to reclaim the stale inode after the
* flush call.
......@@ -2454,7 +2385,8 @@ xfs_iflush(
/*
* Get the buffer containing the on-disk inode.
*/
error = xfs_itobp(mp, NULL, ip, &dip, &bp, XBF_TRYLOCK);
error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
0);
if (error || !bp) {
xfs_ifunlock(ip);
return error;
......
......@@ -487,8 +487,6 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
#define XFS_IOLOCK_DEP(flags) (((flags) & XFS_IOLOCK_DEP_MASK) >> XFS_IOLOCK_SHIFT)
#define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT)
extern struct lock_class_key xfs_iolock_reclaimable;
/*
* For multiple groups support: if S_ISGID bit is set in the parent
* directory, group of new file is set to that of the parent, and
......@@ -517,7 +515,7 @@ void xfs_inode_free(struct xfs_inode *ip);
*/
int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, umode_t,
xfs_nlink_t, xfs_dev_t, prid_t, int,
struct xfs_buf **, boolean_t *, xfs_inode_t **);
struct xfs_buf **, xfs_inode_t **);
uint xfs_ip2xflags(struct xfs_inode *);
uint xfs_dic2xflags(struct xfs_dinode *);
......@@ -557,12 +555,9 @@ do { \
#define XFS_IGET_UNTRUSTED 0x2
#define XFS_IGET_DONTCACHE 0x4
int xfs_inotobp(struct xfs_mount *, struct xfs_trans *,
xfs_ino_t, struct xfs_dinode **,
struct xfs_buf **, int *, uint);
int xfs_itobp(struct xfs_mount *, struct xfs_trans *,
struct xfs_inode *, struct xfs_dinode **,
struct xfs_buf **, uint);
int xfs_imap_to_bp(struct xfs_mount *, struct xfs_trans *,
struct xfs_imap *, struct xfs_dinode **,
struct xfs_buf **, uint, uint);
int xfs_iread(struct xfs_mount *, struct xfs_trans *,
struct xfs_inode *, uint);
void xfs_dinode_to_disk(struct xfs_dinode *,
......
......@@ -285,7 +285,7 @@ xfs_iomap_eof_want_preallocate(
* do any speculative allocation.
*/
start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
while (count_fsb > 0) {
imaps = nimaps;
firstblock = NULLFSBLOCK;
......@@ -416,8 +416,8 @@ xfs_iomap_write_delay(
* Make sure preallocation does not create extents beyond the range we
* actually support in this filesystem.
*/
if (last_fsb > XFS_B_TO_FSB(mp, mp->m_maxioffset))
last_fsb = XFS_B_TO_FSB(mp, mp->m_maxioffset);
if (last_fsb > XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes))
last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
ASSERT(last_fsb > offset_fsb);
......
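This is one of several sites in the series where XFS_MAXIOFFSET(mp) and mp->m_maxioffset give way to the limit the mount already exports to the VFS in sb->s_maxbytes; the quota hunk below makes the same substitution. The recurring clamp reduces to the following, sketched with a hypothetical helper:

static xfs_fileoff_t
demo_clamp_last_fsb(
	struct xfs_mount	*mp,
	xfs_fileoff_t		last_fsb)
{
	xfs_fileoff_t		max_fsb =
		XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);

	return (last_fsb > max_fsb) ? max_fsb : last_fsb;
}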
......@@ -555,7 +555,7 @@ xfs_bulkstat_single(
/*
* note that requesting valid inode numbers which are not allocated
* to inodes will most likely cause xfs_itobp to generate warning
* to inodes will most likely cause xfs_imap_to_bp to generate warning
* messages about bad magic numbers. This is ok. The fact that
* the inode isn't actually an inode is handled by the
* error check below. Done this way to make the usual case faster
......
......@@ -940,7 +940,7 @@ xfs_qm_dqiterate(
map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
lblkno = 0;
maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
do {
nmaps = XFS_DQITER_MAP_SIZE;
/*
......