Commit 9bdb8160 authored by Nathan Scott

Merge sgi.com:/source2/linux-2.6 into sgi.com:/source2/xfs-linux-2.6

parents 68f12028 c16123eb
/*
* Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -156,9 +156,7 @@ linvfs_unwritten_convert_direct(
{
ASSERT(!private || inode == (struct inode *)private);
/* private indicates an unwritten extent lay beneath this IO,
* see linvfs_get_block_core.
*/
/* private indicates an unwritten extent lay beneath this IO */
if (private && size > 0) {
vnode_t *vp = LINVFS_GET_VP(inode);
int error;
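
The check above is the completion side of a common direct I/O pattern: the submitter stashes a cookie in private when the request covers an unwritten extent, and the completion handler converts the extent only when data was actually transferred. A minimal standalone sketch of that pattern, assuming made-up names (io_done, convert_unwritten) rather than the real XFS entry points:

#include <stdio.h>

struct io_request {
    void *private;  /* non-NULL => unwritten extent beneath this I/O */
    long  offset;
    long  size;     /* bytes actually transferred */
};

/* Stand-in for the real extent conversion (illustrative only). */
static int convert_unwritten(long offset, long size)
{
    printf("convert [%ld, %ld) to written\n", offset, offset + size);
    return 0;
}

/* Completion handler: convert only when the submitter flagged an
 * unwritten extent and the I/O moved data, mirroring the test above. */
static int io_done(struct io_request *req)
{
    if (req->private && req->size > 0)
        return convert_unwritten(req->offset, req->size);
    return 0;
}

int main(void)
{
    struct io_request req = { &req, 0, 4096 };
    return io_done(&req);
}
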
@@ -728,11 +726,9 @@ xfs_page_state_convert(
pgoff_t end_index, last_index, tlast;
int len, err, i, cnt = 0, uptodate = 1;
int flags = startio ? 0 : BMAPI_TRYLOCK;
int page_dirty = 1;
int delalloc = 0;
int page_dirty, delalloc = 0;
/* Are we off the end of the file ? */
/* Is this page beyond the end of the file? */
offset = i_size_read(inode);
end_index = offset >> PAGE_CACHE_SHIFT;
last_index = (offset - 1) >> PAGE_CACHE_SHIFT;
@@ -751,7 +747,13 @@ xfs_page_state_convert(
bh = head = page_buffers(page);
iomp = NULL;
/*
* page_dirty is initially a count of buffers on the page and
* is decremented as we move each into a cleanable state.
*/
len = bh->b_size;
page_dirty = PAGE_CACHE_SIZE / len;
do {
if (offset >= end_offset)
break;
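
The new accounting turns page_dirty from a flag into a countdown: it starts at the number of buffer heads on the page (PAGE_CACHE_SIZE / len), and the hunks below replace each page_dirty = 0 with page_dirty--, so the page only counts as fully handled once every buffer has been moved to a cleanable state. A standalone sketch of the countdown, with invented sizes and cleanable[] outcomes:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_CACHE_SIZE 4096
#define BH_SIZE         1024    /* example b_size; any divisor works */

int main(void)
{
    /* One countdown entry per buffer head on the page. */
    int page_dirty = PAGE_CACHE_SIZE / BH_SIZE;
    bool cleanable[] = { true, true, true, false };
    int i;

    for (i = 0; i < PAGE_CACHE_SIZE / BH_SIZE; i++)
        if (cleanable[i])
            page_dirty--;       /* was: page_dirty = 0; */

    /* Non-zero means some buffer was left dirty, so the page is too. */
    printf("%d buffer(s) still dirty\n", page_dirty);
    return page_dirty != 0;
}
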
@@ -794,7 +796,7 @@ xfs_page_state_convert(
}
BUG_ON(!buffer_locked(bh));
bh_arr[cnt++] = bh;
page_dirty = 0;
page_dirty--;
}
/*
* Second case, allocate space for a delalloc buffer.
@@ -821,7 +823,7 @@ xfs_page_state_convert(
unlock_buffer(bh);
mark_buffer_dirty(bh);
}
page_dirty = 0;
page_dirty--;
}
} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
(unmapped || startio)) {
@@ -857,13 +859,13 @@ xfs_page_state_convert(
unlock_buffer(bh);
mark_buffer_dirty(bh);
}
page_dirty = 0;
page_dirty--;
}
} else if (startio) {
if (buffer_uptodate(bh) &&
!test_and_set_bit(BH_Lock, &bh->b_state)) {
bh_arr[cnt++] = bh;
page_dirty = 0;
page_dirty--;
}
}
}
@@ -907,7 +909,7 @@ xfs_page_state_convert(
}
STATIC int
linvfs_get_block_core(
__linvfs_get_block(
struct inode *inode,
sector_t iblock,
unsigned long blocks,
@@ -977,11 +979,11 @@ linvfs_get_block_core(
if (iomap.iomap_flags & IOMAP_DELAY) {
BUG_ON(direct);
if (create) {
set_buffer_mapped(bh_result);
set_buffer_uptodate(bh_result);
}
set_buffer_mapped(bh_result);
set_buffer_delay(bh_result);
}
}
if (blocks) {
bh_result->b_size = (ssize_t)min(
@@ -999,7 +1001,7 @@ linvfs_get_block(
struct buffer_head *bh_result,
int create)
{
return linvfs_get_block_core(inode, iblock, 0, bh_result,
return __linvfs_get_block(inode, iblock, 0, bh_result,
create, 0, BMAPI_WRITE);
}
@@ -1011,7 +1013,7 @@ linvfs_get_blocks_direct(
struct buffer_head *bh_result,
int create)
{
return linvfs_get_block_core(inode, iblock, max_blocks, bh_result,
return __linvfs_get_block(inode, iblock, max_blocks, bh_result,
create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}
......
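
The rename from linvfs_get_block_core to __linvfs_get_block follows the usual Linux convention: a double-underscore worker takes the full parameter set, and thin wrappers pin down the buffered and direct cases. A minimal sketch of that layering, with placeholder flag values and a stub worker body:

#include <stdio.h>

#define BMAPI_WRITE     0x1
#define BMAPI_DIRECT    0x2     /* placeholder flag values */

/* Worker: takes every knob; callers never use it directly. */
static int __get_block(long iblock, unsigned long blocks,
                       int create, int direct, int flags)
{
    printf("block %ld, max %lu, create %d, direct %d, flags %#x\n",
           iblock, blocks, create, direct, flags);
    return 0;
}

/* Buffered path: one block at a time, never direct. */
static int get_block(long iblock, int create)
{
    return __get_block(iblock, 0, create, 0, BMAPI_WRITE);
}

/* Direct I/O path: may map a run of blocks in one call. */
static int get_blocks_direct(long iblock, unsigned long max_blocks,
                             int create)
{
    return __get_block(iblock, max_blocks, create, 1,
                       BMAPI_WRITE | BMAPI_DIRECT);
}

int main(void)
{
    get_block(7, 1);
    return get_blocks_direct(7, 16, 1);
}
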
@@ -676,6 +676,8 @@ xfs_write(
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
fs_check_frozen(vp->v_vfsp, SB_FREEZE_WRITE);
if (ioflags & IO_ISDIRECT) {
xfs_buftarg_t *target =
(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
@@ -960,10 +962,10 @@ xfs_write(
xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp, 0, NULL);
xfs_iunlock(xip, XFS_ILOCK_EXCL);
}
if (error)
goto out_unlock_internal;
}
}
xfs_rwunlock(bdp, locktype);
if (need_isem)
......
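
The added fs_check_frozen(vp->v_vfsp, SB_FREEZE_WRITE) call makes xfs_write wait out a filesystem freeze before starting any I/O. The gate itself is just "sleep until the freeze clears"; a userspace model of it with a mutex and condition variable, all names illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  thawed = PTHREAD_COND_INITIALIZER;
static int frozen;      /* set by freeze, cleared by thaw */

/* Model of fs_check_frozen(): writers sleep here while frozen. */
static void check_frozen(void)
{
    pthread_mutex_lock(&lock);
    while (frozen)
        pthread_cond_wait(&thawed, &lock);
    pthread_mutex_unlock(&lock);
}

static void do_write(const char *what)
{
    check_frozen();     /* gate every write behind the freeze */
    printf("writing %s\n", what);
}

int main(void)
{
    do_write("data");   /* not frozen: proceeds immediately */
    return 0;
}
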
@@ -348,6 +348,12 @@ linvfs_write_inode(
if (sync)
flags |= FLUSH_SYNC;
VOP_IFLUSH(vp, flags, error);
if (error == EAGAIN) {
if (sync)
VOP_IFLUSH(vp, flags | FLUSH_LOG, error);
else
error = 0;
}
}
return -error;
......
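
The new error handling in linvfs_write_inode treats EAGAIN from VOP_IFLUSH as "inode busy": a synchronous writeback retries with FLUSH_LOG so the change at least reaches the log, while an asynchronous one drops the error and leaves the inode for a later pass. A standalone sketch of that fallback shape, where flush() and the flag values are stand-ins:

#include <errno.h>
#include <stdio.h>

#define FLUSH_SYNC      0x1
#define FLUSH_LOG       0x2     /* stand-in flag values */

/* Pretend first-attempt flushes always find the inode contended. */
static int flush(int flags)
{
    return (flags & FLUSH_LOG) ? 0 : EAGAIN;
}

static int write_inode(int sync)
{
    int flags = sync ? FLUSH_SYNC : 0;
    int error = flush(flags);

    if (error == EAGAIN) {
        if (sync)       /* must not lose a sync write: push the log */
            error = flush(flags | FLUSH_LOG);
        else            /* async: try again on a later writeback */
            error = 0;
    }
    return -error;
}

int main(void)
{
    printf("async: %d, sync: %d\n", write_inode(0), write_inode(1));
    return 0;
}
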
@@ -3681,25 +3681,25 @@ xfs_inode_flush(
{
xfs_inode_t *ip;
xfs_mount_t *mp;
xfs_inode_log_item_t *iip;
int error = 0;
ip = XFS_BHVTOI(bdp);
mp = ip->i_mount;
iip = ip->i_itemp;
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
/* Bypass inodes which have already been cleaned by
/*
* Bypass inodes which have already been cleaned by
* the inode flush clustering code inside xfs_iflush
*/
if ((ip->i_update_core == 0) &&
((ip->i_itemp == NULL) ||
!(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)))
((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)))
return 0;
if (flags & FLUSH_LOG) {
xfs_inode_log_item_t *iip = ip->i_itemp;
if (iip && iip->ili_last_lsn) {
xlog_t *log = mp->m_log;
xfs_lsn_t sync_lsn;
@@ -3714,12 +3714,12 @@ xfs_inode_flush(
if (flags & FLUSH_SYNC)
log_flags |= XFS_LOG_SYNC;
return xfs_log_force(mp, iip->ili_last_lsn,
log_flags);
return xfs_log_force(mp, iip->ili_last_lsn, log_flags);
}
}
/* We make this non-blocking if the inode is contended,
/*
* We make this non-blocking if the inode is contended,
* return EAGAIN to indicate to the caller that they
* did not succeed. This prevents the flush path from
* blocking on inodes inside another operation right
......
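
The comment above describes the provider side of that EAGAIN contract: the flush backs off a contended inode instead of sleeping under another operation. A userspace model of the non-blocking attempt built on pthread_mutex_trylock, with illustrative names:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Non-blocking flush attempt: back off with EAGAIN when the inode
 * lock is already held instead of sleeping under another operation. */
static int inode_flush(pthread_mutex_t *ilock)
{
    if (pthread_mutex_trylock(ilock) != 0)
        return EAGAIN;  /* contended: let the caller decide */

    /* ... write back the inode here ... */
    pthread_mutex_unlock(ilock);
    return 0;
}

int main(void)
{
    pthread_mutex_t ilock = PTHREAD_MUTEX_INITIALIZER;
    int err;

    pthread_mutex_lock(&ilock);         /* simulate a contended inode */
    err = inode_flush(&ilock);
    printf("contended flush: %s\n", err == EAGAIN ? "EAGAIN" : "ok");
    pthread_mutex_unlock(&ilock);

    return inode_flush(&ilock);         /* uncontended: succeeds */
}
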