Commit 421b3a51 authored by Stephen Lord, committed by Christoph Hellwig

[XFS] rework iocore infrastructure, remove some code and make it more orthogonal. In addition, add some of the hooks for unwritten extents.

SGI Modid: 2.5.x-xfs:slinx:134603a
parent 4a725254
@@ -48,6 +48,9 @@ map_blocks(
vnode_t *vp = LINVFS_GET_VP(inode);
int error, nmaps = 1;
if (((flags & (PBF_DIRECT|PBF_SYNC)) == PBF_DIRECT) &&
(offset >= inode->i_size))
count = max(count, XFS_WRITE_IO_LOG);
retry:
VOP_BMAP(vp, offset, count, flags, pbmapp, &nmaps, error);
if (flags & PBF_WRITE) {
@@ -515,7 +518,7 @@ linvfs_get_block_core(
/* If we are doing writes at the end of the file,
* allocate in chunks
*/
if (create && (offset >= inode->i_size) && !(flags & PBF_SYNC))
if (create && (offset >= inode->i_size))
size = 1 << XFS_WRITE_IO_LOG;
else
size = 1 << inode->i_blkbits;
@@ -534,15 +537,20 @@ linvfs_get_block_core(
page_buf_daddr_t bn;
loff_t delta;
delta = offset - pbmap.pbm_offset;
delta >>= inode->i_blkbits;
/* For unwritten extents do not report a disk address on
* the read case.
*/
if (create || ((pbmap.pbm_flags & PBMF_UNWRITTEN) == 0)) {
delta = offset - pbmap.pbm_offset;
delta >>= inode->i_blkbits;
bn = pbmap.pbm_bn >> (inode->i_blkbits - 9);
bn += delta;
bn = pbmap.pbm_bn >> (inode->i_blkbits - 9);
bn += delta;
bh_result->b_blocknr = bn;
bh_result->b_bdev = pbmap.pbm_target->pbr_bdev;
set_buffer_mapped(bh_result);
bh_result->b_blocknr = bn;
bh_result->b_bdev = pbmap.pbm_target->pbr_bdev;
set_buffer_mapped(bh_result);
}
}
/* If we previously allocated a block out beyond eof and
......
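The hunk above is the read-side half of the unwritten-extent support: on a read, an unwritten extent gets no disk address, so the generic buffer code treats it as a hole and zero-fills the page instead of reading stale blocks off disk. A minimal sketch of the idea, with hypothetical names (struct my_map, MAP_UNWRITTEN) standing in for the pagebuf types:

/* Sketch only, not the kernel code: expose a disk mapping to a
 * buffer head only when writing, or when the extent really holds
 * written data; a read of an unwritten extent stays an unmapped hole.
 */
struct my_map {
        loff_t                  offset; /* byte offset the map starts at */
        unsigned long long      bn;     /* disk address in 512-byte units */
        unsigned int            flags;
};
#define MAP_UNWRITTEN   0x01            /* hypothetical flag */

static void map_buffer(struct buffer_head *bh, struct my_map *map,
                       loff_t offset, unsigned int blkbits, int create)
{
        if (!create && (map->flags & MAP_UNWRITTEN))
                return;         /* reads of unwritten data see zeros */

        bh->b_blocknr = (map->bn >> (blkbits - 9)) +
                        ((offset - map->offset) >> blkbits);
        set_buffer_mapped(bh);
}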
@@ -30,361 +30,18 @@
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
/*
* fs/xfs/linux/xfs_lrw.c (Linux Read Write stuff)
* fs/xfs/linux/xfs_iomap.c (Linux Read Write stuff)
*
*/
#include <xfs.h>
#include <linux/pagemap.h>
#include <linux/capability.h>
#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
<< mp->m_writeio_log)
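XFS_WRITEIO_ALIGN rounds a byte offset down to the mount's write-I/O granularity by shifting the low m_writeio_log bits out and back in. A worked example, assuming m_writeio_log is 16 (a 64KiB write size):

/* Assuming mp->m_writeio_log == 16:
 *   XFS_WRITEIO_ALIGN(mp, 100000) == (100000 >> 16) << 16 == 65536
 *   XFS_WRITEIO_ALIGN(mp, 65535)  == (65535  >> 16) << 16 == 0
 */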
#define XFS_STRAT_WRITE_IMAPS 2
STATIC int xfs_iomap_read(xfs_iocore_t *, loff_t, size_t, int, page_buf_bmap_t *,
int *);
STATIC int xfs_iomap_write(xfs_iocore_t *, loff_t, size_t, page_buf_bmap_t *,
int *, int);
STATIC int xfs_iomap_write_delay(xfs_iocore_t *, loff_t, size_t, page_buf_bmap_t *,
int *, int, int);
STATIC int xfs_iomap_write_direct(xfs_iocore_t *, loff_t, size_t, page_buf_bmap_t *,
int *, int, int);
STATIC int _xfs_imap_to_bmap(xfs_iocore_t *, xfs_off_t, xfs_bmbt_irec_t *,
page_buf_bmap_t *, int, int);
int
xfs_strategy(
xfs_inode_t *ip,
xfs_off_t offset,
ssize_t count,
int flags,
page_buf_bmap_t *pbmapp,
int *npbmaps)
{
xfs_iocore_t *io;
xfs_mount_t *mp;
int error;
xfs_fileoff_t offset_fsb;
xfs_fileoff_t end_fsb;
xfs_fileoff_t map_start_fsb;
xfs_fileoff_t last_block;
xfs_fsblock_t first_block;
xfs_bmap_free_t free_list;
xfs_filblks_t count_fsb;
int committed, i, loops, nimaps;
int is_xfs;
xfs_bmbt_irec_t imap[XFS_MAX_RW_NBMAPS];
xfs_trans_t *tp;
mp = ip->i_mount;
io = &ip->i_iocore;
is_xfs = IO_IS_XFS(io);
ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
((io->io_flags & XFS_IOCORE_RT) != 0));
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
offset_fsb = XFS_B_TO_FSBT(mp, offset);
nimaps = min(XFS_MAX_RW_NBMAPS, *npbmaps);
end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
first_block = NULLFSBLOCK;
XFS_ILOCK(mp, io, XFS_ILOCK_SHARED | XFS_EXTSIZE_RD);
error = XFS_BMAPI(mp, NULL, io, offset_fsb,
(xfs_filblks_t)(end_fsb - offset_fsb),
XFS_BMAPI_ENTIRE, &first_block, 0, imap,
&nimaps, NULL);
XFS_IUNLOCK(mp, io, XFS_ILOCK_SHARED | XFS_EXTSIZE_RD);
if (error) {
return XFS_ERROR(error);
}
if (nimaps && !ISNULLSTARTBLOCK(imap[0].br_startblock)) {
*npbmaps = _xfs_imap_to_bmap(&ip->i_iocore, offset, imap,
pbmapp, nimaps, *npbmaps);
return 0;
}
/*
* Make sure that the dquots are there.
*/
if (XFS_IS_QUOTA_ON(mp)) {
if (XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, 0))) {
return XFS_ERROR(error);
}
}
}
XFS_STATS_ADD(xfsstats.xs_xstrat_bytes,
XFS_FSB_TO_B(mp, imap[0].br_blockcount));
offset_fsb = imap[0].br_startoff;
count_fsb = imap[0].br_blockcount;
map_start_fsb = offset_fsb;
while (count_fsb != 0) {
/*
* Set up a transaction with which to allocate the
* backing store for the file. Do allocations in a
* loop until we get some space in the range we are
* interested in. The other space that might be allocated
* is in the delayed allocation extent on which we sit
* but before our buffer starts.
*/
nimaps = 0;
loops = 0;
while (nimaps == 0) {
if (is_xfs) {
tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
error = xfs_trans_reserve(tp, 0,
XFS_WRITE_LOG_RES(mp),
0, XFS_TRANS_PERM_LOG_RES,
XFS_WRITE_LOG_COUNT);
if (error) {
xfs_trans_cancel(tp, 0);
goto error0;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip,
XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
} else {
tp = NULL;
XFS_ILOCK(mp, io, XFS_ILOCK_EXCL |
XFS_EXTSIZE_WR);
}
/*
* Allocate the backing store for the file.
*/
XFS_BMAP_INIT(&(free_list),
&(first_block));
nimaps = XFS_STRAT_WRITE_IMAPS;
/*
* Ensure we don't go beyond eof - it is possible
* the extents changed since we did the read call, as
* we dropped the ilock in the interim.
*/
end_fsb = XFS_B_TO_FSB(mp, XFS_SIZE(mp, io));
xfs_bmap_last_offset(NULL, ip, &last_block,
XFS_DATA_FORK);
last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
if ((map_start_fsb + count_fsb) > last_block) {
count_fsb = last_block - map_start_fsb;
if (count_fsb == 0) {
if (is_xfs) {
xfs_bmap_cancel(&free_list);
xfs_trans_cancel(tp,
(XFS_TRANS_RELEASE_LOG_RES |
XFS_TRANS_ABORT));
}
XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL |
XFS_EXTSIZE_WR);
return XFS_ERROR(EAGAIN);
}
}
error = XFS_BMAPI(mp, tp, io, map_start_fsb, count_fsb,
XFS_BMAPI_WRITE, &first_block, 1,
imap, &nimaps, &free_list);
if (error) {
xfs_bmap_cancel(&free_list);
xfs_trans_cancel(tp,
(XFS_TRANS_RELEASE_LOG_RES |
XFS_TRANS_ABORT));
XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL |
XFS_EXTSIZE_WR);
goto error0;
}
if (is_xfs) {
error = xfs_bmap_finish(&(tp), &(free_list),
first_block, &committed);
if (error) {
xfs_bmap_cancel(&free_list);
xfs_trans_cancel(tp,
(XFS_TRANS_RELEASE_LOG_RES |
XFS_TRANS_ABORT));
xfs_iunlock(ip, XFS_ILOCK_EXCL);
goto error0;
}
error = xfs_trans_commit(tp,
XFS_TRANS_RELEASE_LOG_RES,
NULL);
if (error) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
goto error0;
}
}
if (nimaps == 0) {
XFS_IUNLOCK(mp, io,
XFS_ILOCK_EXCL|XFS_EXTSIZE_WR);
} /* else hold 'till we maybe loop again below */
}
/*
* See if we were able to allocate an extent that
* covers at least part of the user's requested size.
*/
offset_fsb = XFS_B_TO_FSBT(mp, offset);
for (i = 0; i < nimaps; i++) {
int maps;
if ((offset_fsb >= imap[i].br_startoff) &&
(offset_fsb <
(imap[i].br_startoff + imap[i].br_blockcount))) {
XFS_IUNLOCK(mp, io,
XFS_ILOCK_EXCL|XFS_EXTSIZE_WR);
maps = min(nimaps, *npbmaps);
*npbmaps = _xfs_imap_to_bmap(io, offset,
&imap[i], pbmapp,
maps, *npbmaps);
XFS_STATS_INC(xfsstats.xs_xstrat_quick);
return 0;
}
count_fsb -= imap[i].br_blockcount; /* for next bmapi,
if needed. */
}
/*
* We didn't get an extent the caller can write into so
* loop around and try starting after the last imap we got back.
*/
nimaps--; /* Index of last entry */
ASSERT(nimaps >= 0);
ASSERT(offset_fsb >=
imap[nimaps].br_startoff + imap[nimaps].br_blockcount);
ASSERT(count_fsb);
offset_fsb =
imap[nimaps].br_startoff + imap[nimaps].br_blockcount;
map_start_fsb = offset_fsb;
XFS_STATS_INC(xfsstats.xs_xstrat_split);
XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_WR);
}
ASSERT(0); /* Should never get here */
error0:
if (error) {
ASSERT(count_fsb != 0);
ASSERT(is_xfs || XFS_FORCED_SHUTDOWN(mp));
}
return XFS_ERROR(error);
}
/*
* xfs_bmap() is the same as the irix xfs_bmap from xfs_rw.c
* except for slight changes to the params
*/
int
xfs_bmap(bhv_desc_t *bdp,
xfs_off_t offset,
ssize_t count,
int flags,
page_buf_bmap_t *pbmapp,
int *npbmaps)
{
xfs_inode_t *ip;
int error;
int lockmode;
int fsynced = 0;
vnode_t *vp;
ip = XFS_BHVTOI(bdp);
ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
if (XFS_FORCED_SHUTDOWN(ip->i_iocore.io_mount))
return XFS_ERROR(EIO);
if (flags & PBF_READ) {
lockmode = xfs_ilock_map_shared(ip);
error = xfs_iomap_read(&ip->i_iocore, offset, count,
XFS_BMAPI_ENTIRE, pbmapp, npbmaps);
xfs_iunlock_map_shared(ip, lockmode);
} else if (flags & PBF_FILE_ALLOCATE) {
error = xfs_strategy(ip, offset, count, flags,
pbmapp, npbmaps);
} else { /* PBF_WRITE */
ASSERT(flags & PBF_WRITE);
vp = BHV_TO_VNODE(bdp);
xfs_ilock(ip, XFS_ILOCK_EXCL);
/*
* Make sure that the dquots are there. This doesn't hold
* the ilock across a disk read.
*/
if (XFS_IS_QUOTA_ON(ip->i_mount)) {
if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return XFS_ERROR(error);
}
}
}
retry:
error = xfs_iomap_write(&ip->i_iocore, offset, count,
pbmapp, npbmaps, flags);
/* xfs_iomap_write unlocks/locks/unlocks */
if (error == ENOSPC) {
switch (fsynced) {
case 0:
if (ip->i_delayed_blks) {
filemap_fdatawrite(LINVFS_GET_IP(vp)->i_mapping);
fsynced = 1;
} else {
fsynced = 2;
flags |= PBF_SYNC;
}
error = 0;
xfs_ilock(ip, XFS_ILOCK_EXCL);
goto retry;
case 1:
fsynced = 2;
if (!(flags & PBF_SYNC)) {
flags |= PBF_SYNC;
error = 0;
xfs_ilock(ip, XFS_ILOCK_EXCL);
goto retry;
}
case 2:
sync_blockdev(vp->v_vfsp->vfs_super->s_bdev);
xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
XFS_LOG_FORCE|XFS_LOG_SYNC);
error = 0;
/**
delay(HZ);
**/
fsynced++;
xfs_ilock(ip, XFS_ILOCK_EXCL);
goto retry;
}
}
}
return XFS_ERROR(error);
}
#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP
STATIC int
_xfs_imap_to_bmap(
@@ -397,7 +54,7 @@ _xfs_imap_to_bmap(
{
xfs_mount_t *mp;
xfs_fsize_t nisize;
int im, pbm;
int pbm;
xfs_fsblock_t start_block;
mp = io->io_mount;
@@ -405,7 +62,7 @@ _xfs_imap_to_bmap(
if (io->io_new_size > nisize)
nisize = io->io_new_size;
for (im=pbm=0; im < imaps && pbm < pbmaps; im++,pbmapp++,imap++,pbm++) {
for (pbm = 0; imaps && pbm < pbmaps; imaps--, pbmapp++, imap++, pbm++) {
pbmapp->pbm_target = io->io_flags & XFS_IOCORE_RT ?
mp->m_rtdev_targp : mp->m_ddev_targp;
pbmapp->pbm_offset = XFS_FSB_TO_B(mp, imap->br_startoff);
@@ -422,9 +79,8 @@ _xfs_imap_to_bmap(
pbmapp->pbm_flags = PBMF_DELAY;
} else {
pbmapp->pbm_bn = XFS_FSB_TO_DB_IO(io, start_block);
if (ISUNWRITTEN(imap)) {
if (ISUNWRITTEN(imap))
pbmapp->pbm_flags |= PBMF_UNWRITTEN;
}
}
if ((pbmapp->pbm_offset + pbmapp->pbm_bsize) >= nisize) {
@@ -436,149 +92,337 @@ _xfs_imap_to_bmap(
return pbm; /* Return the number filled */
}
STATIC int
xfs_iomap_read(
xfs_iocore_t *io,
loff_t offset,
size_t count,
int
xfs_iomap(xfs_iocore_t *io,
xfs_off_t offset,
ssize_t count,
int flags,
page_buf_bmap_t *pbmapp,
int *npbmaps)
{
xfs_fileoff_t offset_fsb;
xfs_fileoff_t end_fsb;
int nimaps;
xfs_mount_t *mp = io->io_mount;
xfs_fileoff_t offset_fsb, end_fsb;
int error;
xfs_mount_t *mp;
xfs_bmbt_irec_t imap[XFS_MAX_RW_NBMAPS];
int lockmode = 0;
xfs_bmbt_irec_t imap;
int nimaps = 1;
int bmap_flags = 0;
ASSERT(ismrlocked(io->io_lock, MR_UPDATE | MR_ACCESS) != 0);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
switch (flags & (PBF_READ|PBF_WRITE|PBF_FILE_ALLOCATE)) {
case PBF_READ:
lockmode = XFS_LCK_MAP_SHARED(mp, io);
bmap_flags = XFS_BMAPI_ENTIRE;
break;
case PBF_WRITE:
lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR;
bmap_flags = 0;
XFS_ILOCK(mp, io, lockmode);
break;
case PBF_FILE_ALLOCATE:
lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD;
bmap_flags = XFS_BMAPI_ENTIRE;
XFS_ILOCK(mp, io, lockmode);
break;
default:
ASSERT(flags & (PBF_READ|PBF_WRITE|PBF_FILE_ALLOCATE));
}
mp = io->io_mount;
offset_fsb = XFS_B_TO_FSBT(mp, offset);
nimaps = sizeof(imap) / sizeof(imap[0]);
nimaps = min(nimaps, *npbmaps); /* Don't ask for more than caller has */
end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
error = XFS_BMAPI(mp, NULL, io, offset_fsb,
(xfs_filblks_t)(end_fsb - offset_fsb),
flags, NULL, 0, imap,
&nimaps, NULL);
if (error) {
return XFS_ERROR(error);
(xfs_filblks_t)(end_fsb - offset_fsb) ,
bmap_flags, NULL, 0, &imap,
&nimaps, NULL);
if (error)
goto out;
switch (flags & (PBF_WRITE|PBF_FILE_ALLOCATE)) {
case PBF_WRITE:
/* If we found an extent, return it */
if (nimaps && (imap.br_startblock != HOLESTARTBLOCK))
break;
if (flags & PBF_DIRECT) {
error = XFS_IOMAP_WRITE_DIRECT(mp, io, offset,
count, flags, &imap, &nimaps, nimaps);
} else {
error = XFS_IOMAP_WRITE_DELAY(mp, io, offset, count,
flags, &imap, &nimaps);
}
break;
case PBF_FILE_ALLOCATE:
/* If we found an extent, return it */
XFS_IUNLOCK(mp, io, lockmode);
lockmode = 0;
if (nimaps && !ISNULLSTARTBLOCK(imap.br_startblock))
break;
error = XFS_IOMAP_WRITE_ALLOCATE(mp, io, &imap, &nimaps);
break;
}
if(nimaps) {
*npbmaps = _xfs_imap_to_bmap(io, offset, imap, pbmapp, nimaps,
*npbmaps);
} else
if (nimaps) {
*npbmaps = _xfs_imap_to_bmap(io, offset, &imap,
pbmapp, nimaps, *npbmaps);
} else {
*npbmaps = 0;
}
out:
if (lockmode)
XFS_IUNLOCK(mp, io, lockmode);
return XFS_ERROR(error);
}
/*
* xfs_iomap_write: return pagebuf_bmap_t's telling higher layers
* where to write.
* There are 2 main cases:
* 1 the extents already exist
* 2 must allocate.
* There are 3 cases when we allocate:
* delay allocation (doesn't really allocate or use transactions)
* direct allocation (no previous delay allocation)
* convert delay to real allocations
*/
static int
xfs_flush_space(
xfs_inode_t *ip,
int *fsynced,
int *ioflags)
{
vnode_t *vp = XFS_ITOV(ip);
switch (*fsynced) {
case 0:
if (ip->i_delayed_blks) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
filemap_fdatawrite(LINVFS_GET_IP(vp)->i_mapping);
xfs_ilock(ip, XFS_ILOCK_EXCL);
*fsynced = 1;
} else {
*ioflags |= PBF_SYNC;
*fsynced = 2;
}
return 0;
case 1:
*fsynced = 2;
*ioflags |= PBF_SYNC;
return 0;
case 2:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
sync_blockdev(vp->v_vfsp->vfs_super->s_bdev);
xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
XFS_LOG_FORCE|XFS_LOG_SYNC);
xfs_ilock(ip, XFS_ILOCK_EXCL);
*fsynced = 3;
return 0;
}
return 1;
}
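xfs_flush_space factors the old ENOSPC retry ladder out of xfs_bmap: first flush this file's own delayed allocations, then switch to synchronous allocation, and finally flush the block device and force the log, returning nonzero only once all three steps are exhausted. The caller-side pattern looks roughly like this (a sketch; try_allocation is a hypothetical stand-in for the bmapi call, and the ilock is assumed held, as in xfs_iomap_write_delay below):

/* Sketch of the retry pattern around a hypothetical try_allocation():
 * keep retrying while xfs_flush_space() can still free up space.
 */
int fsynced = 0;
for (;;) {
        error = try_allocation(ip, offset, count, ioflag);
        if (error != ENOSPC)
                break;
        if (xfs_flush_space(ip, &fsynced, &ioflag))
                return XFS_ERROR(ENOSPC);      /* all steps exhausted */
}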
STATIC int
xfs_iomap_write(
xfs_iocore_t *io,
int
xfs_iomap_write_direct(
xfs_inode_t *ip,
loff_t offset,
size_t count,
page_buf_bmap_t *pbmapp,
int *npbmaps,
int ioflag)
int ioflag,
xfs_bmbt_irec_t *ret_imap,
int *nmaps,
int found)
{
int maps;
int error = 0;
int found;
int flags = 0;
xfs_mount_t *mp = ip->i_mount;
xfs_iocore_t *io = &ip->i_iocore;
xfs_fileoff_t offset_fsb;
xfs_fileoff_t last_fsb;
xfs_filblks_t count_fsb;
xfs_fsize_t isize;
xfs_fsblock_t firstfsb;
int nimaps, maps;
int error;
int bmapi_flag;
int rt;
xfs_trans_t *tp;
xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS], *imapp;
xfs_bmap_free_t free_list;
int aeof;
xfs_filblks_t datablocks;
int committed;
int numrtextents;
uint resblks;
maps = *npbmaps;
if (!maps)
goto out;
/*
* Make sure that the dquots are there. This doesn't hold
* the ilock across a disk read.
*/
if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
return XFS_ERROR(error);
}
}
maps = min(XFS_WRITE_IMAPS, *nmaps);
nimaps = maps;
isize = ip->i_d.di_size;
aeof = (offset + count) > isize;
if (io->io_new_size > isize)
isize = io->io_new_size;
offset_fsb = XFS_B_TO_FSBT(mp, offset);
last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
count_fsb = last_fsb - offset_fsb;
if (found && (ret_imap->br_startblock == HOLESTARTBLOCK)) {
xfs_fileoff_t map_last_fsb;
map_last_fsb = ret_imap->br_blockcount + ret_imap->br_startoff;
if (map_last_fsb < last_fsb) {
last_fsb = map_last_fsb;
count_fsb = last_fsb - offset_fsb;
}
ASSERT(count_fsb > 0);
}
/*
* determine if reserving space on
* the data or realtime partition.
*/
if ((rt = ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
int sbrtextsize, iprtextsize;
sbrtextsize = mp->m_sb.sb_rextsize;
iprtextsize =
ip->i_d.di_extsize ? ip->i_d.di_extsize : sbrtextsize;
numrtextents = (count_fsb + iprtextsize - 1);
do_div(numrtextents, sbrtextsize);
datablocks = 0;
} else {
datablocks = count_fsb;
numrtextents = 0;
}
/*
* allocate and setup the transaction
*/
xfs_iunlock(ip, XFS_ILOCK_EXCL);
tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
error = xfs_trans_reserve(tp, resblks,
XFS_WRITE_LOG_RES(mp), numrtextents,
XFS_TRANS_PERM_LOG_RES,
XFS_WRITE_LOG_COUNT);
/*
* If we have extents that are allocated for this range,
* return them.
* check for running out of space
*/
if (error)
/*
* Free the transaction structure.
*/
xfs_trans_cancel(tp, 0);
xfs_ilock(ip, XFS_ILOCK_EXCL);
found = 0;
error = xfs_iomap_read(io, offset, count, flags, pbmapp, npbmaps);
if (error)
goto out;
goto error_out; /* Don't return in above if .. trans ..,
need lock to return */
if (XFS_IS_QUOTA_ON(mp)) {
if (xfs_trans_reserve_blkquota(tp, ip, resblks)) {
error = (EDQUOT);
goto error1;
}
}
nimaps = 1;
bmapi_flag = XFS_BMAPI_WRITE;
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
if (offset < ip->i_d.di_size || rt)
bmapi_flag |= XFS_BMAPI_PREALLOC;
/*
* If we found mappings and they can just have data written
* without conversion,
* let the caller write these and call us again.
*
* If we have a HOLE or UNWRITTEN, proceed down lower to
* get the space or to convert to written.
* issue the bmapi() call to allocate the blocks
*/
XFS_BMAP_INIT(&free_list, &firstfsb);
imapp = &imap[0];
error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
bmapi_flag, &firstfsb, 0, imapp, &nimaps, &free_list);
if (error) {
goto error0;
}
/*
* complete the transaction
*/
if (*npbmaps) {
if (!(pbmapp->pbm_flags & PBMF_HOLE)) {
*npbmaps = 1; /* Only checked the first one. */
/* We could check more, ... */
goto out;
}
error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
if (error) {
goto error0;
}
found = *npbmaps;
*npbmaps = maps; /* Restore to original requested */
if (ioflag & PBF_DIRECT) {
error = xfs_iomap_write_direct(io, offset, count, pbmapp,
npbmaps, ioflag, found);
} else {
error = xfs_iomap_write_delay(io, offset, count, pbmapp,
npbmaps, ioflag, found);
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
if (error) {
goto error_out;
}
out:
XFS_IUNLOCK(io->io_mount, io, XFS_ILOCK_EXCL);
/* copy any maps to caller's array and return any error. */
if (nimaps == 0) {
error = (ENOSPC);
goto error_out;
}
*ret_imap = imap[0];
*nmaps = 1;
return 0;
error0: /* Cancel bmap, unlock inode, and cancel trans */
xfs_bmap_cancel(&free_list);
error1: /* Just cancel transaction */
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
*nmaps = 0; /* nothing set-up here */
error_out:
return XFS_ERROR(error);
}
STATIC int
int
xfs_iomap_write_delay(
xfs_iocore_t *io,
xfs_inode_t *ip,
loff_t offset,
size_t count,
page_buf_bmap_t *pbmapp,
int *npbmaps,
int ioflag,
int found)
xfs_bmbt_irec_t *ret_imap,
int *nmaps)
{
xfs_mount_t *mp = ip->i_mount;
xfs_iocore_t *io = &ip->i_iocore;
xfs_fileoff_t offset_fsb;
xfs_fileoff_t ioalign;
xfs_fileoff_t last_fsb;
xfs_fileoff_t start_fsb;
xfs_filblks_t count_fsb;
xfs_off_t aligned_offset;
xfs_fsize_t isize;
xfs_fsblock_t firstblock;
int nimaps;
int error;
int n;
unsigned int iosize;
xfs_mount_t *mp;
#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP
xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
int aeof;
int fsynced = 0;
ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
mp = io->io_mount;
/*
* Make sure that the dquots are there. This doesn't hold
* the ilock across a disk read.
*/
if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_ILOCKED))) {
return XFS_ERROR(error);
}
}
isize = XFS_SIZE(mp, io);
retry:
isize = ip->i_d.di_size;
if (io->io_new_size > isize) {
isize = io->io_new_size;
}
@@ -591,50 +435,22 @@ xfs_iomap_write_delay(
* then extend the allocation (and the buffer used for the write)
* out to the file system's write iosize. We clean up any extra
* space left over when the file is closed in xfs_inactive().
* We can only do this if we are sure that we will create buffers
* over all of the space we allocate beyond the end of the file.
* Not doing so would allow us to create delalloc blocks with
* no pages in memory covering them. So, we need to check that
* there are not any real blocks in the area beyond the end of
* the file which we are optimistically going to preallocate. If
* there are then our buffers will stop when they encounter them
* and we may accidentally create delalloc blocks beyond them
* that we never cover with a buffer. All of this is because
* we are not actually going to write the extra blocks preallocated
* at this point.
*
* We don't bother with this for sync writes, because we need
* to minimize the amount we write for good performance.
*/
if (!(ioflag & PBF_SYNC) && ((offset + count) > XFS_SIZE(mp, io))) {
start_fsb = XFS_B_TO_FSBT(mp,
((xfs_ufsize_t)(offset + count - 1)));
count_fsb = mp->m_writeio_blocks;
while (count_fsb > 0) {
nimaps = XFS_WRITE_IMAPS;
error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
0, NULL, 0, imap, &nimaps,
NULL);
if (error) {
return error;
}
for (n = 0; n < nimaps; n++) {
if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
(imap[n].br_startblock != DELAYSTARTBLOCK)) {
goto write_map;
}
start_fsb += imap[n].br_blockcount;
count_fsb -= imap[n].br_blockcount;
ASSERT(count_fsb < 0xffff000);
}
}
if (!(ioflag & PBF_SYNC) && ((offset + count) > ip->i_d.di_size)) {
xfs_off_t aligned_offset;
unsigned int iosize;
xfs_fileoff_t ioalign;
iosize = mp->m_writeio_blocks;
aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
last_fsb = ioalign + iosize;
aeof = 1;
}
write_map:
nimaps = XFS_WRITE_IMAPS;
firstblock = NULLFSBLOCK;
@@ -642,11 +458,11 @@ xfs_iomap_write_delay(
* roundup the allocation request to m_dalign boundary if file size
* is greater than 512K and we are allocating past the allocation eof
*/
if (mp->m_dalign && (XFS_SIZE(mp, io) >= mp->m_dalign) && aeof) {
if (mp->m_dalign && (isize >= mp->m_dalign) && aeof) {
int eof;
xfs_fileoff_t new_last_fsb;
new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
if (error) {
return error;
}
@@ -655,7 +471,7 @@ xfs_iomap_write_delay(
}
}
error = XFS_BMAPI(mp, NULL, io, offset_fsb,
error = xfs_bmapi(NULL, ip, offset_fsb,
(xfs_filblks_t)(last_fsb - offset_fsb),
XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
@@ -663,235 +479,257 @@ xfs_iomap_write_delay(
/*
* This can be EDQUOT, if nimaps == 0
*/
if (error) {
if (error && (error != ENOSPC)) {
return XFS_ERROR(error);
}
/*
* If bmapi returned us nothing, and if we didn't get back EDQUOT,
* then we must have run out of space.
*/
if (nimaps == 0) {
return XFS_ERROR(ENOSPC);
if (xfs_flush_space(ip, &fsynced, &ioflag))
return XFS_ERROR(ENOSPC);
error = 0;
goto retry;
}
/*
* Now map our desired I/O size and alignment over the
* extents returned by xfs_bmapi().
*/
*npbmaps = _xfs_imap_to_bmap(io, offset, imap, pbmapp,
nimaps, *npbmaps);
*ret_imap = imap[0];
*nmaps = 1;
return 0;
}
STATIC int
xfs_iomap_write_direct(
xfs_iocore_t *io,
loff_t offset,
size_t count,
page_buf_bmap_t *pbmapp,
int *npbmaps,
int ioflag,
int found)
/*
* Pass in a delayed allocate extent, convert it to real extents;
* return to the caller the extent we create which maps on top of
* the originating caller's request.
*
* Called without a lock on the inode.
*/
int
xfs_iomap_write_allocate(
xfs_inode_t *ip,
xfs_bmbt_irec_t *map,
int *retmap)
{
xfs_inode_t *ip = XFS_IO_INODE(io);
xfs_mount_t *mp;
xfs_fileoff_t offset_fsb;
xfs_fileoff_t last_fsb;
xfs_mount_t *mp = ip->i_mount;
xfs_fileoff_t offset_fsb, last_block;
xfs_fileoff_t end_fsb, map_start_fsb;
xfs_fsblock_t first_block;
xfs_bmap_free_t free_list;
xfs_filblks_t count_fsb;
xfs_fsize_t isize;
xfs_fsblock_t firstfsb;
int nimaps, maps;
int error;
xfs_bmbt_irec_t imap[XFS_STRAT_WRITE_IMAPS];
xfs_trans_t *tp;
int i, nimaps, committed;
int error = 0;
#define XFS_WRITE_IMAPS XFS_BMAP_MAX_NMAP
xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS], *imapp;
xfs_bmap_free_t free_list;
int aeof;
int bmapi_flags;
xfs_filblks_t datablocks;
int rt;
int committed;
int numrtextents;
uint resblks;
int rtextsize;
*retmap = 0;
maps = min(XFS_WRITE_IMAPS, *npbmaps);
nimaps = maps;
/*
* Make sure that the dquots are there.
*/
mp = io->io_mount;
isize = XFS_SIZE(mp, io);
if (io->io_new_size > isize)
isize = io->io_new_size;
if (XFS_IS_QUOTA_ON(mp) && XFS_NOT_DQATTACHED(mp, ip)) {
if ((error = xfs_qm_dqattach(ip, 0))) {
return XFS_ERROR(error);
}
}
aeof = ((offset + count) > isize) ? 1 : 0;
offset_fsb = map->br_startoff;
count_fsb = map->br_blockcount;
map_start_fsb = offset_fsb;
offset_fsb = XFS_B_TO_FSBT(mp, offset);
last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
count_fsb = last_fsb - offset_fsb;
if (found && (pbmapp->pbm_flags & PBMF_HOLE)) {
xfs_fileoff_t map_last_fsb;
XFS_STATS_ADD(xfsstats.xs_xstrat_bytes, XFS_FSB_TO_B(mp, count_fsb));
map_last_fsb = XFS_B_TO_FSB(mp,
(pbmapp->pbm_bsize + pbmapp->pbm_offset));
while (count_fsb != 0) {
/*
* Set up a transaction with which to allocate the
* backing store for the file. Do allocations in a
* loop until we get some space in the range we are
* interested in. The other space that might be allocated
* is in the delayed allocation extent on which we sit
* but before our buffer starts.
*/
if (map_last_fsb < last_fsb) {
last_fsb = map_last_fsb;
count_fsb = last_fsb - offset_fsb;
}
ASSERT(count_fsb > 0);
}
nimaps = 0;
while (nimaps == 0) {
tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
error = xfs_trans_reserve(tp, 0, XFS_WRITE_LOG_RES(mp),
0, XFS_TRANS_PERM_LOG_RES,
XFS_WRITE_LOG_COUNT);
if (error) {
xfs_trans_cancel(tp, 0);
return XFS_ERROR(error);
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
/*
* roundup the allocation request to m_dalign boundary if file size
* is greater than 512K and we are allocating past the allocation eof
*/
if (!found && mp->m_dalign && (isize >= 524288) && aeof) {
int eof;
xfs_fileoff_t new_last_fsb;
XFS_BMAP_INIT(&free_list, &first_block);
new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
printk("xfs_iomap_write_direct: about to XFS_BMAP_EOF %Ld\n",
new_last_fsb);
error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
if (error)
goto error_out;
if (eof)
last_fsb = new_last_fsb;
}
nimaps = XFS_STRAT_WRITE_IMAPS;
/*
* Ensure we don't go beyond eof - it is possible
* the extents changed since we did the read call, as
* we dropped the ilock in the interim.
*/
bmapi_flags = XFS_BMAPI_WRITE|XFS_BMAPI_DIRECT_IO|XFS_BMAPI_ENTIRE;
bmapi_flags &= ~XFS_BMAPI_DIRECT_IO;
end_fsb = XFS_B_TO_FSB(mp, ip->i_d.di_size);
xfs_bmap_last_offset(NULL, ip, &last_block,
XFS_DATA_FORK);
last_block = XFS_FILEOFF_MAX(last_block, end_fsb);
if ((map_start_fsb + count_fsb) > last_block) {
count_fsb = last_block - map_start_fsb;
if (count_fsb == 0) {
error = EAGAIN;
goto trans_cancel;
}
}
/*
* determine if this is a realtime file
*/
if ((rt = (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) != 0) {
rtextsize = mp->m_sb.sb_rextsize;
} else
rtextsize = 0;
/* Go get the actual blocks */
error = xfs_bmapi(tp, ip, map_start_fsb, count_fsb,
XFS_BMAPI_WRITE, &first_block, 1,
imap, &nimaps, &free_list);
error = 0;
if (error)
goto trans_cancel;
/*
* allocate file space for the bmapp entries passed in.
*/
error = xfs_bmap_finish(&tp, &free_list,
first_block, &committed);
/*
* determine if reserving space on
* the data or realtime partition.
*/
if (rt) {
numrtextents = (count_fsb + rtextsize - 1);
do_div(numrtextents, rtextsize);
datablocks = 0;
} else {
datablocks = count_fsb;
numrtextents = 0;
}
if (error)
goto trans_cancel;
/*
* allocate and setup the transaction
*/
tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
error = xfs_trans_commit(tp,
XFS_TRANS_RELEASE_LOG_RES, NULL);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
goto error0;
error = xfs_trans_reserve(tp,
resblks,
XFS_WRITE_LOG_RES(mp),
numrtextents,
XFS_TRANS_PERM_LOG_RES,
XFS_WRITE_LOG_COUNT);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
/*
* check for running out of space
*/
if (error) {
/*
* Free the transaction structure.
* See if we were able to allocate an extent that
* covers at least part of the callers request
*/
xfs_trans_cancel(tp, 0);
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
for (i = 0; i < nimaps; i++) {
if ((map->br_startoff >= imap[i].br_startoff) &&
(map->br_startoff < (imap[i].br_startoff +
imap[i].br_blockcount))) {
*map = imap[i];
*retmap = 1;
XFS_STATS_INC(xfsstats.xs_xstrat_quick);
return 0;
}
count_fsb -= imap[i].br_blockcount;
}
if (error) {
goto error_out; /* Don't return in above if .. trans ..,
need lock to return */
/* So far we have not mapped the requested part of the
* file, just surrounding data, try again.
*/
nimaps--;
offset_fsb = imap[nimaps].br_startoff +
imap[nimaps].br_blockcount;
map_start_fsb = offset_fsb;
}
if (XFS_IS_QUOTA_ON(mp)) {
if (xfs_trans_reserve_quota(tp,
ip->i_udquot,
ip->i_gdquot,
resblks, 0, 0)) {
error = (EDQUOT);
goto error1;
}
nimaps = 1;
} else {
nimaps = 2;
}
trans_cancel:
xfs_bmap_cancel(&free_list);
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
error0:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return XFS_ERROR(error);
}
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
int
xfs_iomap_write_unwritten(
xfs_inode_t *ip,
loff_t offset,
size_t count)
{
xfs_mount_t *mp = ip->i_mount;
xfs_trans_t *tp;
xfs_fileoff_t offset_fsb;
xfs_filblks_t count_fsb;
xfs_filblks_t numblks_fsb;
xfs_bmbt_irec_t imap;
int committed;
int error;
int nres;
int nimaps;
xfs_fsblock_t firstfsb;
xfs_bmap_free_t free_list;
/*
* issue the bmapi() call to allocate the blocks
*/
XFS_BMAP_INIT(&free_list, &firstfsb);
imapp = &imap[0];
error = XFS_BMAPI(mp, tp, io, offset_fsb, count_fsb,
bmapi_flags, &firstfsb, 1, imapp, &nimaps, &free_list);
if (error) {
goto error0;
}
offset_fsb = XFS_B_TO_FSBT(mp, offset);
count_fsb = XFS_B_TO_FSB(mp, count);
/*
* complete the transaction
*/
do {
nres = XFS_DIOSTRAT_SPACE_RES(mp, 0);
error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
if (error) {
goto error0;
}
/*
* set up a transaction to convert the range of extents
* from unwritten to real. Do allocations in a loop until
* we have covered the range passed in.
*/
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
if (error) {
goto error_out;
}
tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
error = xfs_trans_reserve(tp, nres,
XFS_WRITE_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES,
XFS_WRITE_LOG_COUNT);
if (error) {
xfs_trans_cancel(tp, 0);
goto error0;
}
/* copy any maps to caller's array and return any error. */
if (nimaps == 0) {
error = (ENOSPC);
goto error_out;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
maps = min(nimaps, maps);
*npbmaps = _xfs_imap_to_bmap(io, offset, &imap[0], pbmapp, maps,
*npbmaps);
if (*npbmaps) {
/*
* this is new since xfs_iomap_read
* didn't find it.
* Modify the unwritten extent state of the buffer.
*/
if (*npbmaps != 1) {
/* NEED MORE WORK FOR MULTIPLE BMAPS (which are new) */
BUG();
}
}
goto out;
XFS_BMAP_INIT(&free_list, &firstfsb);
nimaps = 1;
error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
XFS_BMAPI_WRITE, &firstfsb,
1, &imap, &nimaps, &free_list);
if (error)
goto error_on_bmapi_transaction;
error0: /* Cancel bmap, unlock inode, and cancel trans */
xfs_bmap_cancel(&free_list);
error = xfs_bmap_finish(&(tp), &(free_list),
firstfsb, &committed);
if (error)
goto error_on_bmapi_transaction;
error1: /* Just cancel transaction */
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
*npbmaps = 0; /* nothing set-up here */
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
goto error0;
error_out:
out: /* Just return error and any tracing at end of routine */
if ((numblks_fsb = imap.br_blockcount) == 0) {
/*
* The numblks_fsb value should always get
* smaller, otherwise the loop is stuck.
*/
ASSERT(imap.br_blockcount);
break;
}
offset_fsb += numblks_fsb;
count_fsb -= numblks_fsb;
} while (count_fsb > 0);
return 0;
error_on_bmapi_transaction:
xfs_bmap_cancel(&free_list);
xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT));
xfs_iunlock(ip, XFS_ILOCK_EXCL);
error0:
return XFS_ERROR(error);
}
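Unwritten extents are allocated on disk but flagged so that reads return zeros; once data has actually landed in them (for example via direct I/O into preallocated space), xfs_iomap_write_unwritten flips the range to written state, one transaction per bmapi call, until the whole range is converted. This is one of the "hooks for unwritten extents" the commit message mentions. A hedged sketch of how an I/O-completion hook might drive it through the iocore vector (linvfs_end_io_unwritten is a made-up name):

/* Sketch (hypothetical completion hook): after the data I/O into an
 * unwritten range finishes, convert it so reads stop returning zeros.
 */
void linvfs_end_io_unwritten(xfs_inode_t *ip, loff_t offset, size_t size)
{
        XFS_IOMAP_WRITE_UNWRITTEN(ip->i_mount, &ip->i_iocore, offset, size);
}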
@@ -808,6 +808,25 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
}
}
int
xfs_bmap(bhv_desc_t *bdp,
xfs_off_t offset,
ssize_t count,
int flags,
page_buf_bmap_t *pbmapp,
int *npbmaps)
{
xfs_inode_t *ip = XFS_BHVTOI(bdp);
xfs_iocore_t *io = &ip->i_iocore;
ASSERT((ip->i_d.di_mode & IFMT) == IFREG);
ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));
return xfs_iomap(io, offset, count, flags, pbmapp, npbmaps);
}
/*
* Wrapper around bdstrat so that we can stop data
* from going to disk in case we are shutting down the filesystem.
......
@@ -36,6 +36,8 @@ struct vnode;
struct bhv_desc;
struct xfs_mount;
struct xfs_iocore;
struct xfs_inode;
struct xfs_bmbt_irec;
struct page_buf_s;
struct page_buf_bmap_s;
@@ -62,6 +64,16 @@ extern ssize_t xfs_sendfile (struct bhv_desc *, struct file *,
loff_t *, size_t, read_actor_t,
void *, struct cred *);
extern int xfs_iomap (struct xfs_iocore *, xfs_off_t, ssize_t, int,
struct page_buf_bmap_s *, int *);
extern int xfs_iomap_write_direct (struct xfs_inode *, loff_t, size_t,
int, struct xfs_bmbt_irec *, int *, int);
extern int xfs_iomap_write_delay (struct xfs_inode *, loff_t, size_t,
int, struct xfs_bmbt_irec *, int *);
extern int xfs_iomap_write_allocate (struct xfs_inode *,
struct xfs_bmbt_irec *, int *);
extern int xfs_iomap_write_unwritten (struct xfs_inode *, loff_t, size_t);
extern int xfs_dev_is_read_only (struct xfs_mount *, char *);
extern void XFS_log_write_unmount_ro (struct bhv_desc *);
......
@@ -41,14 +41,24 @@ xfs_size_fn(
}
xfs_ioops_t xfs_iocore_xfs = {
.xfs_ioinit = (xfs_ioinit_t) fs_noerr,
.xfs_bmapi_func = (xfs_bmapi_t) xfs_bmapi,
.xfs_bmap_eof_func = (xfs_bmap_eof_t) xfs_bmap_eof,
.xfs_iomap_write_direct =
(xfs_iomap_write_direct_t) xfs_iomap_write_direct,
.xfs_iomap_write_delay =
(xfs_iomap_write_delay_t) xfs_iomap_write_delay,
.xfs_iomap_write_allocate =
(xfs_iomap_write_allocate_t) xfs_iomap_write_allocate,
.xfs_iomap_write_unwritten =
(xfs_iomap_write_unwritten_t) xfs_iomap_write_unwritten,
.xfs_ilock = (xfs_lock_t) xfs_ilock,
.xfs_lck_map_shared = (xfs_lck_map_shared_t) xfs_ilock_map_shared,
.xfs_ilock_demote = (xfs_lock_demote_t) xfs_ilock_demote,
.xfs_ilock_nowait = (xfs_lock_nowait_t) xfs_ilock_nowait,
.xfs_unlock = (xfs_unlk_t) xfs_iunlock,
.xfs_size_func = (xfs_size_t) xfs_size_fn,
.xfs_lastbyte = (xfs_lastbyte_t) xfs_file_last_byte,
.xfs_iodone = (xfs_iodone_t) fs_noerr,
};
void
@@ -83,4 +93,3 @@ xfs_iocore_inode_init(
xfs_iocore_inode_reinit(ip);
}
@@ -87,41 +87,60 @@ struct xfs_bmap_free;
#define AIL_LOCK(mp,s) s=mutex_spinlock(&(mp)->m_ail_lock)
#define AIL_UNLOCK(mp,s) mutex_spinunlock(&(mp)->m_ail_lock, s)
/* Prototypes and functions for I/O core modularization, a vector
* of functions is used to indirect from xfs/cxfs independent code
* to the xfs/cxfs dependent code.
* The vector is placed in the mount structure so that we can
* minimize the number of memory indirections involved.
/*
* Prototypes and functions for I/O core modularization.
*/
struct flid;
struct buf;
typedef int (*xfs_ioinit_t)(struct vfs *,
struct xfs_mount_args *, int *);
typedef int (*xfs_bmapi_t)(struct xfs_trans *, void *,
xfs_fileoff_t, xfs_filblks_t, int,
xfs_fsblock_t *, xfs_extlen_t,
struct xfs_bmbt_irec *, int *,
struct xfs_bmap_free *);
typedef int (*xfs_bmap_eof_t)(void *, xfs_fileoff_t, int, int *);
typedef int (*xfs_iomap_write_direct_t)(
void *, loff_t, size_t, int,
struct xfs_bmbt_irec *, int *, int);
typedef int (*xfs_iomap_write_delay_t)(
void *, loff_t, size_t, int,
struct xfs_bmbt_irec *, int *);
typedef int (*xfs_iomap_write_allocate_t)(
void *, struct xfs_bmbt_irec *, int *);
typedef int (*xfs_iomap_write_unwritten_t)(
void *, loff_t, size_t);
typedef uint (*xfs_lck_map_shared_t)(void *);
typedef void (*xfs_lock_t)(void *, uint);
typedef void (*xfs_lock_demote_t)(void *, uint);
typedef int (*xfs_lock_nowait_t)(void *, uint);
typedef void (*xfs_unlk_t)(void *, unsigned int);
typedef void (*xfs_chgtime_t)(void *, int);
typedef xfs_fsize_t (*xfs_size_t)(void *);
typedef xfs_fsize_t (*xfs_lastbyte_t)(void *);
typedef xfs_fsize_t (*xfs_iodone_t)(struct vfs *);
typedef struct xfs_ioops {
xfs_bmapi_t xfs_bmapi_func;
xfs_bmap_eof_t xfs_bmap_eof_func;
xfs_lock_t xfs_ilock;
xfs_lock_demote_t xfs_ilock_demote;
xfs_lock_nowait_t xfs_ilock_nowait;
xfs_unlk_t xfs_unlock;
xfs_chgtime_t xfs_chgtime;
xfs_size_t xfs_size_func;
xfs_lastbyte_t xfs_lastbyte;
xfs_ioinit_t xfs_ioinit;
xfs_bmapi_t xfs_bmapi_func;
xfs_bmap_eof_t xfs_bmap_eof_func;
xfs_iomap_write_direct_t xfs_iomap_write_direct;
xfs_iomap_write_delay_t xfs_iomap_write_delay;
xfs_iomap_write_allocate_t xfs_iomap_write_allocate;
xfs_iomap_write_unwritten_t xfs_iomap_write_unwritten;
xfs_lock_t xfs_ilock;
xfs_lck_map_shared_t xfs_lck_map_shared;
xfs_lock_demote_t xfs_ilock_demote;
xfs_lock_nowait_t xfs_ilock_nowait;
xfs_unlk_t xfs_unlock;
xfs_size_t xfs_size_func;
xfs_iodone_t xfs_iodone;
} xfs_ioops_t;
#define XFS_IOINIT(vfsp, args, flags) \
(*(mp)->m_io_ops.xfs_ioinit)(vfsp, args, flags)
#define XFS_BMAPI(mp, trans,io,bno,len,f,first,tot,mval,nmap,flist) \
(*(mp)->m_io_ops.xfs_bmapi_func) \
(trans,(io)->io_obj,bno,len,f,first,tot,mval,nmap,flist)
@@ -130,9 +149,31 @@ typedef struct xfs_ioops {
(*(mp)->m_io_ops.xfs_bmap_eof_func) \
((io)->io_obj, endoff, whichfork, eof)
#define XFS_IOMAP_WRITE_DIRECT(mp, io, offset, count, flags, mval, nmap, found)\
(*(mp)->m_io_ops.xfs_iomap_write_direct) \
((io)->io_obj, offset, count, flags, mval, nmap, found)
#define XFS_IOMAP_WRITE_DELAY(mp, io, offset, count, flags, mval, nmap) \
(*(mp)->m_io_ops.xfs_iomap_write_delay) \
((io)->io_obj, offset, count, flags, mval, nmap)
#define XFS_IOMAP_WRITE_ALLOCATE(mp, io, mval, nmap) \
(*(mp)->m_io_ops.xfs_iomap_write_allocate) \
((io)->io_obj, mval, nmap)
#define XFS_IOMAP_WRITE_UNWRITTEN(mp, io, offset, count) \
(*(mp)->m_io_ops.xfs_iomap_write_unwritten) \
((io)->io_obj, offset, count)
#define XFS_LCK_MAP_SHARED(mp, io) \
(*(mp)->m_io_ops.xfs_lck_map_shared)((io)->io_obj)
#define XFS_ILOCK(mp, io, mode) \
(*(mp)->m_io_ops.xfs_ilock)((io)->io_obj, mode)
#define XFS_ILOCK_NOWAIT(mp, io, mode) \
(*(mp)->m_io_ops.xfs_ilock_nowait)((io)->io_obj, mode)
#define XFS_IUNLOCK(mp, io, mode) \
(*(mp)->m_io_ops.xfs_unlock)((io)->io_obj, mode)
@@ -142,8 +183,13 @@ typedef struct xfs_ioops {
#define XFS_SIZE(mp, io) \
(*(mp)->m_io_ops.xfs_size_func)((io)->io_obj)
#define XFS_LASTBYTE(mp, io) \
(*(mp)->m_io_ops.xfs_lastbyte)((io)->io_obj)
#define XFS_IODONE(vfsp) \
(*(mp)->m_io_ops.xfs_iodone)(vfsp)
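Every entry point the xfs/cxfs-independent code needs is now reached through this table in the mount structure, so CXFS can install its own client-side implementations without the callers changing. The pattern, reduced to a self-contained sketch with hypothetical names:

/* Sketch of the ops-vector indirection (hypothetical names): callers
 * only know the table; xfs or cxfs fills in the function pointers.
 */
typedef int (*write_unwritten_fn)(void *obj, loff_t off, size_t len);

struct io_ops {
        write_unwritten_fn      write_unwritten;
};

struct mount_sketch {
        struct io_ops           io_ops;
};

#define WRITE_UNWRITTEN(mp, obj, off, len) \
        (*(mp)->io_ops.write_unwritten)(obj, off, len)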
/*
* Prototypes and functions for the XFS realtime subsystem.
*/
typedef struct xfs_mount {
@@ -303,8 +349,8 @@ typedef struct xfs_mount {
/*
* Default minimum read and write sizes.
*/
#define XFS_READIO_LOG_LARGE 12
#define XFS_WRITEIO_LOG_LARGE 12
#define XFS_READIO_LOG_LARGE 16
#define XFS_WRITEIO_LOG_LARGE 16
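The two log values set the default I/O chunk as a power of two, so bumping them from 12 to 16 raises the default read and write size from 4KiB to 64KiB:

/* 1 << 12 == 4096 bytes (4KiB);  1 << 16 == 65536 bytes (64KiB) */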
/*
* Default allocation size
*/
......