Commit cff3f611 authored by Stephen Lord's avatar Stephen Lord

Merge bk://kernel.bkbits.net/lord/xfs-2.5

into laptop.americas.sgi.com:/home/lord/src/xfs-2.5
parents 74a6c1d3 6e50b0f1
......@@ -125,7 +125,7 @@ blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
return blockdev_direct_IO(rw, iocb, inode, inode->i_bdev, iov, offset,
nr_segs, blkdev_get_blocks);
nr_segs, blkdev_get_blocks, NULL);
}
static int blkdev_writepage(struct page *page, struct writeback_control *wbc)
......
......@@ -15,6 +15,8 @@
* added support for non-aligned IO.
* 06Nov2002 pbadari@us.ibm.com
* added asynchronous IO support.
* 21Jul2003 nathans@sgi.com
* added IO completion notifier.
*/
#include <linux/kernel.h>
......@@ -74,6 +76,7 @@ struct dio {
int boundary; /* prev block is at a boundary */
int reap_counter; /* rate limit reaping */
get_blocks_t *get_blocks; /* block mapping function */
dio_iodone_t *end_io; /* IO completion function */
sector_t final_block_in_bio; /* current final block in bio + 1 */
sector_t next_block_for_io; /* next block to be put under IO,
in dio_blocks units */
......@@ -192,6 +195,18 @@ static struct page *dio_get_page(struct dio *dio)
return dio->pages[dio->head++];
}
/*
 * Notify the filesystem that all BIO-level I/O for this direct-IO
 * request has completed.  A filesystem registers interest by passing
 * an end_io callback to blockdev_direct_IO(); the b_private field of
 * the mapping buffer_head is forwarded so the filesystem can carry
 * state from its get_blocks calls through to completion.
 */
static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes)
{
	/* No callback registered -- nothing to report. */
	if (!dio->end_io)
		return;
	dio->end_io(dio->inode, offset, bytes, dio->map_bh.b_private);
}
/*
* Called when a BIO has been processed. If the count goes to zero then IO is
* complete and we can signal this to the AIO layer.
......@@ -199,7 +214,9 @@ static struct page *dio_get_page(struct dio *dio)
static void finished_one_bio(struct dio *dio)
{
if (atomic_dec_and_test(&dio->bio_count)) {
if(dio->is_async) {
if (dio->is_async) {
dio_complete(dio, dio->block_in_file << dio->blkbits,
dio->result);
aio_complete(dio->iocb, dio->result, 0);
kfree(dio);
}
......@@ -824,7 +841,7 @@ static int do_direct_IO(struct dio *dio)
static int
direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
const struct iovec *iov, loff_t offset, unsigned long nr_segs,
unsigned blkbits, get_blocks_t get_blocks)
unsigned blkbits, get_blocks_t get_blocks, dio_iodone_t end_io)
{
unsigned long user_addr;
int seg;
......@@ -852,6 +869,8 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
dio->boundary = 0;
dio->reap_counter = 0;
dio->get_blocks = get_blocks;
dio->end_io = end_io;
dio->map_bh.b_private = NULL;
dio->final_block_in_bio = -1;
dio->next_block_for_io = -1;
......@@ -953,6 +972,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
if (rw == READ && (offset + ret > i_size))
ret = i_size - offset;
}
dio_complete(dio, offset, ret);
kfree(dio);
}
return ret;
......@@ -964,7 +984,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
int
blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
unsigned long nr_segs, get_blocks_t get_blocks)
unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io)
{
int seg;
size_t size;
......@@ -999,7 +1019,7 @@ blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
}
retval = direct_io_worker(rw, iocb, inode, iov, offset,
nr_segs, blkbits, get_blocks);
nr_segs, blkbits, get_blocks, end_io);
out:
return retval;
}
......@@ -662,7 +662,7 @@ ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs, ext2_get_blocks);
offset, nr_segs, ext2_get_blocks, NULL);
}
static int
......
......@@ -1562,7 +1562,8 @@ static int ext3_direct_IO(int rw, struct kiocb *iocb,
}
ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs, ext3_direct_io_get_blocks);
offset, nr_segs,
ext3_direct_io_get_blocks, NULL);
out_stop:
if (handle) {
......
......@@ -308,7 +308,7 @@ static int jfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs, jfs_get_blocks);
offset, nr_segs, jfs_get_blocks, NULL);
}
struct address_space_operations jfs_aops = {
......
......@@ -76,10 +76,10 @@ linvfs_unwritten_done(
/*
* Issue transactions to convert a buffer range from unwritten
* to written extents.
* to written extents (buffered IO).
*/
STATIC void
linvfs_unwritten_conv(
linvfs_unwritten_convert(
xfs_buf_t *bp)
{
vnode_t *vp = XFS_BUF_FSPRIVATE(bp, vnode_t *);
......@@ -97,6 +97,30 @@ linvfs_unwritten_conv(
pagebuf_iodone(bp, 0, 0);
}
/*
* Issue transactions to convert a buffer range from unwritten
* to written extents (direct IO).
*/
STATIC void
linvfs_unwritten_convert_direct(
	struct inode	*inode,		/* inode the direct IO targeted */
	loff_t		offset,		/* byte offset of completed IO */
	ssize_t		size,		/* bytes transferred */
	void		*private)	/* b_private from the map buffer_head */
{
	/* linvfs_get_block_core stores the inode in bh->b_private only
	 * when the mapped extent was unwritten, so private doubles as
	 * an "unwritten extent was written" flag here. */
	ASSERT(!private || inode == (struct inode *)private);

	/* private indicates an unwritten extent lay beneath this IO,
	 * see linvfs_get_block_core.
	 */
	if (private && size > 0) {
		vnode_t	*vp = LINVFS_GET_VP(inode);
		int	error;

		/* Convert the just-written range to normal written
		 * extents.  error is set by the macro but discarded --
		 * NOTE(review): confirm ignoring it is intentional. */
		VOP_BMAP(vp, offset, size, BMAP_UNWRITTEN, NULL, NULL, error);
	}
}
STATIC int
map_blocks(
struct inode *inode,
......@@ -465,7 +489,7 @@ map_unwritten(
XFS_BUF_SET_SIZE(pb, size);
XFS_BUF_SET_OFFSET(pb, offset);
XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_conv);
XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert);
if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
pagebuf_iodone(pb, 1, 1);
......@@ -813,7 +837,7 @@ STATIC int
linvfs_get_block_core(
struct inode *inode,
sector_t iblock,
int blocks,
unsigned long blocks,
struct buffer_head *bh_result,
int create,
int direct,
......@@ -863,8 +887,11 @@ linvfs_get_block_core(
set_buffer_mapped(bh_result);
}
if (pbmap.pbm_flags & PBMF_UNWRITTEN) {
if (create)
if (create) {
if (direct)
bh_result->b_private = inode;
set_buffer_mapped(bh_result);
}
set_buffer_unwritten(bh_result);
set_buffer_delay(bh_result);
}
......@@ -876,7 +903,7 @@ linvfs_get_block_core(
*/
if (create &&
((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
(offset >= inode->i_size))) {
(offset >= inode->i_size) || (pbmap.pbm_flags & PBMF_NEW))) {
set_buffer_new(bh_result);
}
......@@ -941,11 +968,21 @@ linvfs_direct_IO(
loff_t offset,
unsigned long nr_segs)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
vnode_t *vp = LINVFS_GET_VP(inode);
page_buf_bmap_t pbmap;
int maps = 1;
int error;
return blockdev_direct_IO(rw, iocb, inode, NULL,
iov, offset, nr_segs, linvfs_get_blocks_direct);
VOP_BMAP(vp, offset, 0, BMAP_DEVICE, &pbmap, &maps, error);
if (error)
return -error;
return blockdev_direct_IO(rw, iocb, inode, pbmap.pbm_target->pbr_bdev,
iov, offset, nr_segs,
linvfs_get_blocks_direct,
linvfs_unwritten_convert_direct);
}
......@@ -958,11 +995,6 @@ linvfs_bmap(
vnode_t *vp = LINVFS_GET_VP(inode);
int error;
/* block - Linux disk blocks 512b */
/* bmap input offset - bytes 1b */
/* bmap output bn - XFS BBs 512b */
/* bmap output delta - bytes 1b */
vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);
VOP_RWLOCK(vp, VRWLOCK_READ);
......
......@@ -59,105 +59,102 @@ static struct vm_operations_struct linvfs_file_vm_ops;
STATIC ssize_t
linvfs_readv(
struct file *filp,
const struct iovec *iovp,
unsigned long nr_segs,
loff_t *ppos)
linvfs_read(
struct kiocb *iocb,
char __user *buf,
size_t count,
loff_t pos)
{
vnode_t *vp = LINVFS_GET_VP(filp->f_dentry->d_inode);
struct iovec iov = {buf, count};
vnode_t *vp;
int error;
VOP_READ(vp, filp, iovp, nr_segs, ppos, NULL, error);
BUG_ON(iocb->ki_pos != pos);
vp = LINVFS_GET_VP(iocb->ki_filp->f_dentry->d_inode);
VOP_READ(vp, iocb, &iov, 1, &iocb->ki_pos, NULL, error);
return error;
}
STATIC ssize_t
linvfs_writev(
struct file *filp,
const struct iovec *iovp,
unsigned long nr_segs,
loff_t *ppos)
linvfs_write(
struct kiocb *iocb,
const char *buf,
size_t count,
loff_t pos)
{
struct inode *inode = filp->f_dentry->d_inode;
vnode_t *vp = LINVFS_GET_VP(inode);
int error = filp->f_error;
struct iovec iov = {(void *)buf, count};
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
vnode_t *vp = LINVFS_GET_VP(inode);
int error;
int direct = file->f_flags & O_DIRECT;
if (unlikely(error)) {
filp->f_error = 0;
return error;
}
BUG_ON(iocb->ki_pos != pos);
/*
* We allow multiple direct writers in, there is no
* potential call to vmtruncate in that path.
*/
if (filp->f_flags & O_DIRECT) {
VOP_WRITE(vp, filp, iovp, nr_segs, ppos, NULL, error);
if (direct) {
VOP_WRITE(vp, iocb, &iov, 1, &iocb->ki_pos, NULL, error);
} else {
down(&inode->i_sem);
VOP_WRITE(vp, filp, iovp, nr_segs, ppos, NULL, error);
VOP_WRITE(vp, iocb, &iov, 1, &iocb->ki_pos, NULL, error);
up(&inode->i_sem);
}
return error;
}
STATIC ssize_t
linvfs_read(
struct file *filp,
char *buf,
size_t count,
linvfs_readv(
struct file *file,
const struct iovec *iov,
unsigned long nr_segs,
loff_t *ppos)
{
struct iovec iov = {buf, count};
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
vnode_t *vp = LINVFS_GET_VP(inode);
struct kiocb kiocb;
int error;
return linvfs_readv(filp, &iov, 1, ppos);
}
init_sync_kiocb(&kiocb, file);
kiocb.ki_pos = *ppos;
VOP_READ(vp, &kiocb, iov, nr_segs, &kiocb.ki_pos, NULL, error);
if (-EIOCBQUEUED == error)
error = wait_on_sync_kiocb(&kiocb);
*ppos = kiocb.ki_pos;
return error;
}
STATIC ssize_t
linvfs_write(
linvfs_writev(
struct file *file,
const char *buf,
size_t count,
const struct iovec *iov,
unsigned long nr_segs,
loff_t *ppos)
{
struct iovec iov = {(void *)buf, count};
return linvfs_writev(file, &iov, 1, ppos);
}
STATIC ssize_t
linvfs_aio_read(
struct kiocb *iocb,
char *buf,
size_t count,
loff_t pos)
{
struct iovec iov = {buf, count};
return linvfs_readv(iocb->ki_filp, &iov, 1, &iocb->ki_pos);
}
struct inode *inode = file->f_dentry->d_inode->i_mapping->host;
vnode_t *vp = LINVFS_GET_VP(inode);
struct kiocb kiocb;
int error;
int direct = file->f_flags & O_DIRECT;
STATIC ssize_t
linvfs_aio_write(
struct kiocb *iocb,
const char *buf,
size_t count,
loff_t pos)
{
struct iovec iov = {(void *)buf, count};
init_sync_kiocb(&kiocb, file);
kiocb.ki_pos = *ppos;
if (direct) {
VOP_WRITE(vp, &kiocb, iov, nr_segs, &kiocb.ki_pos, NULL, error);
} else {
down(&inode->i_sem);
VOP_WRITE(vp, &kiocb, iov, nr_segs, &kiocb.ki_pos, NULL, error);
up(&inode->i_sem);
}
if (-EIOCBQUEUED == error)
error = wait_on_sync_kiocb(&kiocb);
*ppos = kiocb.ki_pos;
return linvfs_writev(iocb->ki_filp, &iov, 1, &iocb->ki_pos);
return error;
}
STATIC ssize_t
linvfs_sendfile(
struct file *filp,
......@@ -381,12 +378,12 @@ linvfs_mprotect(
struct file_operations linvfs_file_operations = {
.llseek = generic_file_llseek,
.read = linvfs_read,
.write = linvfs_write,
.read = do_sync_read,
.write = do_sync_write,
.readv = linvfs_readv,
.writev = linvfs_writev,
.aio_read = linvfs_aio_read,
.aio_write = linvfs_aio_write,
.aio_read = linvfs_read,
.aio_write = linvfs_write,
.sendfile = linvfs_sendfile,
.ioctl = linvfs_ioctl,
.mmap = linvfs_file_mmap,
......
......@@ -624,15 +624,11 @@ xfs_ioctl(
case XFS_IOC_DIOINFO: {
struct dioattr da;
pb_target_t *target =
(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
/*
* this only really needs to be BBSIZE.
* it is set to the file system block size to
* avoid having to do block zeroing on short writes.
*/
da.d_miniosz = mp->m_sb.sb_blocksize;
da.d_mem = mp->m_sb.sb_blocksize;
da.d_mem = da.d_miniosz = 1 << target->pbr_sshift;
/* The size dio will do in one go */
da.d_maxiosz = 64 * PAGE_CACHE_SIZE;
......
......@@ -77,6 +77,7 @@ STATIC int
_xfs_imap_to_bmap(
xfs_iocore_t *io,
xfs_off_t offset,
int new,
xfs_bmbt_irec_t *imap,
page_buf_bmap_t *pbmapp,
int imaps, /* Number of imap entries */
......@@ -117,6 +118,9 @@ _xfs_imap_to_bmap(
pbmapp->pbm_flags |= PBMF_EOF;
}
if (new)
pbmapp->pbm_flags |= PBMF_NEW;
offset += pbmapp->pbm_bsize - pbmapp->pbm_delta;
}
return pbm; /* Return the number filled */
......@@ -134,16 +138,18 @@ xfs_iomap(
xfs_mount_t *mp = io->io_mount;
xfs_fileoff_t offset_fsb, end_fsb;
int error = 0;
int new = 0;
int lockmode = 0;
xfs_bmbt_irec_t imap;
int nimaps = 1;
int bmap_flags = 0;
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
return -XFS_ERROR(EIO);
switch (flags &
(BMAP_READ|BMAP_WRITE|BMAP_ALLOCATE|BMAP_UNWRITTEN)) {
(BMAP_READ | BMAP_WRITE | BMAP_ALLOCATE |
BMAP_UNWRITTEN | BMAP_DEVICE)) {
case BMAP_READ:
lockmode = XFS_LCK_MAP_SHARED(mp, io);
bmap_flags = XFS_BMAPI_ENTIRE;
......@@ -168,6 +174,13 @@ xfs_iomap(
break;
case BMAP_UNWRITTEN:
goto phase2;
case BMAP_DEVICE:
lockmode = XFS_LCK_MAP_SHARED(mp, io);
pbmapp->pbm_target = io->io_flags & XFS_IOCORE_RT ?
mp->m_rtdev_targp : mp->m_ddev_targp;
error = 0;
*npbmaps = 1;
goto out;
default:
BUG();
}
......@@ -200,6 +213,7 @@ xfs_iomap(
error = XFS_IOMAP_WRITE_DELAY(mp, io, offset, count,
flags, &imap, &nimaps);
}
new = 1;
break;
case BMAP_ALLOCATE:
/* If we found an extent, return it */
......@@ -219,8 +233,8 @@ xfs_iomap(
}
if (nimaps) {
*npbmaps = _xfs_imap_to_bmap(io, offset, &imap,
pbmapp, nimaps, *npbmaps);
*npbmaps = _xfs_imap_to_bmap(io, offset, new, &imap,
pbmapp, nimaps, *npbmaps);
} else if (npbmaps) {
*npbmaps = 0;
}
......
......@@ -127,8 +127,8 @@ xfs_iozero(
if (!status) {
pos += bytes;
count -= bytes;
if (pos > ip->i_size)
ip->i_size = pos < end_size ? pos : end_size;
if (pos > i_size_read(ip))
i_size_write(ip, pos < end_size ? pos : end_size);
}
unlock:
......@@ -145,12 +145,13 @@ xfs_iozero(
ssize_t /* bytes read, or (-) error */
xfs_read(
bhv_desc_t *bdp,
struct file *filp,
struct kiocb *iocb,
const struct iovec *iovp,
unsigned long segs,
loff_t *offp,
unsigned int segs,
loff_t *offset,
cred_t *credp)
{
struct file *file = iocb->ki_filp;
size_t size = 0;
ssize_t ret;
xfs_fsize_t n;
......@@ -158,8 +159,8 @@ xfs_read(
xfs_mount_t *mp;
vnode_t *vp;
unsigned long seg;
int direct = (filp->f_flags & O_DIRECT);
int invisible = (filp->f_mode & FINVIS);
int direct = (file->f_flags & O_DIRECT);
int invisible = (file->f_mode & FINVIS);
ip = XFS_BHVTOI(bdp);
vp = BHV_TO_VNODE(bdp);
......@@ -179,33 +180,23 @@ xfs_read(
size += iv->iov_len;
if (unlikely((ssize_t)(size|iv->iov_len) < 0))
return XFS_ERROR(-EINVAL);
if (direct) { /* XFS specific check */
if ((__psint_t)iv->iov_base & BBMASK) {
if (*offp == ip->i_d.di_size)
return 0;
return XFS_ERROR(-EINVAL);
}
}
if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
continue;
if (seg == 0)
return XFS_ERROR(-EFAULT);
segs = seg;
break;
}
/* END copy & waste from filemap.c */
if (direct) {
if ((*offp & mp->m_blockmask) ||
(size & mp->m_blockmask)) {
if (*offp == ip->i_d.di_size) {
pb_target_t *target =
(ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
if ((*offset & target->pbr_smask) ||
(size & target->pbr_smask)) {
if (*offset == ip->i_d.di_size) {
return (0);
}
return -XFS_ERROR(EINVAL);
}
}
n = XFS_MAXIOFFSET(mp) - *offp;
n = XFS_MAXIOFFSET(mp) - *offset;
if ((n <= 0) || (size == 0))
return 0;
......@@ -216,21 +207,27 @@ xfs_read(
return -EIO;
}
/* OK so we are holding the I/O lock for the duration
* of the submission, then what happens if the I/O
* does not really happen here, but is scheduled
* later?
*/
xfs_ilock(ip, XFS_IOLOCK_SHARED);
if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ) && !invisible) {
int error;
vrwlock_t locktype = VRWLOCK_READ;
error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, *offp, size,
FILP_DELAY_FLAG(filp), &locktype);
error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, *offset, size,
FILP_DELAY_FLAG(file), &locktype);
if (error) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return -error;
}
}
ret = generic_file_readv(filp, iovp, segs, offp);
/* We need to deal with the iovec case seperately here */
ret = __generic_file_aio_read(iocb, iovp, segs, offset);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
XFS_STATS_ADD(xfsstats.xs_read_bytes, ret);
......@@ -245,7 +242,7 @@ ssize_t
xfs_sendfile(
bhv_desc_t *bdp,
struct file *filp,
loff_t *offp,
loff_t *offset,
size_t count,
read_actor_t actor,
void *target,
......@@ -265,7 +262,7 @@ xfs_sendfile(
XFS_STATS_INC(xfsstats.xs_read_calls);
n = XFS_MAXIOFFSET(mp) - *offp;
n = XFS_MAXIOFFSET(mp) - *offset;
if ((n <= 0) || (count == 0))
return 0;
......@@ -280,14 +277,14 @@ xfs_sendfile(
vrwlock_t locktype = VRWLOCK_READ;
int error;
error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, *offp, count,
error = XFS_SEND_DATA(mp, DM_EVENT_READ, bdp, *offset, count,
FILP_DELAY_FLAG(filp), &locktype);
if (error) {
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return -error;
}
}
ret = generic_file_sendfile(filp, offp, count, actor, target);
ret = generic_file_sendfile(filp, offset, count, actor, target);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
XFS_STATS_ADD(xfsstats.xs_read_bytes, ret);
......@@ -516,12 +513,13 @@ xfs_zero_eof(
ssize_t /* bytes written, or (-) error */
xfs_write(
bhv_desc_t *bdp,
struct file *file,
struct kiocb *iocb,
const struct iovec *iovp,
unsigned long segs,
unsigned int segs,
loff_t *offset,
cred_t *credp)
{
struct file *file = iocb->ki_filp;
size_t size = 0;
xfs_inode_t *xip;
xfs_mount_t *mp;
......@@ -555,16 +553,6 @@ xfs_write(
size += iv->iov_len;
if (unlikely((ssize_t)(size|iv->iov_len) < 0))
return XFS_ERROR(-EINVAL);
if (direct) { /* XFS specific check */
if ((__psint_t)iv->iov_base & BBMASK)
return XFS_ERROR(-EINVAL);
}
if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
continue;
if (seg == 0)
return XFS_ERROR(-EFAULT);
segs = seg;
break;
}
/* END copy & waste from filemap.c */
......@@ -576,13 +564,17 @@ xfs_write(
xfs_check_frozen(mp, bdp, XFS_FREEZE_WRITE);
if (XFS_FORCED_SHUTDOWN(xip->i_mount)) {
if (XFS_FORCED_SHUTDOWN(mp)) {
return -EIO;
}
if (direct) {
if ((*offset & mp->m_blockmask) ||
(size & mp->m_blockmask)) {
pb_target_t *target =
(xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
mp->m_rtdev_targp : mp->m_ddev_targp;
if ((*offset & target->pbr_smask) ||
(size & target->pbr_smask)) {
return XFS_ERROR(-EINVAL);
}
iolock = XFS_IOLOCK_SHARED;
......@@ -694,7 +686,7 @@ xfs_write(
xfs_inval_cached_pages(vp, &xip->i_iocore, *offset, 1, 1);
}
ret = generic_file_write_nolock(file, iovp, segs, offset);
ret = generic_file_aio_write_nolock(iocb, iovp, segs, offset);
if ((ret == -ENOSPC) &&
DM_EVENT_ENABLED(vp->v_vfsp, xip, DM_EVENT_NOSPACE) && !invisible) {
......@@ -711,25 +703,26 @@ xfs_write(
}
if (ret <= 0) {
xfs_rwunlock(bdp, locktype);
return ret;
}
XFS_STATS_ADD(xfsstats.xs_write_bytes, ret);
if (*offset > xip->i_d.di_size) {
xfs_ilock(xip, XFS_ILOCK_EXCL);
if (*offset > xip->i_d.di_size) {
struct inode *inode = LINVFS_GET_IP(vp);
inode->i_size = xip->i_d.di_size = *offset;
xip->i_d.di_size = *offset;
i_size_write(inode, *offset);
xip->i_update_core = 1;
xip->i_update_size = 1;
}
xfs_iunlock(xip, XFS_ILOCK_EXCL);
}
if (ret <= 0) {
xfs_rwunlock(bdp, locktype);
return ret;
}
XFS_STATS_ADD(xfsstats.xs_write_bytes, ret);
/* Handle various SYNC-type writes */
if ((file->f_flags & O_SYNC) || IS_SYNC(file->f_dentry->d_inode)) {
......
......@@ -54,11 +54,11 @@ extern int xfs_bdstrat_cb(struct page_buf_s *);
extern int xfs_zero_eof(struct vnode *, struct xfs_iocore *, xfs_off_t,
xfs_fsize_t, xfs_fsize_t);
extern ssize_t xfs_read(struct bhv_desc *, struct file *,
const struct iovec *, unsigned long,
extern ssize_t xfs_read(struct bhv_desc *, struct kiocb *,
const struct iovec *, unsigned int,
loff_t *, struct cred *);
extern ssize_t xfs_write(struct bhv_desc *, struct file *,
const struct iovec *, unsigned long,
extern ssize_t xfs_write(struct bhv_desc *, struct kiocb *,
const struct iovec *, unsigned int,
loff_t *, struct cred *);
extern ssize_t xfs_sendfile(struct bhv_desc *, struct file *,
loff_t *, size_t, read_actor_t,
......
......@@ -158,11 +158,11 @@ typedef enum vchange {
typedef int (*vop_open_t)(bhv_desc_t *, struct cred *);
typedef ssize_t (*vop_read_t)(bhv_desc_t *, struct file *,
const struct iovec *, unsigned long,
typedef ssize_t (*vop_read_t)(bhv_desc_t *, struct kiocb *,
const struct iovec *, unsigned int,
loff_t *, struct cred *);
typedef ssize_t (*vop_write_t)(bhv_desc_t *, struct file *,
const struct iovec *, unsigned long,
typedef ssize_t (*vop_write_t)(bhv_desc_t *, struct kiocb *,
const struct iovec *, unsigned int,
loff_t *, struct cred *);
typedef ssize_t (*vop_sendfile_t)(bhv_desc_t *, struct file *,
loff_t *, size_t, read_actor_t,
......
......@@ -79,8 +79,9 @@ typedef enum { /* pbm_flags values */
PBMF_EOF = 0x01, /* mapping contains EOF */
PBMF_HOLE = 0x02, /* mapping covers a hole */
PBMF_DELAY = 0x04, /* mapping covers delalloc region */
PBMF_UNWRITTEN = 0x20 /* mapping covers allocated */
PBMF_UNWRITTEN = 0x20, /* mapping covers allocated */
/* but uninitialized file data */
PBMF_NEW = 0x40 /* just allocated */
} bmap_flags_t;
typedef enum {
......@@ -95,6 +96,7 @@ typedef enum {
BMAP_MMAP = (1 << 6), /* allocate for mmap write */
BMAP_SYNC = (1 << 7), /* sync write */
BMAP_TRYLOCK = (1 << 8), /* non-blocking request */
BMAP_DEVICE = (1 << 9), /* we only want to know the device */
} bmapi_flags_t;
typedef enum page_buf_flags_e { /* pb_flags values */
......
......@@ -219,6 +219,8 @@ typedef int (get_block_t)(struct inode *inode, sector_t iblock,
typedef int (get_blocks_t)(struct inode *inode, sector_t iblock,
unsigned long max_blocks,
struct buffer_head *bh_result, int create);
typedef void (dio_iodone_t)(struct inode *inode, loff_t offset,
ssize_t bytes, void *private);
/*
* Attribute flags. These should be or-ed together to figure out what
......@@ -1274,6 +1276,7 @@ int generic_write_checks(struct inode *inode, struct file *file,
loff_t *pos, size_t *count, int isblk);
extern ssize_t generic_file_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t generic_file_aio_read(struct kiocb *, char __user *, size_t, loff_t);
extern ssize_t __generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t *);
extern ssize_t generic_file_aio_write(struct kiocb *, const char __user *, size_t, loff_t);
extern ssize_t generic_file_aio_write_nolock(struct kiocb *, const struct iovec *,
unsigned long, loff_t *);
......@@ -1290,7 +1293,7 @@ extern ssize_t generic_file_direct_IO(int rw, struct kiocb *iocb,
const struct iovec *iov, loff_t offset, unsigned long nr_segs);
extern int blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, const struct iovec *iov, loff_t offset,
unsigned long nr_segs, get_blocks_t *get_blocks);
unsigned long nr_segs, get_blocks_t *get_blocks, dio_iodone_t *end_io);
extern ssize_t generic_file_readv(struct file *filp, const struct iovec *iov,
unsigned long nr_segs, loff_t *ppos);
ssize_t generic_file_writev(struct file *filp, const struct iovec *iov,
......
......@@ -724,7 +724,7 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
* This is the "read()" routine for all filesystems
* that can use the page cache directly.
*/
static ssize_t
ssize_t
__generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t *ppos)
{
......@@ -809,6 +809,7 @@ generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t
return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
}
EXPORT_SYMBOL(generic_file_aio_read);
EXPORT_SYMBOL(__generic_file_aio_read);
ssize_t
generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment