Commit 00d9eb99 authored by Nathan Scott's avatar Nathan Scott

[XFS] Add some IO path tracing, move inval_cached_pages to a better home to help.

SGI Modid: 2.5.x-xfs:slinx:160171a
parent d01cc5eb
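The trace hooks added below record each event through ktrace_enter(), whose slots are pointer-sized; every 64-bit quantity (on-disk size, offset, length, io_new_size) is therefore split into a high and a low 32-bit word. A minimal standalone sketch of that packing and of how a dumper could reassemble the value, assuming nothing beyond standard C (the TRACE_HI/TRACE_LO/trace_unpack names are illustrative, not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helpers mirroring the packing used by the new trace calls:
 * a 64-bit value is split into two 32-bit halves, high word first, so each
 * half fits a pointer-sized trace slot on both 32- and 64-bit kernels. */
#define TRACE_HI(v) ((void *)(unsigned long)(((uint64_t)(v) >> 32) & 0xffffffff))
#define TRACE_LO(v) ((void *)(unsigned long)((uint64_t)(v) & 0xffffffff))

/* Reassemble a recorded pair, as a trace dumper would. */
static uint64_t trace_unpack(void *hi, void *lo)
{
	return ((uint64_t)(unsigned long)hi << 32) | (uint64_t)(unsigned long)lo;
}

int main(void)
{
	uint64_t offset = 0x123456789abcULL;
	void *hi = TRACE_HI(offset), *lo = TRACE_LO(offset);

	printf("hi=%p lo=%p unpacked=%#llx\n",
	       hi, lo, (unsigned long long)trace_unpack(hi, lo));
	return 0;
}

Masking with 0xffffffff before the cast keeps each slot to 32 bits regardless of the native word size, which is the same idiom the hooks in the patch use.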
@@ -75,6 +75,71 @@
#include <linux/capability.h>
#if defined(XFS_RW_TRACE)
void
xfs_rw_enter_trace(
int tag,
xfs_iocore_t *io,
const struct iovec *iovp,
size_t segs,
loff_t offset,
int ioflags)
{
xfs_inode_t *ip = XFS_IO_INODE(io);
if (ip->i_rwtrace == NULL)
return;
ktrace_enter(ip->i_rwtrace,
(void *)(unsigned long)tag,
(void *)ip,
(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
(void *)(__psint_t)iovp,
(void *)((unsigned long)segs),
(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
(void *)((unsigned long)(offset & 0xffffffff)),
(void *)((unsigned long)ioflags),
(void *)((unsigned long)((io->io_new_size >> 32) & 0xffffffff)),
(void *)((unsigned long)(io->io_new_size & 0xffffffff)),
(void *)NULL,
(void *)NULL,
(void *)NULL,
(void *)NULL,
(void *)NULL);
}
void
xfs_inval_cached_trace(
xfs_iocore_t *io,
xfs_off_t offset,
xfs_off_t len,
xfs_off_t first,
xfs_off_t last)
{
xfs_inode_t *ip = XFS_IO_INODE(io);
if (ip->i_rwtrace == NULL)
return;
ktrace_enter(ip->i_rwtrace,
(void *)(__psint_t)XFS_INVAL_CACHED,
(void *)ip,
(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
(void *)((unsigned long)(offset & 0xffffffff)),
(void *)((unsigned long)((len >> 32) & 0xffffffff)),
(void *)((unsigned long)(len & 0xffffffff)),
(void *)((unsigned long)((first >> 32) & 0xffffffff)),
(void *)((unsigned long)(first & 0xffffffff)),
(void *)((unsigned long)((last >> 32) & 0xffffffff)),
(void *)((unsigned long)(last & 0xffffffff)),
(void *)NULL,
(void *)NULL,
(void *)NULL,
(void *)NULL,
(void *)NULL,
(void *)NULL);
}
#endif
/*
* xfs_iozero
*
@@ -142,6 +207,59 @@ xfs_iozero(
return (-status);
}
/*
* xfs_inval_cached_pages
*
* This routine is responsible for keeping direct I/O and buffered I/O
* somewhat coherent. From here we make sure that we're at least
* temporarily holding the inode I/O lock exclusively and then call
* the page cache to flush and invalidate any cached pages. If there
* are no cached pages this routine will be very quick.
*/
void
xfs_inval_cached_pages(
vnode_t *vp,
xfs_iocore_t *io,
xfs_off_t offset,
int write,
int relock)
{
xfs_mount_t *mp;
if (!VN_CACHED(vp)) {
return;
}
mp = io->io_mount;
/*
* We need to get the I/O lock exclusively in order
* to safely invalidate pages and mappings.
*/
if (relock) {
XFS_IUNLOCK(mp, io, XFS_IOLOCK_SHARED);
XFS_ILOCK(mp, io, XFS_IOLOCK_EXCL);
}
/* Writing beyond EOF creates a hole that must be zeroed */
if (write && (offset > XFS_SIZE(mp, io))) {
xfs_fsize_t isize;
XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
isize = XFS_SIZE(mp, io);
if (offset > isize) {
xfs_zero_eof(vp, io, offset, isize, offset);
}
XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
}
xfs_inval_cached_trace(io, offset, -1, ctooff(offtoct(offset)), -1);
VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(offset)), -1, FI_REMAPF_LOCKED);
if (relock) {
XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
}
}
ssize_t /* bytes read, or (-) error */
xfs_read(
bhv_desc_t *bdp,
@@ -684,9 +802,13 @@ xfs_write(
retry:
if (ioflags & IO_ISDIRECT) {
xfs_inval_cached_pages(vp, &xip->i_iocore, *offset, 1, 1);
xfs_inval_cached_pages(vp, io, *offset, 1, 1);
xfs_rw_enter_trace(XFS_DIOWR_ENTER,
io, iovp, segs, *offset, ioflags);
} else {
xfs_rw_enter_trace(XFS_WRITE_ENTER,
io, iovp, segs, *offset, ioflags);
}
ret = generic_file_aio_write_nolock(iocb, iovp, segs, offset);
if ((ret == -ENOSPC) &&
@@ -702,7 +824,6 @@ xfs_write(
xfs_rwlock(bdp, locktype);
*offset = xip->i_d.di_size;
goto retry;
}
if (*offset > xip->i_d.di_size) {
......
@@ -41,7 +41,43 @@ struct xfs_bmbt_irec;
struct page_buf_s;
struct page_buf_bmap_s;
#if defined(XFS_RW_TRACE)
/*
* Defines for the trace mechanisms in xfs_lrw.c.
*/
#define XFS_RW_KTRACE_SIZE 64
#define XFS_STRAT_KTRACE_SIZE 64
#define XFS_STRAT_GTRACE_SIZE 512
#define XFS_READ_ENTER 1
#define XFS_WRITE_ENTER 2
#define XFS_IOMAP_READ_ENTER 3
#define XFS_IOMAP_WRITE_ENTER 4
#define XFS_IOMAP_READ_MAP 5
#define XFS_IOMAP_WRITE_MAP 6
#define XFS_IOMAP_WRITE_NOSPACE 7
#define XFS_ITRUNC_START 8
#define XFS_ITRUNC_FINISH1 9
#define XFS_ITRUNC_FINISH2 10
#define XFS_CTRUNC1 11
#define XFS_CTRUNC2 12
#define XFS_CTRUNC3 13
#define XFS_CTRUNC4 14
#define XFS_CTRUNC5 15
#define XFS_CTRUNC6 16
#define XFS_BUNMAPI 17
#define XFS_INVAL_CACHED 18
#define XFS_DIORD_ENTER 19
#define XFS_DIOWR_ENTER 20
extern void xfs_rw_enter_trace(int, struct xfs_iocore *,
const struct iovec *, int, loff_t, int);
extern void xfs_inval_cached_trace(struct xfs_iocore *,
xfs_off_t, xfs_off_t, xfs_off_t, xfs_off_t);
#else
#define xfs_rw_enter_trace(tag, io, iovec, segs, offset, ioflags)
#define xfs_inval_cached_trace(io, offset, len, first, last)
#endif
/*
* Maximum count of bmaps used by read and write paths.
*/
@@ -54,6 +90,8 @@ extern int xfs_bdstrat_cb(struct page_buf_s *);
extern int xfs_zero_eof(struct vnode *, struct xfs_iocore *, xfs_off_t,
xfs_fsize_t, xfs_fsize_t);
extern void xfs_inval_cached_pages(struct vnode *, struct xfs_iocore *,
xfs_off_t, int, int);
extern ssize_t xfs_read(struct bhv_desc *, struct kiocb *,
const struct iovec *, unsigned int,
loff_t *, int, struct cred *);
......
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -351,54 +351,3 @@ xfs_bwrite(
}
return (error);
}
/*
* xfs_inval_cached_pages()
* This routine is responsible for keeping direct I/O and buffered I/O
* somewhat coherent. From here we make sure that we're at least
* temporarily holding the inode I/O lock exclusively and then call
* the page cache to flush and invalidate any cached pages. If there
* are no cached pages this routine will be very quick.
*/
void
xfs_inval_cached_pages(
vnode_t *vp,
xfs_iocore_t *io,
xfs_off_t offset,
int write,
int relock)
{
xfs_mount_t *mp;
if (!VN_CACHED(vp)) {
return;
}
mp = io->io_mount;
/*
* We need to get the I/O lock exclusively in order
* to safely invalidate pages and mappings.
*/
if (relock) {
XFS_IUNLOCK(mp, io, XFS_IOLOCK_SHARED);
XFS_ILOCK(mp, io, XFS_IOLOCK_EXCL);
}
/* Writing beyond EOF creates a hole that must be zeroed */
if (write && (offset > XFS_SIZE(mp, io))) {
xfs_fsize_t isize;
XFS_ILOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
isize = XFS_SIZE(mp, io);
if (offset > isize) {
xfs_zero_eof(vp, io, offset, isize, offset);
}
XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
}
VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(offset)), -1, FI_REMAPF_LOCKED);
if (relock) {
XFS_ILOCK_DEMOTE(mp, io, XFS_IOLOCK_EXCL);
}
}
/*
* Copyright (c) 2000-2002 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -32,16 +32,9 @@
#ifndef __XFS_RW_H__
#define __XFS_RW_H__
struct bhv_desc;
struct bmapval;
struct xfs_buf;
struct cred;
struct uio;
struct vnode;
struct xfs_inode;
struct xfs_iocore;
struct xfs_mount;
struct xfs_trans;
/*
* Maximum count of bmaps used by read and write paths.
@@ -90,44 +83,6 @@ xfs_daddr_t xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb);
XFS_FSB_TO_BB((io)->io_mount, (fsb)) : \
XFS_FSB_TO_DADDR((io)->io_mount, (fsb)))
/*
* Defines for the trace mechanisms in xfs_rw.c.
*/
#define XFS_RW_KTRACE_SIZE 64
#define XFS_STRAT_KTRACE_SIZE 64
#define XFS_STRAT_GTRACE_SIZE 512
#define XFS_READ_ENTER 1
#define XFS_WRITE_ENTER 2
#define XFS_IOMAP_READ_ENTER 3
#define XFS_IOMAP_WRITE_ENTER 4
#define XFS_IOMAP_READ_MAP 5
#define XFS_IOMAP_WRITE_MAP 6
#define XFS_IOMAP_WRITE_NOSPACE 7
#define XFS_ITRUNC_START 8
#define XFS_ITRUNC_FINISH1 9
#define XFS_ITRUNC_FINISH2 10
#define XFS_CTRUNC1 11
#define XFS_CTRUNC2 12
#define XFS_CTRUNC3 13
#define XFS_CTRUNC4 14
#define XFS_CTRUNC5 15
#define XFS_CTRUNC6 16
#define XFS_BUNMAPI 17
#define XFS_INVAL_CACHED 18
#define XFS_DIORD_ENTER 19
#define XFS_DIOWR_ENTER 20
#if defined(XFS_ALL_TRACE)
#define XFS_RW_TRACE
#define XFS_STRAT_TRACE
#endif
#if !defined(DEBUG)
#undef XFS_RW_TRACE
#undef XFS_STRAT_TRACE
#endif
/*
* Prototypes for functions in xfs_rw.c.
*/
@@ -141,14 +96,6 @@ xfs_bwrite(
struct xfs_mount *mp,
struct xfs_buf *bp);
void
xfs_inval_cached_pages(
struct vnode *vp,
struct xfs_iocore *io,
xfs_off_t offset,
int write,
int relock);
int
xfs_bioerror(
struct xfs_buf *b);
......