Commit b2677e18 authored by Christoph Hellwig

[XFS] merge Steve's sync changes over to 2.5

SGI Modid: 2.5.x-xfs:slinx:147932a
parent d8475074
@@ -125,6 +125,7 @@ xfs-y += $(addprefix linux/, \
 	xfs_iomap.o \
 	xfs_iops.o \
 	xfs_lrw.o \
+	xfs_syncd.o \
 	xfs_super.o \
 	xfs_vfs.o \
 	xfs_vnode.o)
...
@@ -48,7 +48,7 @@ unsigned long xfs_physmem;
  * Tunable XFS parameters.  xfs_params is required even when CONFIG_SYSCTL=n,
  * other XFS code uses these values.
  */
-xfs_param_t xfs_params = { 0, 1, 0, 0, 0, 3 };
+xfs_param_t xfs_params = { 0, 1, 0, 0, 0, 3, 30 * HZ };
 
 /*
  * Global system credential structure.
...
@@ -152,8 +152,6 @@ linvfs_mknod(
 			ip->i_rdev = to_kdev_t(rdev);
 		validate_fields(dir);
 		d_instantiate(dentry, ip);
-		mark_inode_dirty_sync(ip);
-		mark_inode_dirty_sync(dir);
 	}
 
 	if (!error && have_default_acl) {
@@ -240,7 +238,6 @@ linvfs_link(
 		VN_HOLD(vp);
 		validate_fields(ip);
 		d_instantiate(dentry, ip);
-		mark_inode_dirty_sync(ip);
 	}
 	return -error;
 }
@@ -261,8 +258,6 @@ linvfs_unlink(
 	if (!error) {
 		validate_fields(dir);	/* For size only */
 		validate_fields(inode);
-		mark_inode_dirty_sync(inode);
-		mark_inode_dirty_sync(dir);
 	}
 
 	return -error;
@@ -296,8 +291,6 @@ linvfs_symlink(
 		d_instantiate(dentry, ip);
 		validate_fields(dir);
 		validate_fields(ip);	/* size needs update */
-		mark_inode_dirty_sync(ip);
-		mark_inode_dirty_sync(dir);
 	}
 	return -error;
 }
@@ -315,8 +308,6 @@ linvfs_rmdir(
 	if (!error) {
 		validate_fields(inode);
 		validate_fields(dir);
-		mark_inode_dirty_sync(inode);
-		mark_inode_dirty_sync(dir);
 	}
 	return -error;
 }
@@ -346,7 +337,6 @@ linvfs_rename(
 	validate_fields(odir);
 	if (ndir != odir)
 		validate_fields(ndir);
-	mark_inode_dirty(ndir);
 	return 0;
 }
@@ -520,7 +510,6 @@ linvfs_setattr(
 	if (!error) {
 		vn_revalidate(vp);
-		mark_inode_dirty_sync(inode);
 	}
 	return error;
 }
...
@@ -331,9 +331,10 @@ destroy_inodecache( void )
 }
 
 /*
- * We do not actually write the inode here, just mark the
- * super block dirty so that sync_supers calls us and
- * forces the flush.
+ * Attempt to flush the inode, this will actually fail
+ * if the inode is pinned, but we dirty the inode again
+ * at the point when it is unpinned after a log write,
+ * since this is when the inode itself becomes flushable.
  */
 STATIC void
 linvfs_write_inode(
@@ -348,8 +349,6 @@ linvfs_write_inode(
 		if (sync)
 			flags |= FLUSH_SYNC;
 		VOP_IFLUSH(vp, flags, error);
-		if (error == EAGAIN)
-			inode->i_sb->s_dirt = 1;
 	}
 }
@@ -376,9 +375,11 @@ linvfs_put_super(
 	vfs_t		*vfsp = LINVFS_GET_VFS(sb);
 	int		error;
 
+	linvfs_stop_syncd(vfsp);
 	VFS_SYNC(vfsp, SYNC_ATTR|SYNC_DELWRI, NULL, error);
-	if (error == 0)
+	if (error == 0) {
 		VFS_UNMOUNT(vfsp, 0, NULL, error);
+	}
 	if (error) {
 		printk("XFS unmount got error %d\n", error);
 		printk("%s: vfsp/0x%p left dangling!\n", __FUNCTION__, vfsp);
@@ -395,10 +396,13 @@ linvfs_write_super(
 	vfs_t		*vfsp = LINVFS_GET_VFS(sb);
 	int		error;
 
-	sb->s_dirt = 0;
-	if (sb->s_flags & MS_RDONLY)
+	if (sb->s_flags & MS_RDONLY) {
+		sb->s_dirt = 0; /* paranoia */
 		return;
-	VFS_SYNC(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL, error);
+	}
+	/* Push the log and superblock a little */
+	VFS_SYNC(vfsp, SYNC_FSDATA, NULL, error);
+	sb->s_dirt = 0;
 }
 
 STATIC int
@@ -652,7 +656,8 @@ linvfs_fill_super(
 		goto fail_vnrele;
 	if (is_bad_inode(sb->s_root->d_inode))
 		goto fail_vnrele;
+	if (linvfs_start_syncd(vfsp))
+		goto fail_vnrele;
 
 	vn_trace_exit(rootvp, __FUNCTION__, (inst_t *)__return_address);
 	kmem_free(args, sizeof(*args));
...
@@ -104,5 +104,7 @@ extern void xfs_free_buftarg(struct pb_target *);
 extern void xfs_setsize_buftarg(struct pb_target *, unsigned int, unsigned int);
 extern unsigned int xfs_getsize_buftarg(struct pb_target *);
 
+extern int  linvfs_start_syncd(vfs_t *);
+extern void linvfs_stop_syncd(vfs_t *);
 
 #endif	/* __XFS_SUPER_H__ */
...
/*
* Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
*/
#include <xfs.h>

static void sync_timeout(unsigned long __data)
{
	struct task_struct	*p = (struct task_struct *)__data;

	wake_up_process(p);
}

#define SYNCD_FLAGS	(SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR)

int syncd(void *arg)
{
	vfs_t			*vfsp = (vfs_t *) arg;
	int			error;
	struct timer_list	timer;

	daemonize("xfs_syncd");

	vfsp->vfs_sync_task = current;
	wmb();
	wake_up(&vfsp->vfs_wait_sync_task);

	init_timer(&timer);
	timer.data = (unsigned long)current;
	timer.function = sync_timeout;

	do {
		mod_timer(&timer, jiffies + xfs_params.sync_interval);
		interruptible_sleep_on(&vfsp->vfs_sync);
		if (!(vfsp->vfs_flag & VFS_RDONLY))
			VFS_SYNC(vfsp, SYNCD_FLAGS, NULL, error);
	} while (!(vfsp->vfs_flag & VFS_UMOUNT));

	del_timer_sync(&timer);

	vfsp->vfs_sync_task = NULL;
	wmb();
	wake_up(&vfsp->vfs_wait_sync_task);

	return 0;
}

int
linvfs_start_syncd(vfs_t *vfsp)
{
	int	pid;

	pid = kernel_thread(syncd, (void *) vfsp,
			CLONE_VM | CLONE_FS | CLONE_FILES);
	if (pid < 0)
		return pid;
	wait_event(vfsp->vfs_wait_sync_task, vfsp->vfs_sync_task);
	return 0;
}

void
linvfs_stop_syncd(vfs_t *vfsp)
{
	vfsp->vfs_flag |= VFS_UMOUNT;
	wmb();

	wake_up(&vfsp->vfs_sync);
	wait_event(vfsp->vfs_wait_sync_task, !vfsp->vfs_sync_task);
}
@@ -36,8 +36,8 @@
 #include <linux/proc_fs.h>
 
-STATIC ulong xfs_min[XFS_PARAM] = { 0, 0, 0, 0, 0, 0 };
-STATIC ulong xfs_max[XFS_PARAM] = { 1, 1, 1, 1, 127, 3 };
+STATIC ulong xfs_min[XFS_PARAM] = { 0, 0, 0, 0, 0, 0, HZ };
+STATIC ulong xfs_max[XFS_PARAM] = { 1, 1, 1, 1, 127, 3, HZ * 60 };
 
 static struct ctl_table_header *xfs_table_header;
@@ -92,6 +92,10 @@ STATIC ctl_table xfs_table[] = {
	 sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax,
	 &sysctl_intvec, NULL, &xfs_min[5], &xfs_max[5]},
 
+	{XFS_SYNC_INTERVAL, "sync_interval", &xfs_params.sync_interval,
+	sizeof(ulong), 0644, NULL, &proc_doulongvec_minmax,
+	&sysctl_intvec, NULL, &xfs_min[6], &xfs_max[6]},
+
 	{0}
 };
...
@@ -49,6 +49,7 @@ typedef struct xfs_param {
 	ulong	symlink_mode;	/* Symlink creat mode affected by umask. */
 	ulong	panic_mask;	/* bitmask to specify panics on errors.  */
 	ulong	error_level;	/* Degree of reporting for internal probs*/
+	ulong	sync_interval;	/* time between sync calls		  */
 } xfs_param_t;
 
 /*
@@ -73,6 +74,7 @@ enum {
 	XFS_SYMLINK_MODE = 4,
 	XFS_PANIC_MASK = 5,
 	XFS_ERRLEVEL = 6,
+	XFS_SYNC_INTERVAL = 7,
 };
 
 extern xfs_param_t xfs_params;
...
@@ -238,6 +238,8 @@ vfs_allocate( void )
 	vfsp = kmem_zalloc(sizeof(vfs_t), KM_SLEEP);
 	bhv_head_init(VFS_BHVHEAD(vfsp), "vfs");
+	init_waitqueue_head(&vfsp->vfs_wait_sync_task);
+	init_waitqueue_head(&vfsp->vfs_sync);
 	return vfsp;
 }
...
@@ -48,6 +48,9 @@ typedef struct vfs {
 	fsid_t		*vfs_altfsid;	/* An ID fixed for life of FS	*/
 	bhv_head_t	vfs_bh;		/* head of vfs behavior chain	*/
 	struct super_block *vfs_super;	/* Linux superblock structure	*/
+	struct task_struct *vfs_sync_task;
+	wait_queue_head_t vfs_sync;
+	wait_queue_head_t vfs_wait_sync_task;
 } vfs_t;
 
 #define vfs_fbhv	vfs_bh.bh_first	/* 1st on vfs behavior chain	*/
@@ -78,7 +81,8 @@ typedef enum {
 #define VFS_RDONLY	0x0001	/* read-only vfs			*/
 #define VFS_GRPID	0x0002	/* group-ID assigned from directory	*/
 #define VFS_DMI		0x0004	/* filesystem has the DMI enabled	*/
-#define VFS_END		0x0004	/* max flag				*/
+#define VFS_UMOUNT	0x0008	/* unmount in progress			*/
+#define VFS_END		0x0008	/* max flag				*/
 
 #define SYNC_ATTR	0x0001	/* sync attributes			*/
 #define SYNC_CLOSE	0x0002	/* close file system down		*/
@@ -87,6 +91,7 @@ typedef enum {
 #define SYNC_FSDATA	0x0020	/* flush fs data (e.g. superblocks)	*/
 #define SYNC_BDFLUSH	0x0010	/* BDFLUSH is calling -- don't block	*/
 
 typedef int	(*vfs_mount_t)(bhv_desc_t *,
				struct xfs_mount_args *, struct cred *);
 typedef int	(*vfs_parseargs_t)(bhv_desc_t *, char *,
...
@@ -562,8 +562,7 @@ static __inline__ void vn_flagclr(struct vnode *vp, uint flag)
		(!list_empty(&(LINVFS_GET_IP(vp)->i_mapping->i_mmap_shared))))
 #define VN_CACHED(vp)	(LINVFS_GET_IP(vp)->i_mapping->nrpages)
 #define VN_DIRTY(vp)	(!list_empty(&(LINVFS_GET_IP(vp)->i_mapping->dirty_pages)))
-#define VMODIFY(vp)	{ VN_FLAGSET(vp, VMODIFIED); \
-			mark_inode_dirty(LINVFS_GET_IP(vp)); }
+#define VMODIFY(vp)	VN_FLAGSET(vp, VMODIFIED)
 #define VUNMODIFY(vp)	VN_FLAGCLR(vp, VMODIFIED)
 
 /*
...
@@ -252,6 +252,11 @@ xfs_iget_core(
 		if (newnode) {
 			xfs_iocore_inode_reinit(ip);
 		}
+
+		XFS_MOUNT_ILOCK(mp);
+		list_del_init(&ip->i_reclaim);
+		XFS_MOUNT_IUNLOCK(mp);
+
 		vn_trace_exit(vp, "xfs_iget.found",
			      (inst_t *)__return_address);
 		goto return_ip;
@@ -467,8 +472,10 @@ xfs_iget(
 	}
 
 	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
-	if (bdp == NULL)
+	if (bdp == NULL) {
+		XFS_STATS_INC(xfsstats.xs_ig_dup);
 		goto inode_allocate;
+	}
 	ip = XFS_BHVTOI(bdp);
 	if (lock_flags != 0)
 		xfs_ilock(ip, lock_flags);
@@ -720,6 +727,9 @@ xfs_iextract(
 		}
 	}
 
+	/* Deal with the deleted inodes list */
+	list_del_init(&ip->i_reclaim);
+
 	mp->m_ireclaims++;
 	XFS_MOUNT_IUNLOCK(mp);
 }
...
@@ -976,6 +976,8 @@ xfs_iread(
			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
 	}
 
+	INIT_LIST_HEAD(&ip->i_reclaim);
+
 	/*
 	 * The inode format changed when we moved the link count and
 	 * made it 32 bits long.  If this is an old format inode,
@@ -2625,6 +2627,15 @@ xfs_iunpin(
 	ASSERT(atomic_read(&ip->i_pincount) > 0);
 
 	if (atomic_dec_and_test(&ip->i_pincount)) {
+		vnode_t	*vp = XFS_ITOV_NULL(ip);
+
+		/* make sync come back and flush this inode */
+		if (vp) {
+			struct inode	*inode = LINVFS_GET_IP(vp);
+
+			mark_inode_dirty_sync(inode);
+		}
+
 		wake_up(&ip->i_ipin_wait);
 	}
 }
@@ -3640,6 +3651,8 @@ xfs_ichgtime(xfs_inode_t *ip,
 	 */
 	SYNCHRONIZE();
 	ip->i_update_core = 1;
+	if (!(inode->i_state & I_LOCK))
+		mark_inode_dirty(inode);
 }
 
 #ifdef XFS_ILOCK_TRACE
...
@@ -243,6 +243,7 @@ typedef struct xfs_inode {
 	struct xfs_inode	*i_mprev;	/* ptr to prev inode */
 	struct xfs_inode	**i_prevp;	/* ptr to prev i_next */
 	struct xfs_mount	*i_mount;	/* fs mount struct ptr */
+	struct list_head	i_reclaim;	/* reclaim list */
 	struct bhv_desc		i_bhv_desc;	/* inode behavior descriptor*/
 	struct xfs_dquot	*i_udquot;	/* user dquot */
 	struct xfs_dquot	*i_gdquot;	/* group dquot */
@@ -477,7 +478,7 @@ void xfs_iunlock_map_shared(xfs_inode_t *, uint);
 void		xfs_ifunlock(xfs_inode_t *);
 void		xfs_ireclaim(xfs_inode_t *);
 int		xfs_finish_reclaim(xfs_inode_t *, int, int);
-int		xfs_finish_reclaim_all(struct xfs_mount *);
+int		xfs_finish_reclaim_all(struct xfs_mount *, int);
 
 /*
  * xfs_inode.c prototypes.
...
@@ -546,6 +546,7 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
 	mp->m_blockmask = sbp->sb_blocksize - 1;
 	mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
 	mp->m_blockwmask = mp->m_blockwsize - 1;
+	INIT_LIST_HEAD(&mp->m_del_inodes);
 
 	if (XFS_SB_VERSION_HASLOGV2(sbp)) {
@@ -601,7 +602,6 @@ xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
					sbp->sb_inopblock);
 	mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
 }
 /*
  * xfs_mountfs
  *
...
@@ -296,6 +296,7 @@ typedef struct xfs_mount {
 	int			m_ihsize;	/* size of next field */
 	struct xfs_ihash	*m_ihash;	/* fs private inode hash table*/
 	struct xfs_inode	*m_inodes;	/* active inode list */
+	struct list_head	m_del_inodes;	/* inodes to reclaim */
 	mutex_t			m_ilock;	/* inode list mutex */
 	uint			m_ireclaims;	/* count of calls to reclaim*/
 	uint			m_readio_log;	/* min read size log bytes */
...
@@ -620,7 +620,7 @@ xfs_mntupdate(
 	if (*flags & MS_RDONLY) {
 		pagebuf_delwri_flush(mp->m_ddev_targp, 0, NULL);
-		xfs_finish_reclaim_all(mp);
+		xfs_finish_reclaim_all(mp, 0);
 
 		do {
			VFS_SYNC(vfsp, SYNC_ATTR|SYNC_WAIT, NULL, error);
@@ -849,19 +849,14 @@ xfs_sync(
  * xfs sync routine for internal use
  *
  * This routine supports all of the flags defined for the generic VFS_SYNC
- * interface as explained above under xys_sync. In the interests of not
+ * interface as explained above under xfs_sync. In the interests of not
  * changing interfaces within the 6.5 family, additional internallly-
  * required functions are specified within a separate xflags parameter,
  * only available by calling this routine.
  *
- * xflags:
- *	XFS_XSYNC_RELOC - Sync for relocation.  Don't try to get behavior
- *			  locks as this will cause you to hang.  Not all
- *			  combinations of flags are necessarily supported
- *			  when this is specified.
  */
-int
-xfs_syncsub(
+STATIC int
+xfs_sync_inodes(
 	xfs_mount_t	*mp,
 	int		flags,
 	int		xflags,
@@ -877,12 +872,10 @@ xfs_syncsub(
 	uint64_t	fflag;
 	uint		lock_flags;
 	uint		base_lock_flags;
-	uint		log_flags;
 	boolean_t	mount_locked;
 	boolean_t	vnode_refed;
 	int		preempt;
 	xfs_dinode_t	*dip;
-	xfs_buf_log_item_t	*bip;
 	xfs_iptr_t	*ipointer;
 #ifdef DEBUG
 	boolean_t	ipointer_in = B_FALSE;
@@ -961,16 +954,6 @@ xfs_syncsub(
 		base_lock_flags |= XFS_IOLOCK_SHARED;
 	}
 
-	/*
-	 * Sync out the log.  This ensures that the log is periodically
-	 * flushed even if there is not enough activity to fill it up.
-	 */
-	if (flags & SYNC_WAIT) {
-		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
-	} else {
-		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
-	}
-
 	XFS_MOUNT_ILOCK(mp);
 
 	ip = mp->m_inodes;
@@ -1016,9 +999,6 @@ xfs_syncsub(
			ip = ip->i_mnext;
			continue;
 		}
-		if ((ip->i_update_core == 0) &&
-		    ((ip->i_itemp == NULL) ||
-		     !(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL))) {
			if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0) {
				ip = ip->i_mnext;
			} else if ((xfs_ipincount(ip) == 0) &&
@@ -1026,7 +1006,7 @@ xfs_syncsub(
				IPOINTER_INSERT(ip, mp);
				xfs_finish_reclaim(ip, 1,
-						XFS_IFLUSH_DELWRI_ELSE_SYNC);
+						XFS_IFLUSH_DELWRI_ELSE_ASYNC);
 
				XFS_MOUNT_ILOCK(mp);
				mount_locked = B_TRUE;
@@ -1037,7 +1017,6 @@ xfs_syncsub(
			}
			continue;
 		}
-		}
 
 		if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) {
			XFS_MOUNT_IUNLOCK(mp);
@@ -1148,22 +1127,10 @@ xfs_syncsub(
				xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
				if (XFS_FORCED_SHUTDOWN(mp)) {
-					if (xflags & XFS_XSYNC_RELOC) {
-						fs_tosspages(XFS_ITOBHV(ip), 0, -1,
-							FI_REMAPF);
-					}
-					else {
					VOP_TOSS_PAGES(vp, 0, -1, FI_REMAPF);
-					}
				} else {
-					if (xflags & XFS_XSYNC_RELOC) {
-						fs_flushinval_pages(XFS_ITOBHV(ip),
-							0, -1, FI_REMAPF);
-					}
-					else {
					VOP_FLUSHINVAL_PAGES(vp, 0, -1, FI_REMAPF);
				}
-				}
 
				xfs_ilock(ip, XFS_ILOCK_SHARED);
@@ -1418,16 +1385,55 @@ xfs_syncsub(
 	ASSERT(ipointer_in == B_FALSE);
 
+	kmem_free(ipointer, sizeof(xfs_iptr_t));
+	return XFS_ERROR(last_error);
+}
+
+/*
+ * xfs sync routine for internal use
+ *
+ * This routine supports all of the flags defined for the generic VFS_SYNC
+ * interface as explained above under xfs_sync. In the interests of not
+ * changing interfaces within the 6.5 family, additional internallly-
+ * required functions are specified within a separate xflags parameter,
+ * only available by calling this routine.
+ *
+ */
+int
+xfs_syncsub(
+	xfs_mount_t	*mp,
+	int		flags,
+	int		xflags,
+	int		*bypassed)
+{
+	int		error = 0;
+	int		last_error = 0;
+	uint		log_flags = XFS_LOG_FORCE;
+	xfs_buf_t	*bp;
+	xfs_buf_log_item_t	*bip;
+
 	/*
-	 * Flushing out dirty data above probably generated more
-	 * log activity, so if this isn't vfs_sync() then flush
-	 * the log again. If SYNC_WAIT is set then do it synchronously.
+	 * Sync out the log.  This ensures that the log is periodically
+	 * flushed even if there is not enough activity to fill it up.
 	 */
-	if (!(flags & SYNC_BDFLUSH)) {
-		log_flags = XFS_LOG_FORCE;
-		if (flags & SYNC_WAIT) {
+	if (flags & SYNC_WAIT)
 		log_flags |= XFS_LOG_SYNC;
+
+	xfs_log_force(mp, (xfs_lsn_t)0, log_flags);
+
+	if (flags & (SYNC_ATTR|SYNC_DELWRI)) {
+		if (flags & SYNC_BDFLUSH)
+			xfs_finish_reclaim_all(mp, 1);
+		else
+			error = xfs_sync_inodes(mp, flags, xflags, bypassed);
 	}
+
+	/*
+	 * Flushing out dirty data above probably generated more
+	 * log activity, so if this isn't vfs_sync() then flush
+	 * the log again.
+	 */
+	if (flags & SYNC_DELWRI) {
 		xfs_log_force(mp, (xfs_lsn_t)0, log_flags);
 	}
@@ -1463,11 +1469,10 @@ xfs_syncsub(
		 * that point so it can become pinned in between
		 * there and here.
		 */
-		if (XFS_BUF_ISPINNED(bp)) {
-			xfs_log_force(mp, (xfs_lsn_t)0,
-					XFS_LOG_FORCE);
-		}
-		XFS_BUF_BFLAGS(bp) |= fflag;
+		if (XFS_BUF_ISPINNED(bp))
+			xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+		if (!(flags & SYNC_WAIT))
+			XFS_BUF_BFLAGS(bp) |= XFS_B_ASYNC;
 		error = xfs_bwrite(mp, bp);
 	}
 	if (error) {
@@ -1478,9 +1483,9 @@ xfs_syncsub(
 	/*
 	 * Now check to see if the log needs a "dummy" transaction.
 	 */
 	if (xfs_log_need_covered(mp)) {
 		xfs_trans_t *tp;
+		xfs_inode_t *ip;
 
 		/*
		 * Put a dummy transaction in the log to tell
@@ -1491,7 +1496,6 @@ xfs_syncsub(
				XFS_ICHANGE_LOG_RES(mp),
				0, 0, 0))) {
			xfs_trans_cancel(tp, 0);
-			kmem_free(ipointer, sizeof(xfs_iptr_t));
			return error;
 		}
@@ -1503,6 +1507,7 @@ xfs_syncsub(
 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 		error = xfs_trans_commit(tp, 0, NULL);
 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
+		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
 	}
 
 	/*
@@ -1516,7 +1521,6 @@ xfs_syncsub(
 		}
 	}
 
-	kmem_free(ipointer, sizeof(xfs_iptr_t));
 	return XFS_ERROR(last_error);
 }
...
@@ -3786,27 +3786,30 @@ xfs_inode_flush(
				flush_flags = XFS_IFLUSH_SYNC;
			else
 #endif
-				flush_flags = XFS_IFLUSH_DELWRI;
+				flush_flags = XFS_IFLUSH_DELWRI_ELSE_ASYNC;
 
			xfs_ifunlock(ip);
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
			error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0);
			if (error)
-				goto eagain;
+				return error;
			xfs_buf_relse(bp);
 
			if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED) == 0)
-				goto eagain;
+				return EAGAIN;
+
+			if (xfs_ipincount(ip) ||
+			    !xfs_iflock_nowait(ip)) {
+				xfs_iunlock(ip, XFS_ILOCK_SHARED);
+				return EAGAIN;
+			}
 
-			if ((xfs_ipincount(ip) == 0) &&
-			    xfs_iflock_nowait(ip))
				error = xfs_iflush(ip, flush_flags);
			} else {
				error = EAGAIN;
			}
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
 		} else {
-eagain:
			error = EAGAIN;
 		}
 	}
@@ -3934,6 +3937,8 @@ xfs_reclaim(
 		/* Protect sync from us */
 		XFS_MOUNT_ILOCK(mp);
 		vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip));
+		list_add_tail(&ip->i_reclaim, &mp->m_del_inodes);
 		XFS_MOUNT_IUNLOCK(mp);
 	}
 	return 0;
@@ -4010,40 +4015,33 @@ xfs_finish_reclaim(
 }
 
 int
-xfs_finish_reclaim_all(xfs_mount_t *mp)
+xfs_finish_reclaim_all(xfs_mount_t *mp, int noblock)
 {
 	int		purged;
+	struct list_head	*curr, *next;
 	xfs_inode_t	*ip;
-	vnode_t		*vp;
 	int		done = 0;
 
 	while (!done) {
 		purged = 0;
 		XFS_MOUNT_ILOCK(mp);
-		ip = mp->m_inodes;
-		if (ip == NULL) {
-			break;
-		}
-		do {
-			/* Make sure we skip markers inserted by sync */
-			if (ip->i_mount == NULL) {
-				ip = ip->i_mnext;
-				continue;
-			}
-
-			/*
-			 * It's up to our caller to purge the root
-			 * and quota vnodes later.
-			 */
-			vp = XFS_ITOV_NULL(ip);
-			if (!vp) {
+		list_for_each_safe(curr, next, &mp->m_del_inodes) {
+			ip = list_entry(curr, xfs_inode_t, i_reclaim);
+			if (noblock) {
+				if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL) == 0)
+					continue;
+				if (xfs_ipincount(ip) ||
+				    !xfs_iflock_nowait(ip)) {
+					xfs_iunlock(ip, XFS_ILOCK_EXCL);
+					continue;
+				}
+			}
			XFS_MOUNT_IUNLOCK(mp);
-			xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
+			xfs_finish_reclaim(ip, noblock,
+					XFS_IFLUSH_DELWRI_ELSE_ASYNC);
			purged = 1;
			break;
 		}
-		} while (ip != mp->m_inodes);
 		done = !purged;
 	}
...